-
Notifications
You must be signed in to change notification settings - Fork 8
/
zol.py
239 lines (198 loc) · 8.98 KB
/
zol.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 David DOUARD, LOGILAB S.A.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for ZFS-on-Linux-stored volumes.
This is mainly taken from http://www.logilab.org/blogentry/114769 with
modifications to make it work well with cinder and OpenStack Folsom.
My setup is utilizing locally stored ZFS volumes so SSH access was not tested
"""
import os
from cinder import exception
from cinder import flags
from cinder import utils
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.volume import iscsi
from cinder.volume.driver import _iscsi_location
from cinder.volume.san import SanISCSIDriver
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# Extra configuration options contributed by this driver.
san_opts = [
    cfg.StrOpt('san_zfs_command',
               default='zfs',
               help='The ZFS command.'),
]
# Register the options on the global flags object so that
# FLAGS.san_zfs_command resolves at class-definition time below.
FLAGS = flags.FLAGS
FLAGS.register_opts(san_opts)
class ZFSonLinuxISCSIDriver(SanISCSIDriver):
    """Executes commands relating to ZFS-on-Linux-hosted ISCSI volumes.

    Basic setup for a ZoL iSCSI server:
    XXX

    Note that the current implementation of ZFS on Linux does not handle:

        zfs allow/unallow

    For now, the driver needs root access to the ZFS host.  The best is to
    use a ssh key with ssh authorized_keys restriction mechanisms to
    limit root access.

    Make sure you can login using san_login & san_password/san_private_key.
    """

    # ZFS CLI binary; configurable through the ``san_zfs_command`` flag.
    ZFSCMD = FLAGS.san_zfs_command
    # Default local executor; may be replaced via set_execute().
    _local_execute = utils.execute

    def _getrl(self):
        """Getter for the ``run_local`` property."""
        return self._runlocal

    def _setrl(self, v):
        """Setter for ``run_local``; coerces truthy strings to booleans."""
        if isinstance(v, basestring):
            v = v.lower() in ('true', 't', '1', 'y', 'yes')
        self._runlocal = v

    # When True, commands are executed locally; otherwise over SSH.
    run_local = property(_getrl, _setrl)

    def __init__(self):
        super(ZFSonLinuxISCSIDriver, self).__init__()
        # NOTE: the target admin must be created *before* its execute
        # method can be overridden -- the original code had these two
        # statements swapped, which raised AttributeError on
        # instantiation because self.tgtadm did not exist yet.
        self.tgtadm = iscsi.get_target_admin()
        self.tgtadm.set_execute(self._execute)
        LOG.info("run local = %s (%s)" % (self.run_local, FLAGS.san_is_local))

    def set_execute(self, execute):
        """Replace the callable used for local command execution."""
        LOG.debug("override local execute cmd with %s (%s)" % (
            repr(execute), execute.__module__))
        self._local_execute = execute

    def _execute(self, *cmd, **kwargs):
        """Run *cmd* locally or over SSH depending on ``run_local``."""
        if self.run_local:
            LOG.debug("LOCAL execute cmd %s (%s)" % (cmd, kwargs))
            return self._local_execute(*cmd, **kwargs)
        else:
            LOG.debug("SSH execute cmd %s (%s)" % (cmd, kwargs))
            check_exit_code = kwargs.pop('check_exit_code', None)
            command = ' '.join(cmd)
            return self._run_ssh(command, check_exit_code)

    def _create_volume(self, volume_name, sizestr):
        """Create a zvol of size *sizestr* backing *volume_name*."""
        zfs_poolname = self._build_zfs_poolname(volume_name)
        # Create a zfs volume
        cmd = [self.ZFSCMD, 'create']
        if FLAGS.san_thin_provision:
            # -s creates a sparse (thin-provisioned) volume.
            cmd.append('-s')
        cmd.extend(['-V', sizestr])
        cmd.append(zfs_poolname)
        self._execute(*cmd, run_as_root=True)

    def _volume_not_present(self, volume_name):
        """Return True if *volume_name* does not exist on the backend."""
        zfs_poolname = self._build_zfs_poolname(volume_name)
        try:
            out, err = self._execute(self.ZFSCMD, 'list', '-H',
                                     zfs_poolname, run_as_root=True)
            if out.startswith(zfs_poolname):
                return False
        except Exception:
            # `zfs list` exits non-zero when the dataset is missing, so
            # a failure here means the volume isn't present.
            return True
        return False

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        zfs_snap = self._build_zfs_poolname(snapshot['name'])
        # BUG FIX: the clone target must be derived from the *volume*
        # name; the original built it from snapshot['name'], making the
        # clone target identical to the snapshot's dataset name.
        zfs_vol = self._build_zfs_poolname(volume['name'])
        self._execute(self.ZFSCMD, 'clone', zfs_snap,
                      zfs_vol, run_as_root=True)
        # Promote the clone so it no longer depends on the snapshot.
        self._execute(self.ZFSCMD, 'promote', zfs_vol, run_as_root=True)

    def delete_volume(self, volume):
        """Deletes a volume."""
        if self._volume_not_present(volume['name']):
            # If the volume isn't present, then don't attempt to delete
            return True
        zfs_poolname = self._build_zfs_poolname(volume['name'])
        self._execute(self.ZFSCMD, 'destroy', zfs_poolname, run_as_root=True)

    def create_export(self, context, volume):
        """Creates an export for a logical volume."""
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        # set volume path properly for ZFS
        volume_path = "/dev/zvol/%s/%s" % (FLAGS.volume_group, volume['name'])
        model_update = {}

        # TODO(jdg): In the future move all of the dependent stuff into the
        # cooresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            lun = 0
            self._ensure_iscsi_targets(context, volume['host'])
            iscsi_target = self.db.volume_allocate_iscsi_target(context,
                                                                volume['id'],
                                                                volume['host'])
        else:
            lun = 1
            iscsi_target = 0

        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
        # should clean this all up at some point in the future
        tid = self.tgtadm.create_iscsi_target(iscsi_name,
                                              iscsi_target,
                                              0,
                                              volume_path)
        model_update['provider_location'] = _iscsi_location(
            FLAGS.iscsi_ip_address, tid, iscsi_name, lun)
        return model_update

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
        # TODO(jdg): In the future move all of the dependent stuff into the
        # cooresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            try:
                iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                                   volume['id'])
            except exception.NotFound:
                LOG.info(_("Skipping remove_export. No iscsi_target "
                           "provisioned for volume: %s"), volume['id'])
                return
        else:
            iscsi_target = 0

        try:
            # NOTE: provider_location may be unset if the volume hasn't
            # been exported
            location = volume['provider_location'].split(' ')
            iqn = location[1]
            # ietadm show will exit with an error
            # this export has already been removed
            self.tgtadm.show_target(iscsi_target, iqn=iqn)
        except Exception:
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "is presently exported for volume: %s"), volume['id'])
            return

        self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        vol_uuid_file = 'volume-%s' % volume_id
        volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
        if os.path.isfile(volume_path):
            iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.PersistentVolumeFileNotFound(volume_id=volume_id)

        # TODO(jdg): In the future move all of the dependent stuff into the
        # cooresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            tid = self.db.volume_get_iscsi_target_num(context, volume_id)
        else:
            tid = 0

        try:
            self.tgtadm.show_target(tid, iqn=iqn)
        # NOTE: modernized from the legacy `except X, e` comma syntax for
        # consistency with the `except ... as ...` form used elsewhere.
        except exception.ProcessExecutionError:
            # Instances remount read-only in this case.
            # /etc/init.d/iscsitarget restart and rebooting cinder-volume
            # is better since ensure_export() works at boot time.
            LOG.error(_("Cannot confirm exported volume "
                        "id:%(volume_id)s.") % locals())
            raise

    def local_path(self, volume):
        """Return the local device path for *volume*'s zvol."""
        zfs_poolname = self._build_zfs_poolname(volume['name'])
        zvoldev = '/dev/zvol/%s' % zfs_poolname
        return zvoldev

    def _build_zfs_poolname(self, volume_name):
        """Prefix *volume_name* with the configured ZFS dataset base."""
        zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume_name)
        return zfs_poolname