Ticket #3957 (partial) - ganeti_webmgr uses ganeti python namespace:
[ganeti_webmgr.git] / ganeti_web / models.py
blob4fe1780742a67a1a924be8897810aaae646e382e
1 # coding: utf-8
3 # Copyright (C) 2010 Oregon State University et al.
4 # Copyright (C) 2010 Greek Research and Technology Network
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
19 # USA.
22 import cPickle
23 from datetime import datetime, timedelta
24 from hashlib import sha1
26 from django.conf import settings
28 from django.contrib.sites import models as sites_app
29 from django.contrib.sites.management import create_default_site
30 from django.contrib.auth.models import User, Group
31 from django.contrib.contenttypes.models import ContentType
32 from django.contrib.contenttypes.generic import GenericForeignKey
34 from django.core.validators import RegexValidator, MinValueValidator
35 from django.utils.translation import ugettext_lazy as _
36 import re
38 from django.db import models
39 from django.db.models import Q, Sum
40 from django.db.models.query import QuerySet
41 from django.db.models.signals import post_save, post_syncdb
42 from django.db.utils import DatabaseError
43 from ganeti_web.logs import register_log_actions
45 from object_log.models import LogItem
46 log_action = LogItem.objects.log_action
48 from object_permissions.registration import register
49 from object_permissions import signals as op_signals
51 from muddle_users import signals as muddle_user_signals
53 from ganeti_web import constants, management
54 from ganeti_web.fields import PreciseDateTimeField, SumIf
55 from ganeti_web import permissions
56 from util import client
57 from util.client import GanetiApiError
59 if settings.VNC_PROXY:
60 from util.vncdaemon.vapclient import request_forwarding
61 import random
62 import string
def generate_random_password(length=12):
    """
    Generate a random alphanumeric password of the given length.

    Uses the operating system's CSPRNG via random.SystemRandom rather than
    the default Mersenne Twister, since the result is used as a secret
    (VNC forwarding password).  Characters are drawn *with* replacement from
    string.ascii_letters + string.digits:

      * string.ascii_letters is locale-independent, unlike the old
        string.letters, which could inject locale-specific characters into
        the alphabet.
      * drawing with replacement (choice in a loop) gives full entropy per
        character; random.sample drew without replacement, which both reduced
        entropy and raised ValueError for length > 62.

    @param length: number of characters to generate (default 12)
    @return a random string of ASCII letters and digits
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return "".join(rng.choice(alphabet) for _ in range(length))
# Process-wide cache of Ganeti RAPI clients, keyed by the credentials hash
# (see Cluster.create_hash).
RAPI_CACHE = {}
# Maps cluster id -> the hash whose client is currently in RAPI_CACHE, so a
# stale client can be evicted when a cluster's credentials change.
RAPI_CACHE_HASHES = {}
def get_rapi(hash, cluster):
    """
    Retrieves the cached Ganeti RAPI client for a given hash.  The hash is
    derived from the connection credentials required for a cluster.  If the
    client is not yet cached, it will be created and added.

    If a hash does not correspond to any cluster then Cluster.DoesNotExist
    will be raised (via the tuple-unpack of an empty queryset below).

    @param hash - credentials hash to look up.  NOTE: shadows the builtin
        ``hash`` within this function.
    @param cluster - either a cluster object, or ID of object.  This is used
        for resolving the cluster if the client is not already found.  The id
        is used rather than the hash, because the hash is mutable.

    @return a Ganeti RAPI client.
    """
    # fast path: a client for these exact credentials is already cached
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]

    # always look up the instance, even if we were given a Cluster instance
    # it ensures we are retrieving the latest credentials.  This helps avoid
    # stale credentials.  Retrieve only the values because we don't actually
    # need another Cluster instance here.
    if isinstance(cluster, (Cluster,)):
        cluster = cluster.id
    (credentials,) = Cluster.objects.filter(id=cluster) \
        .values_list('hash','hostname','port','username','password')
    hash, host, port, user, password = credentials
    # normalize blank username/password to None for the client constructor
    user = user if user else None
    password = password if password else None

    # now that we know hash is fresh, check cache again.  The original hash
    # could have been stale.  This avoids constructing a new RAPI that
    # already exists.
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]

    # delete any old version of the client that was cached.
    if cluster in RAPI_CACHE_HASHES:
        del RAPI_CACHE[RAPI_CACHE_HASHES[cluster]]

    rapi = client.GanetiRapiClient(host, port, user, password)
    RAPI_CACHE[hash] = rapi
    RAPI_CACHE_HASHES[cluster] = hash
    return rapi
def clear_rapi_cache():
    """Empty both RAPI caches: the per-cluster hash map and the client map."""
    RAPI_CACHE_HASHES.clear()
    RAPI_CACHE.clear()
# Accepts an SSH2 public key line: "ssh-<type> <base64 body> <comment>".
# IGNORECASE lets the base64 body match lowercase letters too; the comment
# part (.+) is required.
ssh_public_key_re = re.compile(
    r'^ssh-(rsa|dsa|dss) [A-Z0-9+/=]+ .+$', re.IGNORECASE)
# Form/model validator built from the pattern above.
validate_sshkey = RegexValidator(ssh_public_key_re,
    _(u"Enter a valid SSH public key with comment (SSH2 RSA or DSA)."), "invalid")
class CachedClusterObject(models.Model):
    """
    Mixin class for objects that reside on the cluster but some portion is
    cached in the database.  This class contains logic and other structures
    for handling cache loading transparently.
    """
    # pickled copy of self.info; (de)serialization is lazy (see info property
    # and save())
    serialized_info = models.TextField(null=True, default=None, editable=False)
    # modification time reported by the remote cluster object
    mtime = PreciseDateTimeField(null=True, editable=False)
    # when the cached copy was last refreshed
    cached = PreciseDateTimeField(null=True, editable=False)
    # when True, load_info() always refreshes from the cluster
    ignore_cache = models.BooleanField(default=False)

    # class-level (non-persistent) state; __info is name-mangled per instance
    __info = None
    error = None
    ctime = None

    def __init__(self, *args, **kwargs):
        super(CachedClusterObject, self).__init__(*args, **kwargs)
        # NOTE: triggers cache load (and possibly a remote refresh) on every
        # instantiation, including instances materialized from querysets.
        self.load_info()

    @property
    def info(self):
        """
        Getter for self.info, a dictionary of data about a VirtualMachine.
        This is a proxy to self.serialized_info that handles deserialization.
        Accessing this property will lazily deserialize info if it has not
        yet been deserialized.
        """
        if self.__info is None:
            if self.serialized_info is not None:
                self.__info = cPickle.loads(str(self.serialized_info))
        return self.__info

    @info.setter
    def info(self, value):
        """
        Setter for self.info, proxy to self.serialized_info that handles
        serialization.  When info is set, it will be parsed and will trigger
        self.parse_info() to update persistent and non-persistent properties
        stored on the model instance.

        Calling this method will not force serialization.  Serialization of
        info is lazy and will only occur when saving.
        """
        self.__info = value
        if value is not None:
            self.parse_info()
            # clear stale pickle so save() re-serializes the new value
            self.serialized_info = None

    def load_info(self):
        """
        Load cached info retrieved from the ganeti cluster.  This function
        includes a lazy cache mechanism that uses a timer to decide whether
        or not to refresh the cached information with new information from
        the ganeti cluster.

        This will ignore the cache when self.ignore_cache is True.
        """
        if self.id:
            if self.ignore_cache:
                self.refresh()

            # LAZY_CACHE_REFRESH is passed as the timedelta milliseconds
            # argument, i.e. it is a value in milliseconds.
            elif self.cached is None \
                or datetime.now() > self.cached+timedelta(0, 0, 0, settings.LAZY_CACHE_REFRESH):
                self.refresh()
            else:
                if self.info:
                    self.parse_transient_info()
                else:
                    self.error = 'No Cached Info'

    def parse_info(self):
        """ Parse all values from the cached info """
        self.parse_transient_info()
        data = self.parse_persistent_info(self.info)
        for k in data:
            setattr(self, k, data[k])

    def refresh(self):
        """
        Retrieve and parse info from the ganeti cluster.  If successfully
        retrieved and parsed, this method will also call save().

        Failure while loading the remote class will result in an incomplete
        object.  The error will be stored to self.error.
        """
        try:
            info_ = self._refresh()
            if info_:
                if info_['mtime']:
                    mtime = datetime.fromtimestamp(info_['mtime'])
                else:
                    mtime = None
                self.cached = datetime.now()
            else:
                # no info retrieved, use current mtime
                mtime = self.mtime

            if self.mtime is None or mtime > self.mtime:
                # there was an update. Set info and save the object
                self.info = info_
                self.check_job_status()
                self.save()
            else:
                # There was no change on the server.  Only update the cache
                # time.  This bypasses the info serialization mechanism and
                # uses a smaller query.
                updates = self.check_job_status()
                if updates:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached, **updates)
                elif self.id is not None:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached)

        except GanetiApiError, e:
            # record the API failure both on the instance and as a
            # GanetiError row for reporting
            self.error = str(e)
            GanetiError.objects.store_error(str(e), obj=self, code=e.code)

        else:
            # successful refresh clears any previously recorded errors
            self.error = None
            GanetiError.objects.clear_errors(obj=self)

    def _refresh(self):
        """
        Fetch raw data from the ganeti cluster.  This is specific to the
        object and must be implemented by it.
        """
        raise NotImplementedError

    def check_job_status(self):
        # hook for subclasses; returns updates dict or None (see refresh())
        pass

    def parse_transient_info(self):
        """
        Parse properties from cached info that is stored on the class but not
        in the database.  These properties will be loaded every time the
        object is instantiated.  Properties stored on the class cannot be
        searched efficiently via the django query api.

        This method is specific to the child object.
        """
        info_ = self.info
        # XXX ganeti 2.1 ctime is always None
        if info_['ctime'] is not None:
            self.ctime = datetime.fromtimestamp(info_['ctime'])

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse properties from cached info that are stored in the database.
        These properties will be searchable by the django query api.

        This method is specific to the child object.
        """
        # mtime is sometimes None if object has never been modified
        if info['mtime'] is None:
            return {'mtime': None}
        return {'mtime': datetime.fromtimestamp(info['mtime'])}

    def save(self, *args, **kwargs):
        """
        overridden to ensure info is serialized prior to save
        """
        if self.serialized_info is None:
            self.serialized_info = cPickle.dumps(self.__info)
        super(CachedClusterObject, self).save(*args, **kwargs)

    class Meta:
        abstract = True
class JobManager(models.Manager):
    """
    Custom manager for Ganeti Jobs model.
    """
    def create(self, **kwargs):
        """
        Build a Job with the cache bypass enabled and insert it as a brand
        new row (force_insert), then return it.
        """
        new_job = Job(ignore_cache=True, **kwargs)
        new_job.save(force_insert=True)
        return new_job
class Job(CachedClusterObject):
    """
    Model representing a job being run on a ganeti Cluster.  This includes
    operations such as creating or deleting a virtual machine.

    Jobs are a special type of CachedClusterObject.  Jobs run once then
    become immutable.  The lazy cache is modified to become permanent once a
    complete status (success/error) has been detected.  The cache can be
    disabled by setting ignore_cache=True.
    """
    # id of the job on the ganeti cluster (not this row's pk)
    job_id = models.IntegerField(null=False)
    # generic relation to the object this job operates on
    content_type = models.ForeignKey(ContentType, null=False)
    object_id = models.IntegerField(null=False)
    obj = GenericForeignKey('content_type', 'object_id')
    cluster = models.ForeignKey('Cluster', editable=False, related_name='jobs')
    cluster_hash = models.CharField(max_length=40, editable=False)

    # whether the user has dismissed this job from display
    cleared = models.BooleanField(default=False)
    finished = models.DateTimeField(null=True)
    status = models.CharField(max_length=10)

    objects = JobManager()

    @property
    def rapi(self):
        # RAPI client for the cluster this job belongs to
        return get_rapi(self.cluster_hash, self.cluster_id)

    def _refresh(self):
        """Fetch raw job status from the ganeti cluster."""
        return self.rapi.GetJobStatus(self.job_id)

    def load_info(self):
        """
        Load info for class.  This will load from ganeti if
        ignore_cache==True, otherwise this will always load from the cache.
        """
        if self.id and (self.ignore_cache or self.info is None):
            self.info = self._refresh()
            self.save()

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse status and turn off cache bypass flag if job has finished.
        """
        data = {'status': info['status']}
        if data['status'] in ('error','success'):
            # job reached a terminal state; cache becomes permanent
            data['ignore_cache'] = False
        if info['end_ts']:
            data['finished'] = cls.parse_end_timestamp(info)
        return data

    @classmethod
    def parse_end_timestamp(cls, info):
        # end_ts is a (seconds, microseconds) pair from ganeti
        sec, micro = info['end_ts']
        return datetime.fromtimestamp(sec+(micro/1000000.0))

    def parse_transient_info(self):
        # jobs have no transient properties (and no 'ctime' key in info)
        pass

    def save(self, *args, **kwargs):
        """
        Sets the cluster_hash for newly saved instances.
        """
        if self.id is None or self.cluster_hash == '':
            self.cluster_hash = self.cluster.hash

        super(Job, self).save(*args, **kwargs)

    @property
    def current_operation(self):
        """
        Jobs may consist of multiple commands/operations.  This helper
        method will return the operation that is currently running or errored
        out, or the last operation if all operations have completed.

        NOTE(review): if every opstatus is 'success', index stays 0 and the
        *first* op is returned, not the last as documented — confirm intent.

        @returns raw name of the current operation
        """
        info = self.info
        index = 0
        for i in range(len(info['opstatus'])):
            if info['opstatus'][i] != 'success':
                index = i
                break
        return info['ops'][index]['OP_ID']

    @property
    def operation(self):
        """
        Returns the last operation, which is generally the primary operation.
        """
        return self.info['ops'][-1]['OP_ID']

    def __repr__(self):
        return "<Job: '%s'>" % self.id

    def __str__(self):
        return repr(self)
class VirtualMachine(CachedClusterObject):
    """
    The VirtualMachine (VM) model represents VMs within a Ganeti cluster.
    The majority of properties are a cache for data stored in the cluster.
    All data retrieved via the RAPI is stored in VirtualMachine.info, and
    serialized automatically into VirtualMachine.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within VirtualMachine.info.

    This object uses a lazy update mechanism on instantiation.  If the cached
    info from the Ganeti cluster has expired, it will trigger an update.
    This allows the cache to function in the absence of a periodic update
    mechanism such as Cron, Celery, or Threads.

    The lazy update and periodic update should use separate refresh timeouts
    where LAZY_CACHE_REFRESH > PERIODIC_CACHE_REFRESH.  This ensures that
    lazy cache will only be used if the periodic cache is not updating.

    XXX Serialized_info can possibly be changed to a CharField if an upper
    limit can be determined. (Later Date, if it will optimize db)
    """
    cluster = models.ForeignKey('Cluster', editable=False, default=0,
                                related_name='virtual_machines')
    hostname = models.CharField(max_length=128, db_index=True)
    owner = models.ForeignKey('ClusterUser', null=True,
                              related_name='virtual_machines')
    virtual_cpus = models.IntegerField(default=-1)
    disk_size = models.IntegerField(default=-1)
    ram = models.IntegerField(default=-1)
    cluster_hash = models.CharField(max_length=40, editable=False)
    operating_system = models.CharField(max_length=128)
    status = models.CharField(max_length=10)

    # node relations
    primary_node = models.ForeignKey('Node', null=True,
                                     related_name='primary_vms')
    secondary_node = models.ForeignKey('Node', null=True,
                                       related_name='secondary_vms')

    # The last job reference indicates that there is at least one pending job
    # for this virtual machine.  There may be more than one job, and that can
    # never be prevented.  This just indicates that job(s) are pending and
    # the job related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', null=True)

    # deleted flag indicates a VM is being deleted, but the job has not
    # completed yet.  VMs that have pending_delete are still displayed in
    # lists and counted in quotas, but only so status can be checked.
    pending_delete = models.BooleanField(default=False)
    deleted = False

    # Template temporarily stores parameters used to create this virtual
    # machine.  This template is used to recreate the values entered into
    # the form.
    template = models.ForeignKey("VirtualMachineTemplate", null=True)

    class Meta:
        ordering = ["hostname", ]
        unique_together = (("cluster", "hostname"),)

    @property
    def rapi(self):
        # RAPI client for this VM's cluster
        return get_rapi(self.cluster_hash, self.cluster_id)

    @property
    def is_running(self):
        return self.status == 'running'

    def save(self, *args, **kwargs):
        """
        Sets the cluster_hash for newly saved instances, and synchronizes the
        owner tag on the ganeti cluster with the owner set in webmgr.
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash

        info_ = self.info
        if info_:
            found = False
            remove = []
            # only touch tags when we have credentials for the cluster
            if self.cluster.username:
                for tag in info_['tags']:
                    # Update owner Tag.  Make sure the tag is set to the
                    # owner that is set in webmgr.
                    if tag.startswith(constants.OWNER_TAG):
                        id = int(tag[len(constants.OWNER_TAG):])
                        # Since there is no 'update tag' delete old tag and
                        # replace with tag containing correct owner id.
                        if id == self.owner_id:
                            found = True
                        else:
                            remove.append(tag)
                if remove:
                    self.rapi.DeleteInstanceTags(self.hostname, remove)
                    for tag in remove:
                        info_['tags'].remove(tag)
                if self.owner_id and not found:
                    tag = '%s%s' % (constants.OWNER_TAG, self.owner_id)
                    self.rapi.AddInstanceTags(self.hostname, [tag])
                    self.info['tags'].append(tag)

        super(VirtualMachine, self).save(*args, **kwargs)

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, included persistent properties
        that are stored in the database.
        """
        data = super(VirtualMachine, cls).parse_persistent_info(info)

        # Parse resource properties
        data['ram'] = info['beparams']['memory']
        data['virtual_cpus'] = info['beparams']['vcpus']
        # Sum up the size of each disk used by the VM
        disk_size = 0
        for disk in info['disk.sizes']:
            disk_size += disk
        data['disk_size'] = disk_size
        data['operating_system'] = info['os']
        data['status'] = info['status']

        primary = info['pnode']
        if primary:
            try:
                data['primary_node'] = Node.objects.get(hostname=primary)
            except Node.DoesNotExist:
                # node is not created yet.  fail silently
                data['primary_node'] = None
        else:
            data['primary_node'] = None

        secondary = info['snodes']
        if len(secondary):
            secondary = secondary[0]
            try:
                data['secondary_node'] = Node.objects.get(hostname=secondary)
            except Node.DoesNotExist:
                # node is not created yet.  fail silently
                data['secondary_node'] = None
        else:
            data['secondary_node'] = None

        return data

    def check_job_status(self):
        """
        If the cache bypass is enabled then check the status of the last job
        when the job is complete we can reenable the cache.

        @returns - dictionary of values that were updated, or None to prevent
            refresh() from writing cache updates for this VM (e.g. after the
            VM row was deleted)
        """
        if self.ignore_cache and self.last_job_id:
            (job_id,) = Job.objects.filter(pk=self.last_job_id)\
                .values_list('job_id', flat=True)
            data = self.rapi.GetJobStatus(job_id)
            status = data['status']

            if status in ('success', 'error'):
                finished = Job.parse_end_timestamp(data)
                Job.objects.filter(pk=self.last_job_id) \
                    .update(status=status, ignore_cache=False, finished=finished)
                self.ignore_cache = False

            op_id = data['ops'][-1]['OP_ID']

            if status == 'success':
                self.last_job = None
                # job cleanups
                #   - if the job was a deletion, then delete this vm
                #   - if the job was creation, then delete temporary template
                # XXX return a None to prevent refresh() from trying to update
                #     the cache setting for this VM
                # XXX delete may have multiple ops in it, but delete is always
                #     the last command run.
                if op_id == 'OP_INSTANCE_REMOVE':
                    self.delete()
                    self.deleted = True
                    return None

                elif op_id == 'OP_INSTANCE_CREATE':
                    # XXX must update before deleting the template to maintain
                    # referential integrity.  as a consequence return no other
                    # updates.
                    VirtualMachine.objects.filter(pk=self.pk) \
                        .update(ignore_cache=False, last_job=None, template=None)

                    VirtualMachineTemplate.objects.filter(pk=self.template_id) \
                        .delete()
                    self.template=None
                    return dict()

                return dict(ignore_cache=False, last_job=None)

            elif status == 'error':
                if op_id == 'OP_INSTANCE_CREATE' and self.info:
                    # create failed but vm was deployed, template is no longer
                    # needed
                    #
                    # XXX must update before deleting the template to maintain
                    # referential integrity.  as a consequence return no other
                    # updates.
                    VirtualMachine.objects.filter(pk=self.pk) \
                        .update(ignore_cache=False, template=None)

                    VirtualMachineTemplate.objects.filter(pk=self.template_id) \
                        .delete()
                    self.template=None
                    return dict()
                else:
                    return dict(ignore_cache=False)

    def _refresh(self):
        # XXX if delete is pending then no need to refresh this object.
        if self.pending_delete:
            return None
        return self.rapi.GetInstance(self.hostname)

    def shutdown(self):
        """Submit a shutdown job for this VM and track it as last_job."""
        id = self.rapi.ShutdownInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def startup(self):
        """Submit a startup job for this VM and track it as last_job."""
        id = self.rapi.StartupInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def reboot(self):
        """Submit a reboot job for this VM and track it as last_job."""
        id = self.rapi.RebootInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def migrate(self, mode='live', cleanup=False):
        """
        Migrates this VirtualMachine to another node.  Only works if the disk
        type is DRDB.

        @param mode: live or non-live
        @param cleanup: clean up a previous migration, default is False
        """
        id = self.rapi.MigrateInstance(self.hostname, mode, cleanup)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def setup_vnc_forwarding(self, sport=''):
        """
        Set up a VNC connection to this VM's console.

        @param sport: source port to request from the proxy (when VNC_PROXY
            is configured)
        @return (host, port, password); (False, False, False) if the proxy
            request fails.  password is '' when no proxy is in use.
        """
        password = ''
        info_ = self.info
        port = info_['network_port']
        node = info_['pnode']

        # use proxy for VNC connection
        if settings.VNC_PROXY:
            proxy_server = settings.VNC_PROXY.split(":")
            password = generate_random_password()
            result = request_forwarding(proxy_server, sport, node, port, password)
            if not result:
                return False, False, False
            else:
                return proxy_server[0], int(result), password

        else:
            return node, port, password

    @models.permalink
    def get_absolute_url(self):
        """
        Return absolute url for this instance.  Since the canonical url
        requires the cluster object this method will check to see if the
        cluster is already queried.  If it has not been queried it will use
        the non-canonical url which is quicker to render.
        """
        if hasattr(self, '_cluster_cache'):
            return 'instance-detail', (), {'cluster_slug':self.cluster.slug,
                                           'instance':self.hostname}
        return 'instance-detail-id', (), {'id':self.pk}

    def __repr__(self):
        return "<VirtualMachine: '%s'>" % self.hostname

    def __unicode__(self):
        return self.hostname
class Node(CachedClusterObject):
    """
    The Node model represents nodes within a Ganeti cluster.  The majority
    of properties are a cache for data stored in the cluster.  All data
    retrieved via the RAPI is stored in Node.info, and serialized
    automatically into Node.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within Node.info.
    """
    # Materialized as a tuple: the original generator expression would be
    # exhausted after its first iteration, leaving `choices` empty for any
    # later consumer (forms, validation).
    ROLE_CHOICES = tuple((k, v) for k, v in constants.NODE_ROLE_MAP.items())

    cluster = models.ForeignKey('Cluster', related_name='nodes')
    hostname = models.CharField(max_length=128, unique=True)
    cluster_hash = models.CharField(max_length=40, editable=False)
    offline = models.BooleanField()
    role = models.CharField(max_length=1, choices=ROLE_CHOICES)
    ram_total = models.IntegerField(default=-1)
    disk_total = models.IntegerField(default=-1)

    # The last job reference indicates that there is at least one pending job
    # for this node.  There may be more than one job, and that can never be
    # prevented.  This just indicates that job(s) are pending and the job
    # related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', null=True)

    def _refresh(self):
        """ returns node info from the ganeti server """
        return self.rapi.GetNode(self.hostname)

    def save(self, *args, **kwargs):
        """
        Sets the cluster_hash for newly saved instances.
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash
        super(Node, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        """
        Return absolute url for this node.  Since the canonical url requires
        the cluster object this method will check to see if the cluster is
        already queried.  If it has not been queried it will use the
        non-canonical url which is quicker to render.
        """
        if hasattr(self, '_cluster_cache'):
            return 'node-detail', (), {'cluster_slug':self.cluster.slug,
                                       'host':self.hostname}
        return 'node-detail-id', (), {'id':self.pk}

    @property
    def rapi(self):
        # RAPI client for this node's cluster
        return get_rapi(self.cluster_hash, self.cluster_id)

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, included persistent properties
        that are stored in the database.
        """
        data = super(Node, cls).parse_persistent_info(info)

        # Parse resource properties; mtotal/dtotal may be None for offline
        # nodes, normalize those to 0.
        data['ram_total'] = info['mtotal'] if info['mtotal'] is not None else 0
        data['disk_total'] = info['dtotal'] if info['dtotal'] is not None else 0
        data['offline'] = info['offline']
        data['role'] = info['role']
        return data

    def check_job_status(self):
        """
        If the cache bypass is enabled then check the status of the last job
        when the job is complete we can reenable the cache.

        @returns - dictionary of values that were updated
        """
        if self.last_job_id:
            (job_id,) = Job.objects.filter(pk=self.last_job_id)\
                .values_list('job_id', flat=True)
            data = self.rapi.GetJobStatus(job_id)
            status = data['status']

            if status in ('success', 'error'):
                finished = Job.parse_end_timestamp(data)
                Job.objects.filter(pk=self.last_job_id) \
                    .update(status=status, ignore_cache=False, finished=finished)
                self.ignore_cache = False

            if status == 'success':
                self.last_job = None
                return dict(ignore_cache=False, last_job=None)

            elif status == 'error':
                return dict(ignore_cache=False)

    @property
    def ram(self):
        """ returns dict of free and total ram """
        values = VirtualMachine.objects \
            .filter(Q(primary_node=self) | Q(secondary_node=self)) \
            .filter(status='running') \
            .exclude(ram=-1).order_by() \
            .aggregate(used=Sum('ram'))

        total = self.ram_total
        running = 0 if values['used'] is None else values['used']
        # -1 marks "unknown" when either side of the subtraction is unknown
        free = total-running if running >= 0 and total >=0 else -1
        return {'total':total, 'free': free}

    @property
    def disk(self):
        """ returns dict of free and total disk space """
        values = VirtualMachine.objects \
            .filter(Q(primary_node=self) | Q(secondary_node=self)) \
            .exclude(disk_size=-1).order_by() \
            .aggregate(used=Sum('disk_size'))

        total = self.disk_total
        running = 0 if 'used' not in values or values['used'] is None else values['used']
        # -1 marks "unknown" when either side of the subtraction is unknown
        free = total-running if running >= 0 and total >=0 else -1
        return {'total':total, 'free': free}

    def set_role(self, role, force=False):
        """
        Sets the role for this node.

        @param role - one of the following choices:
            * master
            * master-candidate
            * regular
            * drained
            * offline
        """
        id = self.rapi.SetNodeRole(self.hostname, role, force)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk).update(ignore_cache=True, last_job=job)
        return job

    def evacuate(self, iallocator=None, node=None):
        """
        Migrates all secondary instances off this node.
        """
        id = self.rapi.EvacuateNode(self.hostname, iallocator=iallocator, remote_node=node)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk) \
            .update(ignore_cache=True, last_job=job)
        return job

    def migrate(self, mode=None):
        """
        Migrates all primary instances off this node.
        """
        id = self.rapi.MigrateNode(self.hostname, mode)
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk).update(ignore_cache=True, last_job=job)
        return job

    def __repr__(self):
        return "<Node: '%s'>" % self.hostname

    def __unicode__(self):
        return self.hostname
class Cluster(CachedClusterObject):
    """
    A Ganeti cluster that is being tracked by this manager tool
    """
    hostname = models.CharField(_('hostname'), max_length=128, unique=True)
    slug = models.SlugField(_('slug'), max_length=50, unique=True, db_index=True)
    port = models.PositiveIntegerField(_('port'), default=5080)
    description = models.CharField(_('description'), max_length=128, blank=True, null=True)
    username = models.CharField(_('username'), max_length=128, blank=True, null=True)
    password = models.CharField(_('password'), max_length=128, blank=True, null=True)
    # sha1 of the connection credentials, recomputed on every save()
    hash = models.CharField(_('hash'), max_length=40, editable=False)

    # quota properties
    virtual_cpus = models.IntegerField(_('Virtual CPUs'), null=True, blank=True)
    disk = models.IntegerField(_('disk'), null=True, blank=True)
    ram = models.IntegerField(_('ram'), null=True, blank=True)

    # The last job reference indicates that there is at least one pending job
    # for this cluster.  There may be more than one job, and that can never
    # be prevented.  This just indicates that job(s) are pending and the job
    # related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', null=True, blank=True,
                                 related_name='cluster_last_job')

    class Meta:
        ordering = ["hostname", "description"]

    def __unicode__(self):
        return self.hostname
    def save(self, *args, **kwargs):
        """Recompute the credentials hash before saving."""
        self.hash = self.create_hash()
        super(Cluster, self).save(*args, **kwargs)
    @models.permalink
    def get_absolute_url(self):
        """Canonical url for this cluster's detail page."""
        return 'cluster-detail', (), {'cluster_slug':self.slug}
    @property
    def rapi(self):
        """
        retrieves the rapi client for this cluster.
        """
        # XXX always pass self in. not only does it avoid querying this object
        # from the DB a second time, it also prevents a recursion loop caused
        # by __init__ fetching info from the Cluster
        return get_rapi(self.hash, self)
    def create_hash(self):
        """
        Creates a hash for this cluster based on credentials required for
        connecting to the server.

        NOTE(review): fields are concatenated with no separator, so distinct
        credential tuples could theoretically collide; changing the format
        now would invalidate hashes already stored in the DB.
        """
        return sha1('%s%s%s%s' % \
            (self.username, self.password, self.hostname, self.port)) \
            .hexdigest()
930 def get_quota(self, user=None):
932 Get the quota for a ClusterUser
934 @return user's quota, default quota, or none
936 if user is None:
937 return {'default':1, 'ram':self.ram, 'disk':self.disk, \
938 'virtual_cpus':self.virtual_cpus}
940 # attempt to query user specific quota first. if it does not exist then
941 # fall back to the default quota
942 query = Quota.objects.filter(cluster=self, user=user) \
943 .values('ram', 'disk', 'virtual_cpus')
944 if len(query):
945 (quota,) = query
946 quota['default'] = 0
947 return quota
949 return {'default':1, 'ram':self.ram, 'disk':self.disk, \
950 'virtual_cpus':self.virtual_cpus, }
952 def set_quota(self, user, values=None):
954 set the quota for a ClusterUser
956 @param values: dictionary of values, or None to delete the quota
958 kwargs = {'cluster':self, 'user':user}
959 if values is None:
960 Quota.objects.filter(**kwargs).delete()
961 else:
962 quota, new = Quota.objects.get_or_create(**kwargs)
963 quota.__dict__.update(values)
964 quota.save()
    def sync_virtual_machines(self, remove=False):
        """
        Synchronizes the VirtualMachines in the database with the information
        this ganeti cluster has:
          * VMs no longer in ganeti are deleted
          * VMs missing from the database are added

        @param remove: when True, also delete database rows for VMs that no
            longer exist on the cluster
        """
        # instances() already swallows RAPI errors and returns [].
        ganeti = self.instances()
        db = self.virtual_machines.all().values_list('hostname', flat=True)

        # add VMs missing from the database
        # (unicode() so the comparison matches the DB's unicode hostnames)
        for hostname in filter(lambda x: unicode(x) not in db, ganeti):
            VirtualMachine(cluster=self, hostname=hostname).save()

        # deletes VMs that are no longer in ganeti
        if remove:
            missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
            if missing_ganeti:
                self.virtual_machines \
                    .filter(hostname__in=missing_ganeti).delete()
    def sync_nodes(self, remove=False):
        """
        Synchronizes the Nodes in the database with the information
        this ganeti cluster has:
          * Nodes no longer in ganeti are deleted
          * Nodes missing from the database are added

        @param remove: when True, also delete database rows for nodes that no
            longer exist on the cluster
        """
        # NOTE(review): unlike sync_virtual_machines, a RAPI failure here
        # propagates — GetNodes is called without a try/except.
        ganeti = self.rapi.GetNodes()
        db = self.nodes.all().values_list('hostname', flat=True)

        # add Nodes missing from the database
        for hostname in filter(lambda x: unicode(x) not in db, ganeti):
            Node(cluster=self, hostname=hostname).save()

        # deletes Nodes that are no longer in ganeti
        if remove:
            missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
            if missing_ganeti:
                self.nodes.filter(hostname__in=missing_ganeti).delete()
1007 @property
1008 def missing_in_ganeti(self):
1010 Returns list of VirtualMachines that are missing from the ganeti cluster
1011 but present in the database
1013 ganeti = self.instances()
1014 db = self.virtual_machines.all().values_list('hostname', flat=True)
1015 return filter(lambda x: str(x) not in ganeti, db)
1017 @property
1018 def missing_in_db(self):
1020 Returns list of VirtualMachines that are missing from the database, but
1021 present in ganeti
1023 ganeti = self.instances()
1024 db = self.virtual_machines.all().values_list('hostname', flat=True)
1025 return filter(lambda x: unicode(x) not in db, ganeti)
1027 @property
1028 def nodes_missing_in_db(self):
1030 Returns list of Nodes that are missing from the database, but present
1031 in ganeti.
1033 try:
1034 ganeti = self.rapi.GetNodes()
1035 except GanetiError:
1036 ganeti = []
1037 db = self.nodes.all().values_list('hostname', flat=True)
1038 return filter(lambda x: unicode(x) not in db, ganeti)
1040 @property
1041 def nodes_missing_in_ganeti(self):
1043 Returns list of Nodes that are missing from the ganeti cluster
1044 but present in the database
1046 try:
1047 ganeti = self.rapi.GetNodes()
1048 except GanetiError:
1049 ganeti = []
1050 db = self.nodes.all().values_list('hostname', flat=True)
1051 return filter(lambda x: str(x) not in ganeti, db)
    @property
    def available_ram(self):
        """ returns dict of free and total ram """
        # Sum ram over nodes that have reported it (-1 marks "unknown").
        nodes = self.nodes.exclude(ram_total=-1) \
            .aggregate(total=Sum('ram_total'))
        # aggregate() yields None for an empty set; under Python 2,
        # None >= 0 is False, so the fallback of 0 is taken.
        total = nodes['total'] if 'total' in nodes and nodes['total'] >= 0 else 0
        # Only running VMs with known ram count against the total; order_by()
        # clears default ordering so it cannot interfere with aggregation.
        values = self.virtual_machines \
            .filter(status='running') \
            .exclude(ram=-1).order_by() \
            .aggregate(used=Sum('ram'))

        # None (no matching VMs) is treated as zero usage.
        used = 0 if 'used' not in values or values['used'] is None else values['used']
        free = total-used if total-used >= 0 else 0
        return {'total':total, 'free':free}
    @property
    def available_disk(self):
        """ returns dict of free and total disk space """
        # Sum disk over nodes that have reported it (-1 marks "unknown").
        nodes = self.nodes.exclude(disk_total=-1) \
            .aggregate(total=Sum('disk_total'))
        # aggregate() yields None for an empty set; under Python 2,
        # None >= 0 is False, so the fallback of 0 is taken.
        total = nodes['total'] if 'total' in nodes and nodes['total'] >= 0 else 0
        # All VMs with known disk size count against the total (unlike
        # available_ram, stopped VMs still hold their disk).
        values = self.virtual_machines \
            .exclude(disk_size=-1).order_by() \
            .aggregate(used=Sum('disk_size'))

        # None (no matching VMs) is treated as zero usage.
        used = 0 if 'used' not in values or values['used'] is None else values['used']
        free = total-used if total-used >= 0 else 0

        return {'total':total, 'free':free}
    def _refresh(self):
        # Cache-refresh hook (see TestModel below for the same contract):
        # fetch fresh cluster info from the RAPI.
        return self.rapi.GetInfo()
1086 def instances(self, bulk=False):
1087 """Gets all VMs which reside under the Cluster
1088 Calls the rapi client for all instances.
1090 try:
1091 return self.rapi.GetInstances(bulk=bulk)
1092 except GanetiApiError:
1093 return []
1095 def instance(self, instance):
1096 """Get a single Instance
1097 Calls the rapi client for a specific instance.
1099 try:
1100 return self.rapi.GetInstance(instance)
1101 except GanetiApiError:
1102 return None
    def check_job_status(self):
        """
        If the cache bypass is enabled then check the status of the last job
        when the job is complete we can reenable the cache.

        @returns - dictionary of values that were updated, or None when there
            is no outstanding job or the job is still running
        """
        if self.last_job_id:
            # Unpack assumes the Job row exists; raises if it was deleted.
            (job_id,) = Job.objects.filter(pk=self.last_job_id)\
                .values_list('job_id', flat=True)
            data = self.rapi.GetJobStatus(job_id)
            status = data['status']

            if status in ('success', 'error'):
                finished = Job.parse_end_timestamp(data)
                # Persist the final job state and re-enable caching in the DB
                # before mutating the in-memory flag.
                Job.objects.filter(pk=self.last_job_id) \
                    .update(status=status, ignore_cache=False, finished=finished)
                self.ignore_cache = False

            if status == 'success':
                # Only success clears last_job; errors keep it for inspection.
                self.last_job = None
                return dict(ignore_cache=False, last_job=None)

            elif status == 'error':
                return dict(ignore_cache=False)
1129 def redistribute_config(self):
1131 Redistribute config from cluster's master node to all
1132 other nodes.
1134 # no exception handling, because it's being done in a view
1135 id = self.rapi.RedistributeConfig()
1136 job = Job.objects.create(job_id=id, obj=self, cluster_id=self.id)
1137 self.last_job = job
1138 Cluster.objects.filter(pk=self.id) \
1139 .update(last_job=job, ignore_cache=True)
1140 return job
class VirtualMachineTemplate(models.Model):
    """
    Virtual Machine Template holds all the values for the create virtual
    machine form so that they can automatically be used or edited by a user.
    """
    template_name = models.CharField(max_length=255, null=True, blank=True)
    cluster = models.ForeignKey('Cluster', null=True)
    start = models.BooleanField(verbose_name=_('Start up After Creation'), \
        default=True)
    name_check = models.BooleanField(verbose_name=_('DNS Name Check'), \
        default=True)
    iallocator = models.BooleanField(verbose_name=_('Automatic Allocation'), \
        default=False)
    iallocator_hostname = models.CharField(null=True, blank=True, \
        max_length=255)
    disk_template = models.CharField(verbose_name=_('Disk Template'), max_length=16)
    pnode = models.CharField(verbose_name=_('Primary Node'), max_length=255, \
        null=True, blank=True)
    snode = models.CharField(verbose_name=_('Secondary Node'), max_length=255, \
        null=True, blank=True)
    os = models.CharField(verbose_name=_('Operating System'), max_length=255)

    # BEPARAMS (ganeti backend parameters)
    vcpus = models.IntegerField(verbose_name=_('Virtual CPUs'), \
        validators=[MinValueValidator(1)], null=True, blank=True)
    memory = models.IntegerField(verbose_name=_('Memory'), \
        validators=[MinValueValidator(100)],null=True, blank=True)
    disk_size = models.IntegerField(verbose_name=_('Disk Size'), null=True, \
        validators=[MinValueValidator(100)], blank=True)
    disk_type = models.CharField(verbose_name=_('Disk Type'), max_length=255, \
        null=True, blank=True)
    nic_mode = models.CharField(verbose_name=_('NIC Mode'), max_length=255, \
        null=True, blank=True)
    nic_link = models.CharField(verbose_name=_('NIC Link'), max_length=255, \
        null=True, blank=True)
    nic_type = models.CharField(verbose_name=_('NIC Type'), max_length=255, \
        null=True, blank=True)

    # HVPARAMS (ganeti hypervisor parameters)
    kernel_path = models.CharField(verbose_name=_('Kernel Path'), null=True, \
        blank=True, max_length=255)
    root_path = models.CharField(verbose_name=_('Root Path'), default='/', \
        max_length=255, null=True, blank=True)
    serial_console = models.BooleanField(verbose_name=_('Enable Serial Console'))
    boot_order = models.CharField(verbose_name=_('Boot Device'), max_length=255, \
        null=True, blank=True)
    cdrom_image_path = models.CharField(verbose_name=_('CD-ROM Image Path'), null=True, \
        blank=True, max_length=512)

    def __str__(self):
        # Placeholder for templates saved without a name.
        if self.template_name is None:
            return 'unnamed'
        else:
            return self.template_name
if settings.TESTING:
    # XXX - if in debug mode create a model for testing cached cluster objects
    class TestModel(CachedClusterObject):
        """ simple implementation of a cached model that has been instrumented """
        cluster = models.ForeignKey(Cluster)
        # Instrumentation flags/fixtures read by the test suite:
        saved = False
        data = {'mtime': 1285883187.8692000, 'ctime': 1285799513.4741000}
        throw_error = None

        def _refresh(self):
            # Simulated refresh: raise the configured error if any,
            # otherwise return the canned data dict.
            if self.throw_error:
                raise self.throw_error
            return self.data

        def save(self, *args, **kwargs):
            # Record that save() ran so tests can assert on it.
            self.saved = True
            super(TestModel, self).save(*args, **kwargs)
class GanetiErrorManager(models.Manager):
    """Manager providing lookup, clearing, and creation of GanetiErrors."""

    def clear_error(self, id):
        """
        Clear one particular error (used in overview template).
        """
        return self.filter(pk=id).update(cleared=True)

    def clear_errors(self, *args, **kwargs):
        """
        Clear errors instead of deleting them.
        """
        return self.get_errors(cleared=False, *args, **kwargs) \
            .update(cleared=True)

    def remove_errors(self, *args, **kwargs):
        """
        Just shortcut if someone wants to remove some errors.
        """
        return self.get_errors(*args, **kwargs).delete()

    def get_errors(self, obj=None, **kwargs):
        """
        Manager method used for getting QuerySet of all errors depending on
        passed arguments.

        @param obj affected object (itself or just QuerySet)
        @param kwargs: additional kwargs for filtering GanetiErrors
        """
        if obj is None:
            return self.filter(**kwargs)

        # Create base query of errors to return.
        #
        # if it's a Cluster or a queryset for Clusters, then we need to get all
        # errors from the Clusters. Do this by filtering on GanetiError.cluster
        # instead of obj_id.
        if isinstance(obj, (Cluster,)):
            return self.filter(cluster=obj, **kwargs)

        elif isinstance(obj, (QuerySet,)):
            if obj.model == Cluster:
                return self.filter(cluster__in=obj, **kwargs)
            else:
                # generic queryset: match by content type + primary keys
                ct = ContentType.objects.get_for_model(obj.model)
                return self.filter(obj_type=ct, obj_id__in=obj, **kwargs)

        else:
            # single non-Cluster object: match by content type + primary key
            ct = ContentType.objects.get_for_model(obj.__class__)
            return self.filter(obj_type=ct, obj_id=obj.pk, **kwargs)

    def store_error(self, msg, obj, code, **kwargs):
        """
        Manager method used to store errors

        @param msg error's message
        @param obj object (i.e. cluster or vm) affected by the error
        @param code error's code number
        """
        ct = ContentType.objects.get_for_model(obj.__class__)
        is_cluster = isinstance(obj, Cluster)

        # 401 -- bad permissions
        # 401 is cluster-specific error and thus shouldn't appear on any other
        # object.
        if code == 401:
            if not is_cluster:
                # NOTE: what we do here is almost like:
                # return self.store_error(msg=msg, code=code, obj=obj.cluster)
                # we just omit the recursiveness
                obj = obj.cluster
                ct = ContentType.objects.get_for_model(Cluster)
                is_cluster = True

        # 404 -- object not found
        # 404 can occur on any object, but when it occurs on a cluster, then any
        # of its children must not see the error again
        elif code == 404:
            if not is_cluster:
                # return if the error exists for cluster
                try:
                    c_ct = ContentType.objects.get_for_model(Cluster)
                    return self.get(msg=msg, obj_type=c_ct, code=code,
                                    obj_id=obj.cluster_id, cleared=False)

                except GanetiError.DoesNotExist:
                    # we want to proceed when the error is not cluster-specific
                    pass

        # XXX use a try/except instead of get_or_create(). get_or_create()
        # does not allow us to set cluster_id. This means we'd have to query
        # the cluster object to create the error. we can't guaranteee the
        # cluster will already be queried so use create() instead which does
        # allow cluster_id
        try:
            return self.get(msg=msg, obj_type=ct, obj_id=obj.pk, code=code,
                            **kwargs)

        except GanetiError.DoesNotExist:
            cluster_id = obj.pk if is_cluster else obj.cluster_id

            return self.create(msg=msg, obj_type=ct, obj_id=obj.pk,
                               cluster_id=cluster_id, code=code, **kwargs)
class GanetiError(models.Model):
    """
    Class for storing errors which occured in Ganeti
    """
    cluster = models.ForeignKey(Cluster)
    msg = models.TextField()
    # HTTP-style error code returned by the RAPI (e.g. 401, 404); see
    # GanetiErrorManager.store_error for the special handling of these.
    code = models.PositiveSmallIntegerField(blank=True, null=True)
    timestamp = models.DateTimeField(auto_now_add=True)

    # determines if the errors still appears or not
    cleared = models.BooleanField(default=False)

    # cluster object (cluster, VM, Node) affected by the error (if any)
    obj_type = models.ForeignKey(ContentType, related_name="ganeti_errors")
    obj_id = models.PositiveIntegerField()
    obj = GenericForeignKey("obj_type", "obj_id")

    objects = GanetiErrorManager()

    class Meta:
        # newest errors first
        ordering = ("-timestamp", "code", "msg")

    def __repr__(self):
        return "<GanetiError '%s'>" % self.msg

    def __unicode__(self):
        base = "[%s] %s" % (self.timestamp, self.msg)
        return base
class ClusterUser(models.Model):
    """
    Base class for objects that may interact with a Cluster or VirtualMachine.
    """
    #clusters = models.ManyToManyField(Cluster, through='Quota',
    #        related_name='users')
    name = models.CharField(max_length=128)
    # ContentType of the concrete subclass (Profile/Organization); recorded
    # on first save so cast() can recover the real object later.
    real_type = models.ForeignKey(ContentType, editable=False, null=True)

    def save(self, *args, **kwargs):
        """Record the concrete subclass type on first save."""
        if not self.id:
            self.real_type = self._get_real_type()
        super(ClusterUser, self).save(*args, **kwargs)

    def _get_real_type(self):
        # ContentType for the instance's actual class, not ClusterUser.
        return ContentType.objects.get_for_model(type(self))

    def cast(self):
        """Downcast to the concrete subclass stored in real_type."""
        return self.real_type.get_object_for_this_type(pk=self.pk)

    def __unicode__(self):
        return self.name

    def used_resources(self, cluster=None, only_running=False):
        """
        Return dictionary of total resources used by VMs that this ClusterUser
        has perms to.
        @param cluster if set, get only VMs from specified cluster
        @param only_running if set, get only running VMs
        """
        # XXX - order_by must be cleared or it breaks annotation grouping since
        # the default order_by field is also added to the group_by clause
        base = self.virtual_machines.all().order_by()

        # XXX - use a custom aggregate for ram and vcpu count when filtering by
        # running. this allows us to execute a single query.
        #
        # XXX - quotes must be used in this order. postgresql quirk
        if only_running:
            sum_ram = SumIf('ram', condition="status='running'")
            sum_vcpus = SumIf('virtual_cpus', condition="status='running'")
        else:
            sum_ram = Sum('ram')
            sum_vcpus = Sum('virtual_cpus')

        # -1 marks "unknown" for these fields; leave those VMs out entirely.
        base = base.exclude(ram=-1, disk_size=-1, virtual_cpus=-1)

        if cluster:
            # single-cluster variant: one aggregate dict
            base = base.filter(cluster=cluster)
            result = base.aggregate(ram=sum_ram, disk=Sum('disk_size'), \
                                    virtual_cpus=sum_vcpus)

            # repack with zeros instead of Nones
            if result['disk'] is None:
                result['disk'] = 0
            if result['ram'] is None:
                result['ram'] = 0
            if result['virtual_cpus'] is None:
                result['virtual_cpus'] = 0
            return result

        else:
            # all-clusters variant: one row per cluster via GROUP BY
            base = base.values('cluster').annotate(uram=sum_ram, \
                                                   udisk=Sum('disk_size'), \
                                                   uvirtual_cpus=sum_vcpus)

            # repack as dictionary
            result = {}
            for used in base:
                # repack with zeros instead of Nones, change index names
                used['ram'] = 0 if not used['uram'] else used['uram']
                used['disk'] = 0 if not used['udisk'] else used['udisk']
                used['virtual_cpus'] = 0 if not used['uvirtual_cpus'] else used['uvirtual_cpus']
                used.pop("uvirtual_cpus")
                used.pop("udisk")
                used.pop("uram")
                result[used.pop('cluster')] = used

            return result
class Profile(ClusterUser):
    """
    Profile associated with a django.contrib.auth.User object.

    All permission operations delegate to the backing User (the
    object_permissions API patched onto it).
    """
    user = models.OneToOneField(User)

    def grant(self, perm, object):
        self.user.grant(perm, object)

    def set_perms(self, perms, object):
        self.user.set_perms(perms, object)

    def get_objects_any_perms(self, *args, **kwargs):
        return self.user.get_objects_any_perms(*args, **kwargs)

    def has_perm(self, *args, **kwargs):
        return self.user.has_perm(*args, **kwargs)
class Organization(ClusterUser):
    """
    An organization is used for grouping Users. Organizations are matched with
    an instance of contrib.auth.models.Group. This model exists so that
    contrib.auth.models.Group have a 1:1 relation with a ClusterUser on which
    quotas and permissions can be assigned.

    All permission operations delegate to the backing Group.
    """
    group = models.OneToOneField(Group, related_name='organization')

    def grant(self, perm, object):
        self.group.grant(perm, object)

    def set_perms(self, perms, object):
        self.group.set_perms(perms, object)

    def get_objects_any_perms(self, *args, **kwargs):
        return self.group.get_objects_any_perms(*args, **kwargs)

    def has_perm(self, *args, **kwargs):
        return self.group.has_perm(*args, **kwargs)
class Quota(models.Model):
    """
    A resource limit imposed on a ClusterUser for a given Cluster. The
    attributes of this model represent maximum values the ClusterUser can
    consume. The absence of a Quota indicates unlimited usage.
    """
    user = models.ForeignKey(ClusterUser, related_name='quotas')
    cluster = models.ForeignKey(Cluster, related_name='quotas')

    # Maximum resources this user may consume on the cluster (see
    # Cluster.get_quota / set_quota for how these are read and written).
    ram = models.IntegerField(default=0, null=True)
    disk = models.IntegerField(default=0, null=True)
    virtual_cpus = models.IntegerField(default=0, null=True)
class SSHKey(models.Model):
    """
    Model representing user's SSH public key. Virtual machines rely on
    many ssh keys.
    """
    key = models.TextField(validators=[validate_sshkey])
    #filename = models.CharField(max_length=128) # saves key file's name
    user = models.ForeignKey(User, related_name='ssh_keys')
def create_profile(sender, instance, **kwargs):
    """
    Create a profile object whenever a new user is created, also keeps the
    profile name synchronized with the username
    """
    try:
        profile, _created = Profile.objects.get_or_create(user=instance)
        if profile.name != instance.username:
            profile.name = instance.username
            profile.save()
    except DatabaseError:
        # XXX - since we're using south to track migrations the Profile table
        # won't be available the first time syncdb is run. Catch the error
        # here and let the south migration handle it.
        return
def update_cluster_hash(sender, instance, **kwargs):
    """
    Updates the Cluster hash for all of its VirtualMachines, Nodes, and Jobs.
    """
    new_hash = instance.hash
    # same update order as before: VMs, then jobs, then nodes
    for related in (instance.virtual_machines, instance.jobs, instance.nodes):
        related.all().update(cluster_hash=new_hash)
def update_organization(sender, instance, **kwargs):
    """
    Creates an Organization whenever a contrib.auth.models.Group is created,
    and keeps its name synchronized with the group's name.
    """
    org = Organization.objects.get_or_create(group=instance)[0]
    org.name = instance.name
    org.save()
# Keep Profiles/Organizations and cluster hashes in sync automatically.
post_save.connect(create_profile, sender=User)
post_save.connect(update_cluster_hash, sender=Cluster)
post_save.connect(update_organization, sender=Group)

# Disconnect create_default_site from django.contrib.sites so that
# the useless table for sites is not created. This will be
# reconnected for other apps to use in update_sites_module.
post_syncdb.disconnect(create_default_site, sender=sites_app)
post_syncdb.connect(management.update_sites_module, sender=sites_app, \
    dispatch_uid = "ganeti.management.update_sites_module")
def regenerate_cu_children(sender, **kwargs):
    """
    Resets may destroy Profiles and/or Organizations. We need to regenerate
    them.
    """

    # So. What are we actually doing here?
    # Whenever a User or Group is saved, the associated Profile or
    # Organization is also updated. This means that, if a Profile for a User
    # is absent, it will be created.
    # More importantly, *why* might a Profile be missing? Simple. Resets of
    # the ganeti app destroy them. This shouldn't happen in production, and
    # only occasionally in development, but it's good to explicitly handle
    # this particular case so that missing Profiles not resulting from a reset
    # are easier to diagnose.
    try:
        # Saving re-fires the post_save handlers above, which recreate the
        # missing Profile/Organization rows.
        for user in User.objects.filter(profile__isnull=True):
            user.save()
        for group in Group.objects.filter(organization__isnull=True):
            group.save()
    except DatabaseError:
        # XXX - since we're using south to track migrations the Profile table
        # won't be available the first time syncdb is run. Catch the error here
        # and let the south migration handle it.
        pass

post_syncdb.connect(regenerate_cu_children)
def log_group_create(sender, editor, **kwargs):
    """ log group creation signal

    @param sender: the object the action applies to (presumably the Group
        that was created — confirm against muddle_users signal docs)
    @param editor: the user who performed the action
    """
    log_action('CREATE', editor, sender)
def log_group_edit(sender, editor, **kwargs):
    """ log group edit signal

    @param sender: the object the action applies to (presumably the Group
        that was edited — confirm against muddle_users signal docs)
    @param editor: the user who performed the action
    """
    log_action('EDIT', editor, sender)
# Hook group lifecycle signals into the object log.
muddle_user_signals.view_group_created.connect(log_group_create)
muddle_user_signals.view_group_edited.connect(log_group_edit)


# Register permissions on our models.
# These are part of the DB schema and should not be changed without serious
# forethought.
# You *must* syncdb after you change these.
register(permissions.CLUSTER_PARAMS, Cluster, 'ganeti_web')
register(permissions.VIRTUAL_MACHINE_PARAMS, VirtualMachine, 'ganeti_web')


# register log actions
register_log_actions()