# coding: utf-8

# Copyright (C) 2010 Oregon State University et al.
# Copyright (C) 2010 Greek Research and Technology Network
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.

import cPickle
import random
import re
import string
from datetime import datetime, timedelta
from hashlib import sha1

from django.conf import settings

from django.contrib.sites import models as sites_app
from django.contrib.sites.management import create_default_site
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey

from django.core.validators import RegexValidator, MinValueValidator
from django.utils.translation import ugettext_lazy as _

from django.db import models
from django.db.models import Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, post_syncdb
from django.db.utils import DatabaseError

from ganeti_web.logs import register_log_actions

from object_log.models import LogItem
log_action = LogItem.objects.log_action

from object_permissions.registration import register
from object_permissions import signals as op_signals

from muddle_users import signals as muddle_user_signals

from ganeti_web import constants, management
from ganeti_web.fields import PreciseDateTimeField, SumIf
from ganeti_web import permissions
from util import client
from util.client import GanetiApiError

if settings.VNC_PROXY:
    from util.vncdaemon.vapclient import request_forwarding


def generate_random_password(length=12):
    "Generate random sequence of specified length"
    return "".join(random.sample(string.letters + string.digits, length))


RAPI_CACHE = {}
RAPI_CACHE_HASHES = {}


def get_rapi(hash, cluster):
    """
    Retrieves the cached Ganeti RAPI client for a given hash. The hash is
    derived from the connection credentials required for a cluster. If the
    client is not yet cached, it will be created and added.

    If a hash does not correspond to any cluster then Cluster.DoesNotExist
    will be raised.

    @param cluster - either a cluster object, or the ID of the object. This
        is used for resolving the cluster if the client is not already found.
        The id is used rather than the hash, because the hash is mutable.

    @return a Ganeti RAPI client.
    """
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]

    # always look up the instance, even if we were given a Cluster instance.
    # It ensures we are retrieving the latest credentials, which helps avoid
    # stale credentials. Retrieve only the values because we don't actually
    # need another Cluster instance here.
    if isinstance(cluster, Cluster):
        cluster = cluster.id
    (credentials,) = Cluster.objects.filter(id=cluster) \
        .values_list('hash', 'hostname', 'port', 'username', 'password')
    hash, host, port, user, password = credentials
    user = user if user else None
    password = password if password else None

    # now that we know the hash is fresh, check the cache again. The original
    # hash could have been stale. This avoids constructing a new RAPI client
    # that already exists.
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]

    # delete any old version of the client that was cached.
    if cluster in RAPI_CACHE_HASHES:
        del RAPI_CACHE[RAPI_CACHE_HASHES[cluster]]

    rapi = client.GanetiRapiClient(host, port, user, password)
    RAPI_CACHE[hash] = rapi
    RAPI_CACHE_HASHES[cluster] = hash
    return rapi
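
# A minimal usage sketch of the cache above, kept in comments so nothing
# executes at import time (the slug 'example' is hypothetical):
#
#     cluster = Cluster.objects.get(slug='example')
#     rapi = get_rapi(cluster.hash, cluster)          # created and cached
#     assert rapi is get_rapi(cluster.hash, cluster)  # later hits reuse it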


def clear_rapi_cache():
    """
    clears the rapi cache
    """
    RAPI_CACHE.clear()
    RAPI_CACHE_HASHES.clear()


ssh_public_key_re = re.compile(
    r'^ssh-(rsa|dsa|dss) [A-Z0-9+/=]+ .+$', re.IGNORECASE)
validate_sshkey = RegexValidator(ssh_public_key_re,
    _(u"Enter a valid SSH public key with comment (SSH2 RSA or DSA)."),
    "invalid")
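
# Illustrative check of the validator above (comment sketch; the key
# material below is a made-up placeholder, not a real key):
#
#     validate_sshkey(u'ssh-rsa AAAAB3NzaC1yc2E= user@example.org')  # passes
#     validate_sshkey(u'not-a-key')  # raises ValidationError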


class CachedClusterObject(models.Model):
    """
    mixin class for objects that reside on the cluster but have some portion
    cached in the database. This class contains logic and other structures
    for handling cache loading transparently.
    """
    serialized_info = models.TextField(null=True, default=None,
                                       editable=False)
    mtime = PreciseDateTimeField(null=True, editable=False)
    cached = PreciseDateTimeField(null=True, editable=False)
    ignore_cache = models.BooleanField(default=False)

    __info = None
    error = None
    ctime = None

    def __init__(self, *args, **kwargs):
        super(CachedClusterObject, self).__init__(*args, **kwargs)
        self.load_info()

    @property
    def info(self):
        """
        Getter for self.info, a dictionary of data about this object. This
        is a proxy to self.serialized_info that handles deserialization.
        Accessing this property will lazily deserialize info if it has not
        yet been deserialized.
        """
        if self.__info is None:
            if self.serialized_info is not None:
                self.__info = cPickle.loads(str(self.serialized_info))
        return self.__info

    @info.setter
    def info(self, value):
        """
        Setter for self.info, a proxy to self.serialized_info that handles
        serialization. When info is set, self.parse_info() is triggered to
        update persistent and non-persistent properties stored on the model
        instance.

        Calling this method will not force serialization. Serialization of
        info is lazy and will only occur when saving.
        """
        self.__info = value
        if value is not None:
            self.parse_info()
            self.serialized_info = None

    def load_info(self):
        """
        Load cached info retrieved from the ganeti cluster. This function
        includes a lazy cache mechanism that uses a timer to decide whether
        or not to refresh the cached information with new information from
        the ganeti cluster.

        This will ignore the cache when self.ignore_cache is True
        """
        if self.id:
            if self.ignore_cache:
                self.refresh()

            # timedelta's fourth positional argument is milliseconds, so
            # LAZY_CACHE_REFRESH is expressed in milliseconds
            elif self.cached is None \
                    or datetime.now() > self.cached + \
                    timedelta(0, 0, 0, settings.LAZY_CACHE_REFRESH):
                self.refresh()
            else:
                if self.info:
                    self.parse_transient_info()
                else:
                    self.error = 'No Cached Info'

    def parse_info(self):
        """ Parse all values from the cached info """
        self.parse_transient_info()
        data = self.parse_persistent_info(self.info)
        for k in data:
            setattr(self, k, data[k])

    def refresh(self):
        """
        Retrieve and parse info from the ganeti cluster. If successfully
        retrieved and parsed, this method will also call save().

        Failure while loading the remote info will result in an incomplete
        object. The error will be stored in self.error
        """
        try:
            info_ = self._refresh()
            if info_:
                if info_['mtime']:
                    mtime = datetime.fromtimestamp(info_['mtime'])
                else:
                    mtime = None
                self.cached = datetime.now()
            else:
                # no info retrieved, use current mtime
                mtime = self.mtime

            if self.mtime is None or mtime > self.mtime:
                # there was an update. Set info and save the object
                self.info = info_
                self.check_job_status()
                self.save()
            else:
                # There was no change on the server. Only update the cache
                # time. This bypasses the info serialization mechanism and
                # uses a smaller query.
                updates = self.check_job_status()
                if updates:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached, **updates)
                elif self.id is not None:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached)

        except GanetiApiError, e:
            self.error = str(e)
            GanetiError.objects.store_error(str(e), obj=self, code=e.code)

        else:
            if self.error:
                self.error = None
                GanetiError.objects.clear_errors(obj=self)

    def _refresh(self):
        """
        Fetch raw data from the ganeti cluster. This is specific to the
        object and must be implemented by it.
        """
        raise NotImplementedError

    def check_job_status(self):
        pass

    def parse_transient_info(self):
        """
        Parse properties from cached info that are stored on the class but
        not in the database. These properties will be loaded every time the
        object is instantiated. Properties stored on the class cannot be
        searched efficiently via the django query api.

        This method is specific to the child object.
        """
        info_ = self.info
        # XXX ganeti 2.1 ctime is always None
        if info_['ctime'] is not None:
            self.ctime = datetime.fromtimestamp(info_['ctime'])

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse properties from cached info that are stored in the database.
        These properties will be searchable by the django query api.

        This method is specific to the child object.
        """
        # mtime is sometimes None if the object has never been modified
        if info['mtime'] is None:
            return {'mtime': None}
        return {'mtime': datetime.fromtimestamp(info['mtime'])}

    def save(self, *args, **kwargs):
        """
        overridden to ensure info is serialized prior to save
        """
        if self.serialized_info is None:
            self.serialized_info = cPickle.dumps(self.__info)
        super(CachedClusterObject, self).save(*args, **kwargs)

    class Meta:
        abstract = True
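
# A minimal sketch of how the mixin above is subclassed: define the model
# fields plus a _refresh() returning the raw info dict (see TestModel near
# the bottom of this module for the instrumented version used by the tests).
#
#     class ExampleCachedObject(CachedClusterObject):  # hypothetical
#         cluster = models.ForeignKey('Cluster')
#
#         def _refresh(self):
#             return {'mtime': None, 'ctime': None}  # stub info dict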


class JobManager(models.Manager):
    """
    Custom manager for Ganeti Jobs model
    """
    def create(self, **kwargs):
        """ helper method for creating a job with disabled cache """
        job = Job(ignore_cache=True, **kwargs)
        job.save(force_insert=True)
        return job


class Job(CachedClusterObject):
    """
    model representing a job being run on a ganeti Cluster. This includes
    operations such as creating or deleting a virtual machine.

    Jobs are a special type of CachedClusterObject. Jobs run once and then
    become immutable. The lazy cache is modified to become permanent once a
    complete status (success/error) has been detected. The cache can be
    disabled by setting ignore_cache=True.
    """
    job_id = models.IntegerField(null=False)
    content_type = models.ForeignKey(ContentType, null=False)
    object_id = models.IntegerField(null=False)
    obj = GenericForeignKey('content_type', 'object_id')
    cluster = models.ForeignKey('Cluster', editable=False,
                                related_name='jobs')
    cluster_hash = models.CharField(max_length=40, editable=False)

    cleared = models.BooleanField(default=False)
    finished = models.DateTimeField(null=True)
    status = models.CharField(max_length=10)

    objects = JobManager()

    @property
    def rapi(self):
        return get_rapi(self.cluster_hash, self.cluster_id)

    def _refresh(self):
        return self.rapi.GetJobStatus(self.job_id)

    def load_info(self):
        """
        Load info for class. This will load from ganeti if
        ignore_cache==True, otherwise this will always load from the cache.
        """
        if self.id and (self.ignore_cache or self.info is None):
            self.info = self._refresh()
            self.save()

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse status and turn off the cache bypass flag if the job has
        finished
        """
        data = {'status': info['status']}
        if data['status'] in ('error', 'success'):
            data['ignore_cache'] = False
        if info['end_ts']:
            data['finished'] = cls.parse_end_timestamp(info)
        return data

    @classmethod
    def parse_end_timestamp(cls, info):
        # end_ts is a (seconds, microseconds) pair
        sec, micro = info['end_ts']
        return datetime.fromtimestamp(sec + (micro / 1000000.0))

    def parse_transient_info(self):
        pass

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances
        """
        if self.id is None or self.cluster_hash == '':
            self.cluster_hash = self.cluster.hash

        super(Job, self).save(*args, **kwargs)

    @property
    def current_operation(self):
        """
        Jobs may consist of multiple commands/operations. This helper
        method will return the operation that is currently running or has
        errored out, or the last operation if all operations have completed.

        @returns raw name of the current operation
        """
        info = self.info
        # default to the last operation; it applies when every op succeeded
        index = len(info['opstatus']) - 1
        for i, opstatus in enumerate(info['opstatus']):
            if opstatus != 'success':
                index = i
                break
        return info['ops'][index]['OP_ID']

    @property
    def operation(self):
        """
        Returns the last operation, which is generally the primary operation.
        """
        return self.info['ops'][-1]['OP_ID']

    def __repr__(self):
        return "<Job: '%s'>" % self.id

    def __str__(self):
        return repr(self)
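
# Comment sketch of the timestamp parsing above (the end_ts pair is made up;
# ganeti reports it as (seconds, microseconds)):
#
#     Job.parse_end_timestamp({'end_ts': [1285883187, 869200]})
#     # -> a datetime for the given epoch seconds plus 0.8692s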


class VirtualMachine(CachedClusterObject):
    """
    The VirtualMachine (VM) model represents VMs within a Ganeti cluster.
    The majority of properties are a cache for data stored in the cluster.
    All data retrieved via the RAPI is stored in VirtualMachine.info, and
    serialized automatically into VirtualMachine.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within VirtualMachine.info.

    This object uses a lazy update mechanism on instantiation. If the cached
    info from the Ganeti cluster has expired, it will trigger an update.
    This allows the cache to function in the absence of a periodic update
    mechanism such as Cron, Celery, or Threads.

    The lazy update and periodic update should use separate refresh timeouts
    where LAZY_CACHE_REFRESH > PERIODIC_CACHE_REFRESH. This ensures that the
    lazy cache will only be used if the periodic cache is not updating.

    XXX Serialized_info can possibly be changed to a CharField if an upper
    limit can be determined. (Later Date, if it will optimize db)
    """
    cluster = models.ForeignKey('Cluster', editable=False, default=0,
                                related_name='virtual_machines')
    hostname = models.CharField(max_length=128, db_index=True)
    owner = models.ForeignKey('ClusterUser', null=True,
                              related_name='virtual_machines',
                              on_delete=models.SET_NULL)
    virtual_cpus = models.IntegerField(default=-1)
    disk_size = models.IntegerField(default=-1)
    ram = models.IntegerField(default=-1)
    cluster_hash = models.CharField(max_length=40, editable=False)
    operating_system = models.CharField(max_length=128)
    status = models.CharField(max_length=10)

    # node relations
    primary_node = models.ForeignKey('Node', null=True,
                                     related_name='primary_vms')
    secondary_node = models.ForeignKey('Node', null=True,
                                       related_name='secondary_vms')

    # The last job reference indicates that there is at least one pending job
    # for this virtual machine. There may be more than one job, and that can
    # never be prevented. This just indicates that job(s) are pending and the
    # job related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', null=True)

    # The deleted flag indicates a VM is being deleted, but the job has not
    # completed yet. VMs that have pending_delete are still displayed in
    # lists and counted in quotas, but only so status can be checked.
    pending_delete = models.BooleanField(default=False)
    deleted = False

    # Template temporarily stores the parameters used to create this virtual
    # machine. The template is used to recreate the values entered into the
    # form.
    template = models.ForeignKey("VirtualMachineTemplate", null=True)

    class Meta:
        ordering = ["hostname", ]
        unique_together = (("cluster", "hostname"),)

    @property
    def rapi(self):
        return get_rapi(self.cluster_hash, self.cluster_id)

    @property
    def is_running(self):
        return self.status == 'running'

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash

        info_ = self.info
        if info_:
            found = False
            remove = []
            if self.cluster.username:
                for tag in info_['tags']:
                    # Update the owner tag. Make sure the tag is set to the
                    # owner that is set in webmgr.
                    if tag.startswith(constants.OWNER_TAG):
                        id = int(tag[len(constants.OWNER_TAG):])
                        # Since there is no 'update tag', delete the old tag
                        # and replace it with a tag containing the correct
                        # owner id.
                        if id == self.owner_id:
                            found = True
                        else:
                            remove.append(tag)
                if remove:
                    self.rapi.DeleteInstanceTags(self.hostname, remove)
                    for tag in remove:
                        info_['tags'].remove(tag)
                if self.owner_id and not found:
                    tag = '%s%s' % (constants.OWNER_TAG, self.owner_id)
                    self.rapi.AddInstanceTags(self.hostname, [tag])
                    self.info['tags'].append(tag)

        super(VirtualMachine, self).save(*args, **kwargs)

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, including persistent properties
        that are stored in the database
        """
        data = super(VirtualMachine, cls).parse_persistent_info(info)

        # Parse resource properties
        data['ram'] = info['beparams']['memory']
        data['virtual_cpus'] = info['beparams']['vcpus']
        # Sum up the size of each disk used by the VM
        data['disk_size'] = sum(info['disk.sizes'])
        data['operating_system'] = info['os']
        data['status'] = info['status']

        primary = info['pnode']
        if primary:
            try:
                data['primary_node'] = Node.objects.get(hostname=primary)
            except Node.DoesNotExist:
                # node is not created yet. fail silently
                data['primary_node'] = None
        else:
            data['primary_node'] = None

        secondary = info['snodes']
        if len(secondary):
            secondary = secondary[0]
            try:
                data['secondary_node'] = Node.objects.get(hostname=secondary)
            except Node.DoesNotExist:
                # node is not created yet. fail silently
                data['secondary_node'] = None
        else:
            data['secondary_node'] = None

        return data

    def check_job_status(self):
        """
        if the cache bypass is enabled then check the status of the last
        job. When the job is complete we can re-enable the cache.

        @returns - dictionary of values that were updated
        """
        if self.ignore_cache and self.last_job_id:
            (job_id,) = Job.objects.filter(pk=self.last_job_id) \
                .values_list('job_id', flat=True)
            data = self.rapi.GetJobStatus(job_id)
            status = data['status']

            if status in ('success', 'error'):
                finished = Job.parse_end_timestamp(data)
                Job.objects.filter(pk=self.last_job_id) \
                    .update(status=status, ignore_cache=False,
                            finished=finished)
                self.ignore_cache = False

            op_id = data['ops'][-1]['OP_ID']

            if status == 'success':
                self.last_job = None
                # job cleanups:
                #   - if the job was a deletion, then delete this vm
                #   - if the job was creation, then delete the temporary
                #     template
                # XXX return None to prevent refresh() from trying to update
                #     the cache setting for this VM
                # XXX delete may have multiple ops in it, but delete is
                #     always the last command run.
                if op_id == 'OP_INSTANCE_REMOVE':
                    self.delete()
                    self.deleted = True
                    return None

                elif op_id == 'OP_INSTANCE_CREATE':
                    # XXX must update before deleting the template to
                    # maintain referential integrity. As a consequence,
                    # return no other updates.
                    VirtualMachine.objects.filter(pk=self.pk) \
                        .update(ignore_cache=False, last_job=None,
                                template=None)

                    VirtualMachineTemplate.objects \
                        .filter(pk=self.template_id).delete()
                    self.template = None
                    return dict()

                return dict(ignore_cache=False, last_job=None)

            elif status == 'error':
                if op_id == 'OP_INSTANCE_CREATE' and self.info:
                    # create failed but the vm was deployed; the template is
                    # no longer needed
                    #
                    # XXX must update before deleting the template to
                    # maintain referential integrity. As a consequence,
                    # return no other updates.
                    VirtualMachine.objects.filter(pk=self.pk) \
                        .update(ignore_cache=False, template=None)

                    VirtualMachineTemplate.objects \
                        .filter(pk=self.template_id).delete()
                    self.template = None
                    return dict()
                else:
                    return dict(ignore_cache=False)

    def _refresh(self):
        # XXX if delete is pending then there is no need to refresh this
        # object.
        if self.pending_delete:
            return None
        return self.rapi.GetInstance(self.hostname)

    def shutdown(self):
        id = self.rapi.ShutdownInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def startup(self):
        id = self.rapi.StartupInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def reboot(self):
        id = self.rapi.RebootInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def migrate(self, mode='live', cleanup=False):
        """
        Migrates this VirtualMachine to another node. Only works if the disk
        type is DRBD.

        @param mode: live or non-live
        @param cleanup: clean up a previous migration, default is False
        """
        id = self.rapi.MigrateInstance(self.hostname, mode, cleanup)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def setup_vnc_forwarding(self, sport=''):
        password = ''
        info_ = self.info
        port = info_['network_port']
        node = info_['pnode']

        # use the proxy for the VNC connection
        if settings.VNC_PROXY:
            proxy_server = settings.VNC_PROXY.split(":")
            password = generate_random_password()
            result = request_forwarding(proxy_server, sport, node, port,
                                        password)
            if not result:
                return False, False, False
            else:
                return proxy_server[0], int(result), password
        else:
            return node, port, password
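
    # Comment sketch of the forwarding result above: callers get back a
    # (host, port, password) tuple on success, or (False, False, False) if
    # the proxy refused the request (vm is hypothetical):
    #
    #     host, port, password = vm.setup_vnc_forwarding()
    #     if host:
    #         pass  # hand host/port/password to the VNC console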

    @models.permalink
    def get_absolute_url(self):
        """
        Return the absolute url for this instance. Since the canonical url
        requires the cluster object, this method will check to see if the
        cluster has already been queried. If it has not been queried it will
        use the non-canonical url, which is quicker to render.
        """
        if hasattr(self, '_cluster_cache'):
            return 'instance-detail', (), {'cluster_slug': self.cluster.slug,
                                           'instance': self.hostname}
        return 'instance-detail-id', (), {'id': self.pk}

    def __repr__(self):
        return "<VirtualMachine: '%s'>" % self.hostname

    def __unicode__(self):
        return self.hostname
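
# Comment sketch of the VM job flow defined above (vm is hypothetical; each
# power method submits a RAPI job and flips the cache bypass on until
# check_job_status() sees the job finish):
#
#     vm = VirtualMachine.objects.get(hostname='vm1.example.org')
#     job = vm.reboot()    # submits a reboot job via RAPI
#     print job.status     # refreshed from ganeti while the job is pending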


class Node(CachedClusterObject):
    """
    The Node model represents nodes within a Ganeti cluster. The majority
    of properties are a cache for data stored in the cluster. All data
    retrieved via the RAPI is stored in Node.info, and serialized
    automatically into Node.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within Node.info.
    """
    ROLE_CHOICES = ((k, v) for k, v in constants.NODE_ROLE_MAP.items())

    cluster = models.ForeignKey('Cluster', related_name='nodes')
    hostname = models.CharField(max_length=128, unique=True)
    cluster_hash = models.CharField(max_length=40, editable=False)
    offline = models.BooleanField()
    role = models.CharField(max_length=1, choices=ROLE_CHOICES)
    ram_total = models.IntegerField(default=-1)
    ram_free = models.IntegerField(default=-1)
    disk_total = models.IntegerField(default=-1)
    disk_free = models.IntegerField(default=-1)

    # The last job reference indicates that there is at least one pending job
    # for this node. There may be more than one job, and that can never be
    # prevented. This just indicates that job(s) are pending and the job
    # related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', null=True)

    def _refresh(self):
        """ returns node info from the ganeti server """
        return self.rapi.GetNode(self.hostname)

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash
        super(Node, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        """
        Return the absolute url for this node. Since the canonical url
        requires the cluster object, this method will check to see if the
        cluster has already been queried. If it has not been queried it will
        use the non-canonical url, which is quicker to render.
        """
        if hasattr(self, '_cluster_cache'):
            return 'node-detail', (), {'cluster_slug': self.cluster.slug,
                                       'host': self.hostname}
        return 'node-detail-id', (), {'id': self.pk}

    @property
    def rapi(self):
        return get_rapi(self.cluster_hash, self.cluster_id)

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, including persistent properties
        that are stored in the database
        """
        data = super(Node, cls).parse_persistent_info(info)

        # Parse resource properties
        data['ram_total'] = info['mtotal'] if info['mtotal'] is not None else 0
        data['ram_free'] = info['mfree'] if info['mfree'] is not None else 0
        data['disk_total'] = info['dtotal'] if info['dtotal'] is not None else 0
        data['disk_free'] = info['dfree'] if info['dfree'] is not None else 0
        data['offline'] = info['offline']
        data['role'] = info['role']
        return data

    def check_job_status(self):
        """
        if the cache bypass is enabled then check the status of the last
        job. When the job is complete we can re-enable the cache.

        @returns - dictionary of values that were updated
        """
        if self.last_job_id:
            (job_id,) = Job.objects.filter(pk=self.last_job_id) \
                .values_list('job_id', flat=True)
            data = self.rapi.GetJobStatus(job_id)
            status = data['status']

            if status in ('success', 'error'):
                finished = Job.parse_end_timestamp(data)
                Job.objects.filter(pk=self.last_job_id) \
                    .update(status=status, ignore_cache=False,
                            finished=finished)
                self.ignore_cache = False

            if status == 'success':
                self.last_job = None
                return dict(ignore_cache=False, last_job=None)

            elif status == 'error':
                return dict(ignore_cache=False)

    @property
    def ram(self):
        """ returns dict of free and total ram """
        values = VirtualMachine.objects \
            .filter(Q(primary_node=self) | Q(secondary_node=self)) \
            .filter(status='running') \
            .exclude(ram=-1).order_by() \
            .aggregate(used=Sum('ram'))

        total = self.ram_total
        used = total - self.ram_free
        allocated = 0 if values['used'] is None else values['used']
        free = total - allocated if allocated >= 0 and total >= 0 else -1
        return {'total': total, 'free': free, 'allocated': allocated,
                'used': used}

    @property
    def disk(self):
        """ returns dict of free and total disk space """
        values = VirtualMachine.objects \
            .filter(Q(primary_node=self) | Q(secondary_node=self)) \
            .exclude(disk_size=-1).order_by() \
            .aggregate(used=Sum('disk_size'))

        total = self.disk_total
        used = total - self.disk_free
        allocated = 0 if values['used'] is None else values['used']
        free = total - allocated if allocated >= 0 and total >= 0 else -1

        return {'total': total, 'free': free, 'allocated': allocated,
                'used': used}

    def set_role(self, role, force=False):
        """
        Sets the role for this node

        @param role - one of the following choices:
            * master
            * master-candidate
            * regular
            * drained
            * offline
        """
        id = self.rapi.SetNodeRole(self.hostname, role, force)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk).update(ignore_cache=True,
                                               last_job=job)
        return job

    def evacuate(self, iallocator=None, node=None):
        """
        migrates all secondary instances off this node
        """
        id = self.rapi.EvacuateNode(self.hostname, iallocator=iallocator,
                                    remote_node=node)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk) \
            .update(ignore_cache=True, last_job=job)
        return job

    def migrate(self, mode=None):
        """
        migrates all primary instances off this node
        """
        id = self.rapi.MigrateNode(self.hostname, mode)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk).update(ignore_cache=True,
                                               last_job=job)
        return job

    def __repr__(self):
        return "<Node: '%s'>" % self.hostname

    def __unicode__(self):
        return self.hostname
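
# Comment sketch of draining a node with the methods above (node is
# hypothetical; each call submits a RAPI job tracked via last_job):
#
#     node = Node.objects.get(hostname='node1.example.org')
#     node.set_role('drained')   # stop new allocations on this node
#     node.migrate(mode='live')  # move primary instances elsewhere
#     node.evacuate()            # move secondary instances elsewhere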


class Cluster(CachedClusterObject):
    """
    A Ganeti cluster that is being tracked by this manager tool
    """
    hostname = models.CharField(_('hostname'), max_length=128, unique=True)
    slug = models.SlugField(_('slug'), max_length=50, unique=True,
                            db_index=True)
    port = models.PositiveIntegerField(_('port'), default=5080)
    description = models.CharField(_('description'), max_length=128,
                                   blank=True, null=True)
    username = models.CharField(_('username'), max_length=128, blank=True,
                                null=True)
    password = models.CharField(_('password'), max_length=128, blank=True,
                                null=True)
    hash = models.CharField(_('hash'), max_length=40, editable=False)

    # quota properties
    virtual_cpus = models.IntegerField(_('Virtual CPUs'), null=True,
                                       blank=True)
    disk = models.IntegerField(_('disk'), null=True, blank=True)
    ram = models.IntegerField(_('ram'), null=True, blank=True)

    # The last job reference indicates that there is at least one pending job
    # for this cluster. There may be more than one job, and that can never be
    # prevented. This just indicates that job(s) are pending and the job
    # related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', null=True, blank=True,
                                 related_name='cluster_last_job')

    class Meta:
        ordering = ["hostname", "description"]

    def __unicode__(self):
        return self.hostname

    def save(self, *args, **kwargs):
        self.hash = self.create_hash()
        super(Cluster, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        return 'cluster-detail', (), {'cluster_slug': self.slug}

    @property
    def rapi(self):
        """
        retrieves the rapi client for this cluster.
        """
        # XXX always pass self in. Not only does it avoid querying this
        # object from the DB a second time, it also prevents a recursion
        # loop caused by __init__ fetching info from the Cluster
        return get_rapi(self.hash, self)

    def create_hash(self):
        """
        Creates a hash for this cluster based on the credentials required
        for connecting to the server
        """
        return sha1('%s%s%s%s' % (self.username, self.password,
                                  self.hostname, self.port)).hexdigest()

    def get_quota(self, user=None):
        """
        Get the quota for a ClusterUser

        @return user's quota, default quota, or none
        """
        if user is None:
            return {'default': 1, 'ram': self.ram, 'disk': self.disk,
                    'virtual_cpus': self.virtual_cpus}

        # attempt to query the user-specific quota first. If it does not
        # exist then fall back to the default quota
        query = Quota.objects.filter(cluster=self, user=user) \
            .values('ram', 'disk', 'virtual_cpus')
        if len(query):
            (quota,) = query
            quota['default'] = 0
            return quota

        return {'default': 1, 'ram': self.ram, 'disk': self.disk,
                'virtual_cpus': self.virtual_cpus}

    def set_quota(self, user, values=None):
        """
        set the quota for a ClusterUser

        @param values: dictionary of values, or None to delete the quota
        """
        kwargs = {'cluster': self, 'user': user}
        if values is None:
            Quota.objects.filter(**kwargs).delete()
        else:
            quota, new = Quota.objects.get_or_create(**kwargs)
            quota.__dict__.update(values)
            quota.save()

    @classmethod
    def get_quotas(cls, clusters=None, user=None):
        """ retrieve a bulk list of cluster quotas """
        if clusters is None:
            clusters = Cluster.objects.all()

        quotas = {}
        cluster_id_map = {}
        for cluster in clusters:
            quotas[cluster] = {'default': 1,
                               'ram': cluster.ram,
                               'disk': cluster.disk,
                               'virtual_cpus': cluster.virtual_cpus}
            cluster_id_map[cluster.id] = cluster

        # apply the user's custom quotas, if any
        if user is not None:
            query = Quota.objects.filter(cluster__in=clusters, user=user) \
                .values('ram', 'disk', 'virtual_cpus', 'cluster__id')

            for custom in query:
                try:
                    cluster = cluster_id_map[custom['cluster__id']]
                except KeyError:
                    continue
                custom['default'] = 0
                del custom['cluster__id']
                quotas[cluster] = custom

        return quotas
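
    # Comment sketch of quota lookup with the methods above (names are
    # hypothetical; 'default' is 1 when the cluster-wide quota applies and
    # 0 when a user-specific Quota row overrides it):
    #
    #     cluster.set_quota(cluster_user, {'ram': 2048, 'disk': 40960,
    #                                      'virtual_cpus': 4})
    #     cluster.get_quota(cluster_user)
    #     # -> {'ram': 2048, 'disk': 40960, 'virtual_cpus': 4, 'default': 0}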

    def sync_virtual_machines(self, remove=False):
        """
        Synchronizes the VirtualMachines in the database with the
        information this ganeti cluster has:
          * VMs no longer in ganeti are deleted
          * VMs missing from the database are added
        """
        ganeti = self.instances()
        db = self.virtual_machines.all().values_list('hostname', flat=True)

        # add VMs missing from the database
        for hostname in filter(lambda x: unicode(x) not in db, ganeti):
            VirtualMachine(cluster=self, hostname=hostname).save()

        # delete VMs that are no longer in ganeti
        if remove:
            missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
            if missing_ganeti:
                self.virtual_machines \
                    .filter(hostname__in=missing_ganeti).delete()

    def sync_nodes(self, remove=False):
        """
        Synchronizes the Nodes in the database with the information
        this ganeti cluster has:
          * Nodes no longer in ganeti are deleted
          * Nodes missing from the database are added
        """
        ganeti = self.rapi.GetNodes()
        db = self.nodes.all().values_list('hostname', flat=True)

        # add Nodes missing from the database
        for hostname in filter(lambda x: unicode(x) not in db, ganeti):
            Node(cluster=self, hostname=hostname).save()

        # delete Nodes that are no longer in ganeti
        if remove:
            missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
            if missing_ganeti:
                self.nodes.filter(hostname__in=missing_ganeti).delete()

    @property
    def missing_in_ganeti(self):
        """
        Returns a list of VirtualMachines that are missing from the ganeti
        cluster but present in the database
        """
        ganeti = self.instances()
        db = self.virtual_machines.all().values_list('hostname', flat=True)
        return filter(lambda x: str(x) not in ganeti, db)

    @property
    def missing_in_db(self):
        """
        Returns a list of VirtualMachines that are missing from the
        database, but present in ganeti
        """
        ganeti = self.instances()
        db = self.virtual_machines.all().values_list('hostname', flat=True)
        return filter(lambda x: unicode(x) not in db, ganeti)

    @property
    def nodes_missing_in_db(self):
        """
        Returns a list of Nodes that are missing from the database, but
        present in ganeti.
        """
        try:
            ganeti = self.rapi.GetNodes()
        except GanetiApiError:
            ganeti = []
        db = self.nodes.all().values_list('hostname', flat=True)
        return filter(lambda x: unicode(x) not in db, ganeti)

    @property
    def nodes_missing_in_ganeti(self):
        """
        Returns a list of Nodes that are missing from the ganeti cluster,
        but present in the database
        """
        try:
            ganeti = self.rapi.GetNodes()
        except GanetiApiError:
            ganeti = []
        db = self.nodes.all().values_list('hostname', flat=True)
        return filter(lambda x: str(x) not in ganeti, db)

    @property
    def available_ram(self):
        """ returns dict of free and total ram """
        nodes = self.nodes.exclude(ram_total=-1) \
            .aggregate(total=Sum('ram_total'), free=Sum('ram_free'))
        # Sum() yields None when there are no rows; None compares < 0 here
        total = (nodes['total'] if 'total' in nodes and nodes['total'] >= 0
                 else 0)
        free = (nodes['free'] if 'free' in nodes and nodes['free'] >= 0
                else 0)
        used = total - free
        values = self.virtual_machines \
            .filter(status='running') \
            .exclude(ram=-1).order_by() \
            .aggregate(used=Sum('ram'))

        allocated = 0 if values['used'] is None else values['used']
        free = total - allocated if total - allocated >= 0 else 0
        return {'total': total, 'free': free, 'allocated': allocated,
                'used': used}

    @property
    def available_disk(self):
        """ returns dict of free and total disk space """
        nodes = self.nodes.exclude(disk_total=-1) \
            .aggregate(total=Sum('disk_total'), free=Sum('disk_free'))
        total = (nodes['total'] if 'total' in nodes and nodes['total'] >= 0
                 else 0)
        free = (nodes['free'] if 'free' in nodes and nodes['free'] >= 0
                else 0)
        used = total - free
        values = self.virtual_machines \
            .exclude(disk_size=-1).order_by() \
            .aggregate(used=Sum('disk_size'))

        allocated = 0 if values['used'] is None else values['used']
        free = total - allocated if total - allocated >= 0 else 0

        return {'total': total, 'free': free, 'allocated': allocated,
                'used': used}

    def _refresh(self):
        return self.rapi.GetInfo()

    def instances(self, bulk=False):
        """
        Gets all VMs which reside under the Cluster.
        Calls the rapi client for all instances.
        """
        try:
            return self.rapi.GetInstances(bulk=bulk)
        except GanetiApiError:
            return []

    def instance(self, instance):
        """
        Get a single Instance.
        Calls the rapi client for a specific instance.
        """
        try:
            return self.rapi.GetInstance(instance)
        except GanetiApiError:
            return None

    def check_job_status(self):
        """
        if the cache bypass is enabled then check the status of the last
        job. When the job is complete we can re-enable the cache.

        @returns - dictionary of values that were updated
        """
        if self.last_job_id:
            (job_id,) = Job.objects.filter(pk=self.last_job_id) \
                .values_list('job_id', flat=True)
            data = self.rapi.GetJobStatus(job_id)
            status = data['status']

            if status in ('success', 'error'):
                finished = Job.parse_end_timestamp(data)
                Job.objects.filter(pk=self.last_job_id) \
                    .update(status=status, ignore_cache=False,
                            finished=finished)
                self.ignore_cache = False

            if status == 'success':
                self.last_job = None
                return dict(ignore_cache=False, last_job=None)

            elif status == 'error':
                return dict(ignore_cache=False)

    def redistribute_config(self):
        """
        Redistribute config from the cluster's master node to all
        other nodes.
        """
        # no exception handling, because it's being done in a view
        id = self.rapi.RedistributeConfig()
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.id)
        self.last_job = job
        Cluster.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job
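
# Comment sketch of keeping the database in step with ganeti using the
# Cluster methods above (the slug is hypothetical):
#
#     cluster = Cluster.objects.get(slug='example')
#     cluster.sync_nodes(remove=True)             # mirror the node list
#     cluster.sync_virtual_machines(remove=True)  # mirror the instance list
#     print cluster.missing_in_db                 # should now be empty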


class VirtualMachineTemplate(models.Model):
    """
    Virtual Machine Template holds all the values for the create virtual
    machine form so that they can automatically be used or edited by a user.
    """
    template_name = models.CharField(max_length=255, null=True, blank=True)
    cluster = models.ForeignKey('Cluster', null=True)
    start = models.BooleanField(verbose_name=_('Start up After Creation'),
                                default=True)
    name_check = models.BooleanField(verbose_name=_('DNS Name Check'),
                                     default=True)
    iallocator = models.BooleanField(verbose_name=_('Automatic Allocation'),
                                     default=False)
    iallocator_hostname = models.CharField(null=True, blank=True,
                                           max_length=255)
    disk_template = models.CharField(verbose_name=_('Disk Template'),
                                     max_length=16)
    pnode = models.CharField(verbose_name=_('Primary Node'), max_length=255,
                             null=True, blank=True)
    snode = models.CharField(verbose_name=_('Secondary Node'),
                             max_length=255, null=True, blank=True)
    os = models.CharField(verbose_name=_('Operating System'), max_length=255)

    # BEPARAMS
    vcpus = models.IntegerField(verbose_name=_('Virtual CPUs'),
                                validators=[MinValueValidator(1)],
                                null=True, blank=True)
    memory = models.IntegerField(verbose_name=_('Memory'),
                                 validators=[MinValueValidator(100)],
                                 null=True, blank=True)
    disk_size = models.IntegerField(verbose_name=_('Disk Size'), null=True,
                                    validators=[MinValueValidator(100)],
                                    blank=True)
    disk_type = models.CharField(verbose_name=_('Disk Type'), max_length=255,
                                 null=True, blank=True)
    nic_mode = models.CharField(verbose_name=_('NIC Mode'), max_length=255,
                                null=True, blank=True)
    nic_link = models.CharField(verbose_name=_('NIC Link'), max_length=255,
                                null=True, blank=True)
    nic_type = models.CharField(verbose_name=_('NIC Type'), max_length=255,
                                null=True, blank=True)

    # HVPARAMS
    kernel_path = models.CharField(verbose_name=_('Kernel Path'), null=True,
                                   blank=True, max_length=255)
    root_path = models.CharField(verbose_name=_('Root Path'), default='/',
                                 max_length=255, null=True, blank=True)
    serial_console = models.BooleanField(
        verbose_name=_('Enable Serial Console'))
    boot_order = models.CharField(verbose_name=_('Boot Device'),
                                  max_length=255, null=True, blank=True)
    cdrom_image_path = models.CharField(verbose_name=_('CD-ROM Image Path'),
                                        null=True, blank=True,
                                        max_length=512)

    def __str__(self):
        if self.template_name is None:
            return 'unnamed'
        else:
            return self.template_name


if settings.TESTING:
    # XXX - when running the test suite, create a model for testing cached
    # cluster objects
    class TestModel(CachedClusterObject):
        """ simple implementation of a cached model that has been
        instrumented """
        cluster = models.ForeignKey(Cluster)
        saved = False
        data = {'mtime': 1285883187.8692000, 'ctime': 1285799513.4741000}
        throw_error = None

        def _refresh(self):
            if self.throw_error:
                raise self.throw_error
            return self.data

        def save(self, *args, **kwargs):
            self.saved = True
            super(TestModel, self).save(*args, **kwargs)


class GanetiErrorManager(models.Manager):

    def clear_error(self, id):
        """
        Clear one particular error (used in overview template).
        """
        return self.filter(pk=id).update(cleared=True)

    def clear_errors(self, *args, **kwargs):
        """
        Clear errors instead of deleting them.
        """
        return self.get_errors(cleared=False, *args, **kwargs) \
            .update(cleared=True)

    def remove_errors(self, *args, **kwargs):
        """
        Just a shortcut if someone wants to remove some errors.
        """
        return self.get_errors(*args, **kwargs).delete()

    def get_errors(self, obj=None, **kwargs):
        """
        Manager method used for getting a QuerySet of all errors depending
        on the passed arguments.

        @param obj affected object (itself or just a QuerySet)
        @param kwargs: additional kwargs for filtering GanetiErrors
        """
        if obj is None:
            return self.filter(**kwargs)

        # Create the base query of errors to return.
        #
        # If it's a Cluster or a queryset for Clusters, then we need to get
        # all errors from the Clusters. Do this by filtering on
        # GanetiError.cluster instead of obj_id.
        if isinstance(obj, Cluster):
            return self.filter(cluster=obj, **kwargs)

        elif isinstance(obj, QuerySet):
            if obj.model == Cluster:
                return self.filter(cluster__in=obj, **kwargs)
            else:
                ct = ContentType.objects.get_for_model(obj.model)
                return self.filter(obj_type=ct, obj_id__in=obj, **kwargs)

        else:
            ct = ContentType.objects.get_for_model(obj.__class__)
            return self.filter(obj_type=ct, obj_id=obj.pk, **kwargs)

    def store_error(self, msg, obj, code, **kwargs):
        """
        Manager method used to store errors

        @param msg error's message
        @param obj object (i.e. cluster or vm) affected by the error
        @param code error's code number
        """
        ct = ContentType.objects.get_for_model(obj.__class__)
        is_cluster = isinstance(obj, Cluster)

        # 401 -- bad permissions
        # 401 is a cluster-specific error and thus shouldn't appear on any
        # other object.
        if code == 401:
            if not is_cluster:
                # NOTE: what we do here is almost like:
                #   return self.store_error(msg=msg, code=code, obj=obj.cluster)
                # we just omit the recursion
                obj = obj.cluster
                ct = ContentType.objects.get_for_model(Cluster)
                is_cluster = True

        # 404 -- object not found
        # 404 can occur on any object, but when it occurs on a cluster, then
        # any of its children must not see the error again
        elif code == 404:
            if not is_cluster:
                # return if the error exists for the cluster
                try:
                    c_ct = ContentType.objects.get_for_model(Cluster)
                    return self.get(msg=msg, obj_type=c_ct, code=code,
                                    obj_id=obj.cluster_id, cleared=False)

                except GanetiError.DoesNotExist:
                    # we want to proceed when the error is not
                    # cluster-specific
                    pass

        # XXX use a try/except instead of get_or_create(). get_or_create()
        # does not allow us to set cluster_id. This means we'd have to query
        # the cluster object to create the error. We can't guarantee the
        # cluster will already be queried, so use create() instead, which
        # does allow cluster_id.
        try:
            return self.get(msg=msg, obj_type=ct, obj_id=obj.pk, code=code,
                            **kwargs)

        except GanetiError.DoesNotExist:
            cluster_id = obj.pk if is_cluster else obj.cluster_id

            return self.create(msg=msg, obj_type=ct, obj_id=obj.pk,
                               cluster_id=cluster_id, code=code, **kwargs)
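
    # Comment sketch of how refresh() above uses this manager when a RAPI
    # call fails (vm is hypothetical):
    #
    #     GanetiError.objects.store_error('connection refused', obj=vm,
    #                                     code=None)
    #     GanetiError.objects.clear_errors(obj=vm)   # later, mark cleared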


class GanetiError(models.Model):
    """
    Class for storing errors which occurred in Ganeti
    """
    cluster = models.ForeignKey(Cluster)
    msg = models.TextField()
    code = models.PositiveSmallIntegerField(blank=True, null=True)
    timestamp = models.DateTimeField(auto_now_add=True)

    # determines whether the error still appears or not
    cleared = models.BooleanField(default=False)

    # cluster object (cluster, VM, Node) affected by the error (if any)
    obj_type = models.ForeignKey(ContentType, related_name="ganeti_errors")
    obj_id = models.PositiveIntegerField()
    obj = GenericForeignKey("obj_type", "obj_id")

    objects = GanetiErrorManager()

    class Meta:
        ordering = ("-timestamp", "code", "msg")

    def __repr__(self):
        return "<GanetiError '%s'>" % self.msg

    def __unicode__(self):
        base = "[%s] %s" % (self.timestamp, self.msg)
        return base


class ClusterUser(models.Model):
    """
    Base class for objects that may interact with a Cluster or
    VirtualMachine.
    """
    #clusters = models.ManyToManyField(Cluster, through='Quota',
    #                                  related_name='users')
    name = models.CharField(max_length=128)
    real_type = models.ForeignKey(ContentType, editable=False, null=True)

    @property
    def permissable(self):
        """ returns an object that can be granted permissions """
        # delegate to the concrete subclass (Profile or Organization)
        return self.cast().permissable

    def save(self, *args, **kwargs):
        if not self.id:
            self.real_type = self._get_real_type()
        super(ClusterUser, self).save(*args, **kwargs)

    def _get_real_type(self):
        return ContentType.objects.get_for_model(type(self))

    def cast(self):
        return self.real_type.get_object_for_this_type(pk=self.pk)

    def __unicode__(self):
        return self.name

    def used_resources(self, cluster=None, only_running=True):
        """
        Return a dictionary of the total resources used by VMs that this
        ClusterUser has permissions on.

        @param cluster if set, get only VMs from the specified cluster
        @param only_running if set, get only running VMs
        """
        # XXX - order_by must be cleared or it breaks annotation grouping,
        # since the default order_by field is also added to the group_by
        # clause
        base = self.virtual_machines.all().order_by()

        # XXX - use a custom aggregate for ram and vcpu count when filtering
        # by running. This allows us to execute a single query.
        #
        # XXX - quotes must be used in this order. postgresql quirk
        if only_running:
            sum_ram = SumIf('ram', condition="status='running'")
            sum_vcpus = SumIf('virtual_cpus', condition="status='running'")
        else:
            sum_ram = Sum('ram')
            sum_vcpus = Sum('virtual_cpus')

        base = base.exclude(ram=-1, disk_size=-1, virtual_cpus=-1)

        if cluster:
            base = base.filter(cluster=cluster)
            result = base.aggregate(ram=sum_ram, disk=Sum('disk_size'),
                                    virtual_cpus=sum_vcpus)

            # repack with zeros instead of Nones
            if result['disk'] is None:
                result['disk'] = 0
            if result['ram'] is None:
                result['ram'] = 0
            if result['virtual_cpus'] is None:
                result['virtual_cpus'] = 0
            return result

        else:
            base = base.values('cluster').annotate(uram=sum_ram,
                                                   udisk=Sum('disk_size'),
                                                   uvirtual_cpus=sum_vcpus)

            # repack as a dictionary
            result = {}
            for used in base:
                # repack with zeros instead of Nones, change index names
                used['ram'] = used['uram'] or 0
                used['disk'] = used['udisk'] or 0
                used['virtual_cpus'] = used['uvirtual_cpus'] or 0
                used.pop('uvirtual_cpus')
                used.pop('udisk')
                used.pop('uram')
                result[used.pop('cluster')] = used

            return result
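
    # Comment sketch of the aggregation above (cluster_user is hypothetical;
    # values are zero-filled rather than None):
    #
    #     cluster_user.used_resources(cluster=cluster)
    #     # -> {'ram': 1024, 'disk': 20480, 'virtual_cpus': 2}
    #     cluster_user.used_resources()
    #     # -> {<cluster pk>: {'ram': ..., 'disk': ..., 'virtual_cpus': ...}}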


class Profile(ClusterUser):
    """
    Profile associated with a django.contrib.auth.User object.
    """
    user = models.OneToOneField(User)

    def grant(self, perm, object):
        self.user.grant(perm, object)

    def set_perms(self, perms, object):
        self.user.set_perms(perms, object)

    def get_objects_any_perms(self, *args, **kwargs):
        return self.user.get_objects_any_perms(*args, **kwargs)

    def has_perm(self, *args, **kwargs):
        return self.user.has_perm(*args, **kwargs)

    @property
    def permissable(self):
        """ returns an object that can be granted permissions """
        return self.user


class Organization(ClusterUser):
    """
    An organization is used for grouping Users. Organizations are matched
    with an instance of contrib.auth.models.Group. This model exists so that
    contrib.auth.models.Group has a 1:1 relation with a ClusterUser on which
    quotas and permissions can be assigned.
    """
    group = models.OneToOneField(Group, related_name='organization')

    def grant(self, perm, object):
        self.group.grant(perm, object)

    def set_perms(self, perms, object):
        self.group.set_perms(perms, object)

    def get_objects_any_perms(self, *args, **kwargs):
        return self.group.get_objects_any_perms(*args, **kwargs)

    def has_perm(self, *args, **kwargs):
        return self.group.has_perm(*args, **kwargs)

    @property
    def permissable(self):
        """ returns an object that can be granted permissions """
        return self.group


class Quota(models.Model):
    """
    A resource limit imposed on a ClusterUser for a given Cluster. The
    attributes of this model represent maximum values the ClusterUser can
    consume. The absence of a Quota indicates unlimited usage.
    """
    user = models.ForeignKey(ClusterUser, related_name='quotas')
    cluster = models.ForeignKey(Cluster, related_name='quotas')

    ram = models.IntegerField(default=0, null=True)
    disk = models.IntegerField(default=0, null=True)
    virtual_cpus = models.IntegerField(default=0, null=True)


class SSHKey(models.Model):
    """
    Model representing a user's SSH public key. Virtual machines may rely
    on many SSH keys.
    """
    key = models.TextField(validators=[validate_sshkey])
    #filename = models.CharField(max_length=128)  # saves the key file's name
    user = models.ForeignKey(User, related_name='ssh_keys')


def create_profile(sender, instance, **kwargs):
    """
    Create a profile object whenever a new user is created. Also keeps the
    profile name synchronized with the username.
    """
    try:
        profile, new = Profile.objects.get_or_create(user=instance)
        if profile.name != instance.username:
            profile.name = instance.username
            profile.save()
    except DatabaseError:
        # XXX - since we're using south to track migrations the Profile
        # table won't be available the first time syncdb is run. Catch the
        # error here and let the south migration handle it.
        pass


def update_cluster_hash(sender, instance, **kwargs):
    """
    Updates the Cluster hash for all of its VirtualMachines, Nodes, and Jobs
    """
    instance.virtual_machines.all().update(cluster_hash=instance.hash)
    instance.jobs.all().update(cluster_hash=instance.hash)
    instance.nodes.all().update(cluster_hash=instance.hash)


def update_organization(sender, instance, **kwargs):
    """
    Creates an Organization whenever a contrib.auth.models.Group is created
    """
    org, new = Organization.objects.get_or_create(group=instance)
    org.name = instance.name
    org.save()


post_save.connect(create_profile, sender=User)
post_save.connect(update_cluster_hash, sender=Cluster)
post_save.connect(update_organization, sender=Group)

# Disconnect create_default_site from django.contrib.sites so that
# the useless table for sites is not created. This will be
# reconnected for other apps to use in update_sites_module.
post_syncdb.disconnect(create_default_site, sender=sites_app)
post_syncdb.connect(management.update_sites_module, sender=sites_app,
                    dispatch_uid="ganeti.management.update_sites_module")


def regenerate_cu_children(sender, **kwargs):
    """
    Resets may destroy Profiles and/or Organizations. We need to regenerate
    them.
    """
    # So. What are we actually doing here?
    # Whenever a User or Group is saved, the associated Profile or
    # Organization is also updated. This means that, if a Profile for a User
    # is absent, it will be created.
    # More importantly, *why* might a Profile be missing? Simple. Resets of
    # the ganeti app destroy them. This shouldn't happen in production, and
    # only occasionally in development, but it's good to explicitly handle
    # this particular case so that missing Profiles not resulting from a
    # reset are easier to diagnose.
    try:
        for user in User.objects.filter(profile__isnull=True):
            user.save()
        for group in Group.objects.filter(organization__isnull=True):
            group.save()
    except DatabaseError:
        # XXX - since we're using south to track migrations the Profile
        # table won't be available the first time syncdb is run. Catch the
        # error here and let the south migration handle it.
        pass


post_syncdb.connect(regenerate_cu_children)


def log_group_create(sender, editor, **kwargs):
    """ log group creation signal """
    log_action('CREATE', editor, sender)


def log_group_edit(sender, editor, **kwargs):
    """ log group edit signal """
    log_action('EDIT', editor, sender)


muddle_user_signals.view_group_created.connect(log_group_create)
muddle_user_signals.view_group_edited.connect(log_group_edit)


# Register permissions on our models.
# These are part of the DB schema and should not be changed without serious
# forethought.
# You *must* syncdb after you change these.
register(permissions.CLUSTER_PARAMS, Cluster, 'ganeti_web')
register(permissions.VIRTUAL_MACHINE_PARAMS, VirtualMachine, 'ganeti_web')


# register log actions
register_log_actions()