Add new tests to support create_vm permissions for owners
[ganeti_webmgr.git] / ganeti_web / models.py
1 # coding: utf-8
3 # Copyright (C) 2010 Oregon State University et al.
4 # Copyright (C) 2010 Greek Research and Technology Network
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
19 # USA.
21 import binascii
22 import cPickle
23 from datetime import datetime, timedelta
24 from hashlib import sha1
25 import random
26 import re
27 import string
28 import sys
29 import time
31 from django.conf import settings
33 from django.contrib.auth.models import User, Group
34 from django.contrib.contenttypes.generic import GenericForeignKey
35 from django.contrib.contenttypes.models import ContentType
36 from django.contrib.sites import models as sites_app
37 from django.contrib.sites.management import create_default_site
38 from django.core.validators import RegexValidator, MinValueValidator
39 from django.db import models
40 from django.db.models import BooleanField, Q, Sum
41 from django.db.models.query import QuerySet
42 from django.db.models.signals import post_save, post_syncdb
43 from django.db.utils import DatabaseError
44 from django.utils.encoding import force_unicode
45 from django.utils.translation import ugettext_lazy as _
47 from django_fields.fields import PickleField
49 from ganeti_web.logs import register_log_actions
51 from object_log.models import LogItem
52 log_action = LogItem.objects.log_action
54 from object_permissions.registration import register
56 from muddle_users import signals as muddle_user_signals
58 from ganeti_web import constants, management, permissions
59 from ganeti_web.fields import (PatchedEncryptedCharField, LowerCaseCharField,
60 PreciseDateTimeField, SumIf)
61 from ganeti_web.util import client
62 from ganeti_web.util.client import GanetiApiError, REPLACE_DISK_AUTO
64 from south.signals import post_migrate
66 if settings.VNC_PROXY:
67 from ganeti_web.util.vncdaemon.vapclient import (request_forwarding,
68 request_ssh)
71 class QuerySetManager(models.Manager):
72 """
73 Useful if you want to define manager methods that need to chain. In this
74 case create a QuerySet class within your model and add all of your methods
75 directly to the queryset. Example:
77 class Foo(models.Model):
78 enabled = fields.BooleanField()
79 dirty = fields.BooleanField()
81 class QuerySet:
82 def active(self):
83 return self.filter(enabled=True)
84 def clean(self):
85 return self.filter(dirty=False)
87 Foo.objects.active().clean()
88 """
90 def __getattr__(self, name, *args):
91 # Cull under/dunder names to avoid certain kinds of recursion. Django
92 # isn't super-bright here.
93 if name.startswith('_'):
94 raise AttributeError
95 return getattr(self.get_query_set(), name, *args)
97 def get_query_set(self):
98 return self.model.QuerySet(self.model)
101 def generate_random_password(length=12):
102 "Generate random sequence of specified length"
103 return "".join(random.sample(string.letters + string.digits, length))
105 FINISHED_JOBS = 'success', 'unknown', 'error'
107 RAPI_CACHE = {}
108 RAPI_CACHE_HASHES = {}
111 def get_rapi(hash, cluster):
113 Retrieves the cached Ganeti RAPI client for a given hash. The Hash is
114 derived from the connection credentials required for a cluster. If the
115 client is not yet cached, it will be created and added.
117 If a hash does not correspond to any cluster then Cluster.DoesNotExist will
118 be raised.
120 @param cluster - either a cluster object, or ID of object. This is used
121 for resolving the cluster if the client is not already found. The id is
122 used rather than the hash, because the hash is mutable.
124 @return a Ganeti RAPI client.
126 if hash in RAPI_CACHE:
127 return RAPI_CACHE[hash]
129 # always look up the instance, even if we were given a Cluster instance
130 # it ensures we are retrieving the latest credentials. This helps avoid
131 # stale credentials. Retrieve only the values because we don't actually
132 # need another Cluster instance here.
133 if isinstance(cluster, (Cluster,)):
134 cluster = cluster.id
135 (credentials,) = Cluster.objects.filter(id=cluster) \
136 .values_list('hash', 'hostname', 'port', 'username', 'password')
137 hash, host, port, user, password = credentials
138 user = user or None
139 # decrypt password
140 # XXX django-fields only stores str, convert to None if needed
141 password = Cluster.decrypt_password(password) if password else None
142 password = None if password in ('None', '') else password
144 # now that we know hash is fresh, check cache again. The original hash
145 # could have been stale. This avoids constructing a new RAPI that already
146 # exists.
147 if hash in RAPI_CACHE:
148 return RAPI_CACHE[hash]
150 # delete any old version of the client that was cached.
151 if cluster in RAPI_CACHE_HASHES:
152 del RAPI_CACHE[RAPI_CACHE_HASHES[cluster]]
154 # Set connect timeout in settings.py so that you do not learn patience.
155 rapi = client.GanetiRapiClient(host, port, user, password,
156 timeout=settings.RAPI_CONNECT_TIMEOUT)
157 RAPI_CACHE[hash] = rapi
158 RAPI_CACHE_HASHES[cluster] = hash
159 return rapi
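# Illustrative sketch (not part of the original module): typical use of the RAPI
# client cache. The Cluster slug used here is hypothetical; get_rapi() returns
# the same cached client for a given credential hash and rebuilds it once the
# credentials (and therefore the hash) change.
def _example_get_rapi_usage():  # never called; for illustration only
    cluster = Cluster.objects.get(slug='example')   # hypothetical row
    rapi = get_rapi(cluster.hash, cluster)          # cached per credential hash
    info = rapi.GetInfo()                           # plain RAPI call
    clear_rapi_cache()                              # drop all cached clients
    return info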
162 def clear_rapi_cache():
164 clears the rapi cache
166 RAPI_CACHE.clear()
167 RAPI_CACHE_HASHES.clear()
170 ssh_public_key_re = re.compile(
171 r'^ssh-(rsa|dsa|dss) [A-Z0-9+/=]+ .+$', re.IGNORECASE)
172 ssh_public_key_error = _("Enter a valid RSA or DSA SSH key.")
173 validate_sshkey = RegexValidator(ssh_public_key_re, ssh_public_key_error,
174 "invalid")
177 class CachedClusterObject(models.Model):
179 Parent class for objects which belong to Ganeti but have cached data in
180 GWM.
182 The main point of this class is to permit saving lots of data from Ganeti
183 so that we don't have to look things up constantly. The Ganeti RAPI is
184 slow, so avoiding it as much as possible is a good idea.
186 This class provides transparent caching for all of the data that it
187 serializes; no explicit cache accesses are required.
189 This model is abstract and may not be instantiated on its own.
192 serialized_info = models.TextField(default="", editable=False)
193 mtime = PreciseDateTimeField(null=True, editable=False)
194 cached = PreciseDateTimeField(null=True, editable=False)
195 ignore_cache = models.BooleanField(default=False)
197 last_job_id = None
198 __info = None
199 error = None
200 ctime = None
201 deleted = False
203 class Meta:
204 abstract = True
206 def save(self, *args, **kwargs):
208 overridden to ensure info is serialized prior to save
210 if not self.serialized_info:
211 self.serialized_info = cPickle.dumps(self.__info)
212 super(CachedClusterObject, self).save(*args, **kwargs)
214 def __init__(self, *args, **kwargs):
215 super(CachedClusterObject, self).__init__(*args, **kwargs)
216 self.load_info()
218 @property
219 def info(self):
221 A dictionary of metadata for this object.
223 This is a proxy for the ``serialized_info`` field. Reads from this
224 property lazily access the field, and writes to this property will be
225 lazily saved.
227 Writes to this property do *not* force serialization.
230 if self.__info is None:
231 if self.serialized_info:
232 self.__info = cPickle.loads(str(self.serialized_info))
233 return self.__info
235 def _set_info(self, value):
236 self.__info = value
237 if value is not None:
238 self.parse_info()
239 self.serialized_info = ""
241 info = info.setter(_set_info)
243 def load_info(self):
245 Load cached info retrieved from the ganeti cluster. This function
246 includes a lazy cache mechanism that uses a timer to decide whether or
247 not to refresh the cached information with new information from the
248 ganeti cluster.
250 This will ignore the cache when self.ignore_cache is True
253 epsilon = timedelta(0, 0, 0, settings.LAZY_CACHE_REFRESH)
255 if self.id:
256 if (self.ignore_cache
257 or self.cached is None
258 or datetime.now() > self.cached + epsilon):
259 self.refresh()
260 elif self.info:
261 self.parse_transient_info()
262 else:
263 self.error = 'No Cached Info'
265 def parse_info(self):
267 Parse all of the attached metadata, and attach it to this object.
270 self.parse_transient_info()
271 data = self.parse_persistent_info(self.info)
272 for k in data:
273 setattr(self, k, data[k])
275 def refresh(self):
277 Retrieve and parse info from the ganeti cluster. If successfully
278 retrieved and parsed, this method will also call save().
280 If communication with Ganeti fails, an error will be stored in
281 ``error``.
284 job_data = self.check_job_status()
285 for k, v in job_data.items():
286 setattr(self, k, v)
288 # XXX this try/except is far too big; see if we can pare it down.
289 try:
290 info_ = self._refresh()
291 if info_:
292 if info_['mtime']:
293 mtime = datetime.fromtimestamp(info_['mtime'])
294 else:
295 mtime = None
296 self.cached = datetime.now()
297 else:
298 # no info retrieved, use current mtime
299 mtime = self.mtime
301 if self.id and (self.mtime is None or mtime > self.mtime):
302 # there was an update. Set info and save the object
303 self.info = info_
304 self.save()
305 else:
306 # There was no change on the server. Only update the cache
307 # time. This bypasses the info serialization mechanism and
308 # uses a smaller query.
309 if job_data:
310 self.__class__.objects.filter(pk=self.id) \
311 .update(cached=self.cached, **job_data)
312 elif self.id is not None:
313 self.__class__.objects.filter(pk=self.id) \
314 .update(cached=self.cached)
316 except GanetiApiError, e:
317 # Use regular expressions to match the quoted message
318 # given by GanetiApiError. '\\1' is a group substitution
319 # which places the first group '('|\")' in its place.
320 comp = re.compile("('|\")(?P<msg>.*)\\1")
321 err = comp.search(str(e))
322 # Any search that has 0 results will just return None.
323 # That is why we must check for err before proceeding.
324 if err:
325 msg = err.groupdict()['msg']
326 self.error = msg
327 else:
328 msg = str(e)
329 self.error = str(e)
330 GanetiError.store_error(msg, obj=self, code=e.code)
332 else:
333 if self.error:
334 self.error = None
335 GanetiError.objects.clear_errors(obj=self)
337 def _refresh(self):
339 Fetch raw data from the Ganeti cluster.
341 This must be implemented by children of this class.
344 raise NotImplementedError
346 def check_job_status(self):
347 if not self.last_job_id:
348 return {}
350 ct = ContentType.objects.get_for_model(self)
351 qs = Job.objects.filter(content_type=ct, object_id=self.pk)
352 jobs = qs.order_by("job_id")
354 updates = {}
355 op = None
356 status = 'unknown'
358 for job in jobs:
359 try:
360 data = self.rapi.GetJobStatus(job.job_id)
362 if Job.valid_job(data):
363 op = data['ops'][-1]['OP_ID']
364 status = data['status']
366 except GanetiApiError:
367 pass
369 if status in ('success', 'error'):
370 for k, v in Job.parse_persistent_info(data).items():
371 setattr(job, k, v)
373 if status == 'unknown':
374 job.status = "unknown"
375 job.ignore_cache = False
377 if status in ('success', 'error', 'unknown'):
378 _updates = self._complete_job(self.cluster_id,
379 self.hostname, op, status)
380 # XXX if the delete flag is set in updates then delete this
381 # model this happens here because _complete_job cannot delete
382 # this model
383 if _updates:
384 if 'deleted' in _updates:
385 # Delete ourselves. Also delete the job that caused us
386 # to delete ourselves; see #8439 for "fun" details.
387 # Order matters; the job's deletion cascades over us.
388 # Revisit that when we finally nuke all this caching
389 # bullshit.
390 self.delete()
391 job.delete()
392 else:
393 updates.update(_updates)
395 # we only care about the very last job for resetting the cache flags
396 if not jobs or status in ('success', 'error', 'unknown'):
397 updates['ignore_cache'] = False
398 updates['last_job'] = None
400 return updates
402 @classmethod
403 def _complete_job(cls, cluster_id, hostname, op, status):
405 Process a completed job. This method will make any updates to related
406 classes (like deleting an instance template) and return any data that
407 should be updated. This is a class method so that this processing can
408 be done without a full instance.
410 @returns dict of updated values
413 pass
415 def parse_transient_info(self):
417 Parse properties from cached info that is stored on the class but not
418 in the database.
420 These properties will be loaded every time the object is instantiated.
421 Properties stored on the class cannot be searched efficiently via the
422 django query api.
424 This method is specific to the child object.
427 info_ = self.info
428 # XXX ganeti 2.1 ctime is always None
429 # XXX this means that we could nuke the conditionals!
430 if info_['ctime'] is not None:
431 self.ctime = datetime.fromtimestamp(info_['ctime'])
433 @classmethod
434 def parse_persistent_info(cls, info):
436 Parse properties from cached info that are stored in the database.
438 These properties will be searchable by the django query api.
440 This method is specific to the child object.
443 # mtime is sometimes None if object has never been modified
444 if info['mtime'] is None:
445 return {'mtime': None}
446 return {'mtime': datetime.fromtimestamp(info['mtime'])}
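# Illustrative sketch (not part of the original module): how the lazy cache in
# CachedClusterObject behaves for a concrete subclass. VirtualMachine is used
# here and the hostname is hypothetical. Reading ``info`` is served from the
# pickled ``serialized_info`` column while the cache is fresh; forcing
# ``ignore_cache`` makes the next instantiation call refresh() against the RAPI.
def _example_lazy_cache():  # never called; for illustration only
    vm = VirtualMachine.objects.get(hostname='vm1.example.org')  # hypothetical
    cached_metadata = vm.info          # decoded from serialized_info if fresh
    VirtualMachine.objects.filter(pk=vm.pk).update(ignore_cache=True)
    vm = VirtualMachine.objects.get(pk=vm.pk)  # __init__ -> load_info -> refresh()
    return cached_metadata, vm.mtime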
449 class JobManager(models.Manager):
451 Custom manager for Ganeti Jobs model
453 def create(self, **kwargs):
454 """ helper method for creating a job with disabled cache """
455 job = Job(ignore_cache=True, **kwargs)
456 job.save(force_insert=True)
457 return job
460 class Job(CachedClusterObject):
462 model representing a job being run on a ganeti Cluster. This includes
463 operations such as creating or deleting a virtual machine.
465 Jobs are a special type of CachedClusterObject. Jobs run once, then become
466 immutable. The lazy cache is modified to become permanent once a complete
467 status (success/error) has been detected. The cache can be disabled by
468 setting ignore_cache=True.
471 job_id = models.IntegerField()
472 content_type = models.ForeignKey(ContentType, related_name="+")
473 object_id = models.IntegerField()
474 obj = GenericForeignKey('content_type', 'object_id')
475 cluster = models.ForeignKey('Cluster', related_name='jobs', editable=False)
476 cluster_hash = models.CharField(max_length=40, editable=False)
478 finished = models.DateTimeField(null=True, blank=True)
479 status = models.CharField(max_length=10)
480 op = models.CharField(max_length=50)
482 objects = JobManager()
484 def save(self, *args, **kwargs):
486 sets the cluster_hash for newly saved instances
488 if self.id is None or self.cluster_hash == '':
489 self.cluster_hash = self.cluster.hash
491 super(Job, self).save(*args, **kwargs)
493 @models.permalink
494 def get_absolute_url(self):
495 job = '%s/job/(?P<job_id>\d+)' % self.cluster
497 return 'ganeti_web.views.jobs.detail', (), {'job': job}
499 @property
500 def rapi(self):
501 return get_rapi(self.cluster_hash, self.cluster_id)
503 def _refresh(self):
504 return self.rapi.GetJobStatus(self.job_id)
506 def load_info(self):
508 Load info for class. This will load from ganeti if ignore_cache==True,
509 otherwise this will always load from the cache.
511 if self.id and (self.ignore_cache or self.info is None):
512 try:
513 self.refresh()
514 except GanetiApiError, e:
515 # if the Job has been archived then we don't know whether it
516 # was successful or not. Mark it as unknown.
517 if e.code == 404:
518 self.status = 'unknown'
519 self.save()
520 else:
521 # it's possible the cluster or credentials are bad. fail
522 # silently
523 pass
525 def refresh(self):
526 info = self._refresh()
527 valid = self.valid_job(info)
528 if valid:
529 self.info = info
530 self.save()
531 # else:
532 # Job.objects.get(job_id=self.info['id']).delete()
534 @classmethod
535 def valid_job(cls, info):
536 status = info.get('status')
537 ops = info.get('ops')
538 return not (ops is None and status is None)
540 @classmethod
541 def parse_op(cls, info):
542 ops = info['ops']
543 op = None
544 if ops:
545 # Return the most recent operation
546 op = ops[-1]['OP_ID']
547 return op
549 @classmethod
550 def parse_persistent_info(cls, info):
552 Parse status and turn off cache bypass flag if job has finished
554 if not cls.valid_job(info):
555 return {}
556 op = cls.parse_op(info)
557 data = {'status': info['status'], 'op': op}
558 if data['status'] in ('error', 'success'):
559 data['ignore_cache'] = False
560 if info['end_ts']:
561 data['finished'] = cls.parse_end_timestamp(info)
562 return data
564 @staticmethod
565 def parse_end_timestamp(info):
566 sec, micro = info['end_ts']
567 return datetime.fromtimestamp(sec + (micro / 1000000.0))
569 def parse_transient_info(self):
570 pass
572 @property
573 def current_operation(self):
575 Jobs may consist of multiple commands/operations. This helper
576 method will return the operation that is currently running or errored
577 out, or the last operation if all operations have completed
579 @returns raw name of the current operation
581 info = self.info
582 index = 0
583 for i in range(len(info['opstatus'])):
584 if info['opstatus'][i] != 'success':
585 index = i
586 break
587 return info['ops'][index]['OP_ID']
589 @property
590 def operation(self):
592 Returns the last operation, which is generally the primary operation.
594 return self.parse_op(self.info)
596 def __repr__(self):
597 return "<Job %d (%d), status %r>" % (self.id, self.job_id,
598 self.status)
600 __unicode__ = __repr__
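# Illustrative sketch (not part of the original module): Job.parse_persistent_info
# on a hand-built RAPI job description. The values are made up; the point is that
# a finished job turns the cache bypass flag back off and records the finish time
# from the (seconds, microseconds) ``end_ts`` pair.
def _example_job_parsing():  # never called; for illustration only
    info = {'status': 'success',
            'ops': [{'OP_ID': 'OP_INSTANCE_CREATE'}],
            'end_ts': (1325376000, 500000)}
    data = Job.parse_persistent_info(info)
    # data == {'status': 'success', 'op': 'OP_INSTANCE_CREATE',
    #          'ignore_cache': False,
    #          'finished': datetime.fromtimestamp(1325376000.5)}
    return data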
603 class VirtualMachine(CachedClusterObject):
605 The VirtualMachine (VM) model represents VMs within a Ganeti cluster.
607 The majority of properties are a cache for data stored in the cluster.
608 All data retrieved via the RAPI is stored in VirtualMachine.info, and
609 serialized automatically into VirtualMachine.serialized_info.
611 Attributes that need to be searchable should be stored as model fields.
612 All other attributes will be stored within VirtualMachine.info.
614 This object uses a lazy update mechanism on instantiation. If the cached
615 info from the Ganeti cluster has expired, it will trigger an update. This
616 allows the cache to function in the absence of a periodic update mechanism
617 such as Cron, Celery, or Threads.
619 XXX Serialized_info can possibly be changed to a CharField if an upper
620 limit can be determined. (Later Date, if it will optimize db)
623 cluster = models.ForeignKey('Cluster', related_name='virtual_machines',
624 editable=False, default=0)
625 hostname = LowerCaseCharField(max_length=128, db_index=True)
626 owner = models.ForeignKey('ClusterUser', related_name='virtual_machines',
627 null=True, blank=True,
628 on_delete=models.SET_NULL)
629 virtual_cpus = models.IntegerField(default=-1)
630 disk_size = models.IntegerField(default=-1)
631 ram = models.IntegerField(default=-1)
632 minram = models.IntegerField(default=-1)
633 cluster_hash = models.CharField(max_length=40, editable=False)
634 operating_system = models.CharField(max_length=128)
635 status = models.CharField(max_length=14)
637 # node relations
638 primary_node = models.ForeignKey('Node', related_name='primary_vms',
639 null=True, blank=True)
640 secondary_node = models.ForeignKey('Node', related_name='secondary_vms',
641 null=True, blank=True)
643 # The last job reference indicates that there is at least one pending job
644 # for this virtual machine. There may be more than one job, and that can
645 # never be prevented. This just indicates that job(s) are pending and the
646 # job related code should be run (status, cleanup, etc).
647 last_job = models.ForeignKey('Job', related_name="+", null=True,
648 blank=True)
650 # deleted flag indicates a VM is being deleted, but the job has not
651 # completed yet. VMs that have pending_delete are still displayed in lists
652 # and counted in quotas, but only so status can be checked.
653 pending_delete = models.BooleanField(default=False)
654 deleted = False
656 # Template temporarily stores parameters used to create this virtual
657 # machine. This template is used to recreate the values entered into the
658 # form.
659 template = models.ForeignKey("VirtualMachineTemplate",
660 related_name="instances", null=True,
661 blank=True)
663 class Meta:
664 ordering = ["hostname"]
665 unique_together = (("cluster", "hostname"),)
667 def __unicode__(self):
668 return self.hostname
670 def save(self, *args, **kwargs):
672 sets the cluster_hash for newly saved instances
674 if self.id is None:
675 self.cluster_hash = self.cluster.hash
677 info_ = self.info
678 if info_:
679 found = False
680 remove = []
681 if self.cluster.username:
682 for tag in info_['tags']:
683 # Update owner Tag. Make sure the tag is set to the owner
684 # that is set in webmgr.
685 if tag.startswith(constants.OWNER_TAG):
686 id = int(tag[len(constants.OWNER_TAG):])
687 # Since there is no 'update tag' delete old tag and
688 # replace with tag containing correct owner id.
689 if id == self.owner_id:
690 found = True
691 else:
692 remove.append(tag)
693 if remove:
694 self.rapi.DeleteInstanceTags(self.hostname, remove)
695 for tag in remove:
696 info_['tags'].remove(tag)
697 if self.owner_id and not found:
698 tag = '%s%s' % (constants.OWNER_TAG, self.owner_id)
699 self.rapi.AddInstanceTags(self.hostname, [tag])
700 self.info['tags'].append(tag)
702 super(VirtualMachine, self).save(*args, **kwargs)
704 @models.permalink
705 def get_absolute_url(self):
707 Return absolute url for this instance.
710 return 'instance-detail', (), {'cluster_slug': self.cluster.slug,
711 'instance': self.hostname}
713 @property
714 def rapi(self):
715 return get_rapi(self.cluster_hash, self.cluster_id)
717 @property
718 def is_running(self):
719 return self.status == 'running'
721 @classmethod
722 def parse_persistent_info(cls, info):
724 Loads all values from cached info, including persistent properties that
725 are stored in the database
727 data = super(VirtualMachine, cls).parse_persistent_info(info)
729 # Parse resource properties
730 data['ram'] = info['beparams']['memory']
731 data['virtual_cpus'] = info['beparams']['vcpus']
732 # Sum up the size of each disk used by the VM
733 disk_size = 0
734 for disk in info['disk.sizes']:
735 disk_size += disk
736 data['disk_size'] = disk_size
737 data['operating_system'] = info['os']
738 data['status'] = info['status']
740 primary = info['pnode']
741 if primary:
742 try:
743 data['primary_node'] = Node.objects.get(hostname=primary)
744 except Node.DoesNotExist:
745 # node is not created yet. fail silently
746 data['primary_node'] = None
747 else:
748 data['primary_node'] = None
750 secondary = info['snodes']
751 if len(secondary):
752 secondary = secondary[0]
753 try:
754 data['secondary_node'] = Node.objects.get(hostname=secondary)
755 except Node.DoesNotExist:
756 # node is not created yet. fail silently
757 data['secondary_node'] = None
758 else:
759 data['secondary_node'] = None
761 return data
763 @classmethod
764 def _complete_job(cls, cluster_id, hostname, op, status):
766 If the cache bypass is enabled, check the status of the last job. When
767 the job is complete we can re-enable the cache.
769 @returns - dictionary of values that were updated
772 if status == 'unknown':
773 # unknown status, the job was archived before its final status
774 # was polled. Impossible to tell what happened. Clear the job
775 # so it is no longer polled.
777 # XXX This VM might be added by the CLI and be in an invalid
778 # pending_delete state. clearing pending_delete prevents this
779 # but will result in "missing" vms in some cases.
780 return dict(pending_delete=False)
782 base = VirtualMachine.objects.filter(cluster=cluster_id,
783 hostname=hostname)
784 if op == 'OP_INSTANCE_REMOVE':
785 if status == 'success':
786 # XXX can't actually delete here since it would cause a
787 # recursive loop
788 return dict(deleted=True)
790 elif op == 'OP_INSTANCE_CREATE' and status == 'success':
791 # XXX must update before deleting the template to maintain
792 # referential integrity. as a consequence return no other
793 # updates.
794 base.update(template=None)
795 VirtualMachineTemplate.objects \
796 .filter(instances__hostname=hostname,
797 instances__cluster=cluster_id) \
798 .delete()
799 return dict(template=None)
800 return
802 def _refresh(self):
803 # XXX if delete is pending then no need to refresh this object.
804 if self.pending_delete or self.template_id:
805 return None
806 return self.rapi.GetInstance(self.hostname)
808 def shutdown(self, timeout=None):
809 if timeout is None:
810 id = self.rapi.ShutdownInstance(self.hostname)
811 else:
812 id = self.rapi.ShutdownInstance(self.hostname, timeout=timeout)
814 job = Job.objects.create(job_id=id, obj=self,
815 cluster_id=self.cluster_id)
816 self.last_job = job
817 VirtualMachine.objects.filter(pk=self.id) \
818 .update(last_job=job, ignore_cache=True)
819 return job
821 def startup(self):
822 id = self.rapi.StartupInstance(self.hostname)
823 job = Job.objects.create(job_id=id, obj=self,
824 cluster_id=self.cluster_id)
825 self.last_job = job
826 VirtualMachine.objects.filter(pk=self.id) \
827 .update(last_job=job, ignore_cache=True)
828 return job
830 def reboot(self):
831 id = self.rapi.RebootInstance(self.hostname)
832 job = Job.objects.create(job_id=id, obj=self,
833 cluster_id=self.cluster_id)
834 self.last_job = job
835 VirtualMachine.objects.filter(pk=self.id) \
836 .update(last_job=job, ignore_cache=True)
837 return job
839 def migrate(self, mode='live', cleanup=False):
841 Migrates this VirtualMachine to another node.
843 Only works if the disk type is DRBD.
845 @param mode: live or non-live
846 @param cleanup: clean up a previous migration, default is False
848 id = self.rapi.MigrateInstance(self.hostname, mode, cleanup)
849 job = Job.objects.create(job_id=id, obj=self,
850 cluster_id=self.cluster_id)
851 self.last_job = job
852 VirtualMachine.objects.filter(pk=self.id) \
853 .update(last_job=job, ignore_cache=True)
854 return job
856 def replace_disks(self, mode=REPLACE_DISK_AUTO, disks=None, node=None,
857 iallocator=None):
858 id = self.rapi.ReplaceInstanceDisks(self.hostname, disks, mode, node,
859 iallocator)
860 job = Job.objects.create(job_id=id, obj=self,
861 cluster_id=self.cluster_id)
862 self.last_job = job
863 VirtualMachine.objects.filter(pk=self.id) \
864 .update(last_job=job, ignore_cache=True)
865 return job
867 def setup_ssh_forwarding(self, sport=0):
869 Poke a proxy to start SSH forwarding.
871 Returns None if no proxy is configured, or if there was an error
872 contacting the proxy.
875 command = self.rapi.GetInstanceConsole(self.hostname)["command"]
877 if settings.VNC_PROXY:
878 proxy_server = settings.VNC_PROXY.split(":")
879 password = generate_random_password()
880 sport = request_ssh(proxy_server, sport, self.info["pnode"],
881 self.info["network_port"], password, command)
883 if sport:
884 return proxy_server[0], sport, password
886 def setup_vnc_forwarding(self, sport=0, tls=False):
888 Obtain VNC forwarding information, optionally configuring a proxy.
890 Returns None if a proxy is configured and there was an error
891 contacting the proxy.
894 password = ''
895 info_ = self.info
896 port = info_['network_port']
897 node = info_['pnode']
899 # use proxy for VNC connection
900 if settings.VNC_PROXY:
901 proxy_server = settings.VNC_PROXY.split(":")
902 password = generate_random_password()
903 result = request_forwarding(proxy_server, node, port, password,
904 sport=sport, tls=tls)
905 if result:
906 return proxy_server[0], int(result), password
907 else:
908 return node, port, password
910 def __repr__(self):
911 return "<VirtualMachine: '%s'>" % self.hostname
914 class Node(CachedClusterObject):
916 The Node model represents nodes within a Ganeti cluster.
918 The majority of properties are a cache for data stored in the cluster.
919 All data retrieved via the RAPI is stored in Node.info, and serialized
920 automatically into Node.serialized_info.
922 Attributes that need to be searchable should be stored as model fields.
923 All other attributes will be stored within Node.info.
926 ROLE_CHOICES = ((k, v) for k, v in constants.NODE_ROLE_MAP.items())
928 cluster = models.ForeignKey('Cluster', related_name='nodes')
929 hostname = LowerCaseCharField(max_length=128, unique=True)
930 cluster_hash = models.CharField(max_length=40, editable=False)
931 offline = models.BooleanField()
932 role = models.CharField(max_length=1, choices=ROLE_CHOICES)
933 ram_total = models.IntegerField(default=-1)
934 ram_free = models.IntegerField(default=-1)
935 disk_total = models.IntegerField(default=-1)
936 disk_free = models.IntegerField(default=-1)
937 cpus = models.IntegerField(null=True, blank=True)
939 # The last job reference indicates that there is at least one pending job
940 # for this node. There may be more than one job, and that can
941 # never be prevented. This just indicates that job(s) are pending and the
942 # job related code should be run (status, cleanup, etc).
943 last_job = models.ForeignKey('Job', related_name="+", null=True,
944 blank=True)
946 def __unicode__(self):
947 return self.hostname
949 def save(self, *args, **kwargs):
951 sets the cluster_hash for newly saved instances
953 if self.id is None:
954 self.cluster_hash = self.cluster.hash
955 super(Node, self).save(*args, **kwargs)
957 @models.permalink
958 def get_absolute_url(self):
960 Return absolute url for this node.
963 return 'node-detail', (), {'cluster_slug': self.cluster.slug,
964 'host': self.hostname}
966 def _refresh(self):
967 """ returns node info from the ganeti server """
968 return self.rapi.GetNode(self.hostname)
970 @property
971 def rapi(self):
972 return get_rapi(self.cluster_hash, self.cluster_id)
974 @classmethod
975 def parse_persistent_info(cls, info):
977 Loads all values from cached info, including persistent properties that
978 are stored in the database
980 data = super(Node, cls).parse_persistent_info(info)
982 # Parse resource properties
983 data['ram_total'] = info.get("mtotal") or 0
984 data['ram_free'] = info.get("mfree") or 0
985 data['disk_total'] = info.get("dtotal") or 0
986 data['disk_free'] = info.get("dfree") or 0
987 data['cpus'] = info.get("csockets")
988 data['offline'] = info['offline']
989 data['role'] = info['role']
990 return data
992 @property
993 def ram(self):
994 """ returns dict of free and total ram """
995 values = VirtualMachine.objects \
996 .filter(Q(primary_node=self) | Q(secondary_node=self)) \
997 .filter(status='running') \
998 .exclude(ram=-1).order_by() \
999 .aggregate(used=Sum('ram'))
1001 total = self.ram_total
1002 used = total - self.ram_free
1003 allocated = values.get("used") or 0
1004 free = total - allocated if allocated >= 0 and total >= 0 else -1
1006 return {
1007 'total': total,
1008 'free': free,
1009 'allocated': allocated,
1010 'used': used,
1013 @property
1014 def disk(self):
1015 """ returns dict of free and total disk space """
1016 values = VirtualMachine.objects \
1017 .filter(Q(primary_node=self) | Q(secondary_node=self)) \
1018 .exclude(disk_size=-1).order_by() \
1019 .aggregate(used=Sum('disk_size'))
1021 total = self.disk_total
1022 used = total - self.disk_free
1023 allocated = values.get("used") or 0
1024 free = total - allocated if allocated >= 0 and total >= 0 else -1
1026 return {
1027 'total': total,
1028 'free': free,
1029 'allocated': allocated,
1030 'used': used,
1033 @property
1034 def allocated_cpus(self):
1035 values = VirtualMachine.objects \
1036 .filter(primary_node=self, status='running') \
1037 .exclude(virtual_cpus=-1).order_by() \
1038 .aggregate(cpus=Sum('virtual_cpus'))
1039 return values.get("cpus") or 0
1041 def set_role(self, role, force=False):
1043 Sets the role for this node
1045 @param role - one of the following choices:
1046 * master
1047 * master-candidate
1048 * regular
1049 * drained
1050 * offline
1052 id = self.rapi.SetNodeRole(self.hostname, role, force)
1053 job = Job.objects.create(job_id=id, obj=self,
1054 cluster_id=self.cluster_id)
1055 self.last_job = job
1056 Node.objects.filter(pk=self.pk).update(ignore_cache=True, last_job=job)
1057 return job
1059 def evacuate(self, iallocator=None, node=None):
1061 migrates all secondary instances off this node
1063 id = self.rapi.EvacuateNode(self.hostname, iallocator=iallocator,
1064 remote_node=node)
1065 job = Job.objects.create(job_id=id, obj=self,
1066 cluster_id=self.cluster_id)
1067 self.last_job = job
1068 Node.objects.filter(pk=self.pk) \
1069 .update(ignore_cache=True, last_job=job)
1070 return job
1072 def migrate(self, mode=None):
1074 migrates all primary instances off this node
1076 id = self.rapi.MigrateNode(self.hostname, mode)
1077 job = Job.objects.create(job_id=id, obj=self,
1078 cluster_id=self.cluster_id)
1079 self.last_job = job
1080 Node.objects.filter(pk=self.pk).update(ignore_cache=True, last_job=job)
1081 return job
1083 def __repr__(self):
1084 return "<Node: '%s'>" % self.hostname
1087 class Cluster(CachedClusterObject):
1089 A Ganeti cluster that is being tracked by this manager tool
1091 hostname = LowerCaseCharField(_('hostname'), max_length=128, unique=True)
1092 slug = models.SlugField(_('slug'), max_length=50, unique=True,
1093 db_index=True)
1094 port = models.PositiveIntegerField(_('port'), default=5080)
1095 description = models.CharField(_('description'), max_length=128,
1096 blank=True)
1097 username = models.CharField(_('username'), max_length=128, blank=True)
1098 password = PatchedEncryptedCharField(_('password'), default="",
1099 max_length=128, blank=True)
1100 hash = models.CharField(_('hash'), max_length=40, editable=False)
1102 # quota properties
1103 virtual_cpus = models.IntegerField(_('Virtual CPUs'), null=True,
1104 blank=True)
1105 disk = models.IntegerField(_('disk'), null=True, blank=True)
1106 ram = models.IntegerField(_('ram'), null=True, blank=True)
1108 # The last job reference indicates that there is at least one pending job
1109 # for this cluster. There may be more than one job, and that can
1110 # never be prevented. This just indicates that job(s) are pending and the
1111 # job related code should be run (status, cleanup, etc).
1112 last_job = models.ForeignKey('Job', related_name='cluster_last_job',
1113 null=True, blank=True)
1115 class Meta:
1116 ordering = ["hostname", "description"]
1118 def __unicode__(self):
1119 return self.hostname
1121 def save(self, *args, **kwargs):
1122 self.hash = self.create_hash()
1123 super(Cluster, self).save(*args, **kwargs)
1125 @models.permalink
1126 def get_absolute_url(self):
1127 return 'cluster-detail', (), {'cluster_slug': self.slug}
1129 # XXX probably hax
1130 @property
1131 def cluster_id(self):
1132 return self.id
1134 @classmethod
1135 def decrypt_password(cls, value):
1137 Convenience method for decrypting a password without an instance.
1138 This was partly cribbed from django-fields which only allows decrypting
1139 from a model instance.
1141 If the password appears to be encrypted, this method will decrypt it;
1142 otherwise, it will return the password unchanged.
1144 This method is bonghits.
1147 field, chaff, chaff, chaff = cls._meta.get_field_by_name('password')
1149 if value.startswith(field.prefix):
1150 ciphertext = value[len(field.prefix):]
1151 plaintext = field.cipher.decrypt(binascii.a2b_hex(ciphertext))
1152 password = plaintext.split('\0')[0]
1153 else:
1154 password = value
1156 return force_unicode(password)
1158 @property
1159 def rapi(self):
1161 retrieves the rapi client for this cluster.
1163 # XXX always pass self in. not only does it avoid querying this object
1164 # from the DB a second time, it also prevents a recursion loop caused
1165 # by __init__ fetching info from the Cluster
1166 return get_rapi(self.hash, self)
1168 def create_hash(self):
1170 Creates a hash for this cluster based on credentials required for
1171 connecting to the server
1173 s = '%s%s%s%s' % (self.username, self.password, self.hostname,
1174 self.port)
1175 return sha1(s).hexdigest()
1177 def get_default_quota(self):
1179 Returns the default quota for this cluster
1181 return {
1182 "default": 1,
1183 "ram": self.ram,
1184 "disk": self.disk,
1185 "virtual_cpus": self.virtual_cpus,
1188 def get_quota(self, user=None):
1190 Get the quota for a ClusterUser
1192 @return user's quota, default quota, or none
1194 if user is None:
1195 return self.get_default_quota()
1197 # attempt to query user specific quota first. if it does not exist
1198 # then fall back to the default quota
1199 query = Quota.objects.filter(cluster=self, user=user)
1200 quotas = query.values('ram', 'disk', 'virtual_cpus')
1201 if quotas:
1202 quota = quotas[0]
1203 quota['default'] = 0
1204 return quota
1206 return self.get_default_quota()
1208 def set_quota(self, user, data):
1210 Set the quota for a ClusterUser.
1212 If data is None, the quota will be removed.
1214 @param data: dictionary of values, or None to delete the quota
1217 kwargs = {'cluster': self, 'user': user}
1218 if data is None:
1219 Quota.objects.filter(**kwargs).delete()
1220 else:
1221 quota, new = Quota.objects.get_or_create(**kwargs)
1222 quota.__dict__.update(data)
1223 quota.save()
1225 @classmethod
1226 def get_quotas(cls, clusters=None, user=None):
1227 """ retrieve a bulk list of cluster quotas """
1229 if clusters is None:
1230 clusters = Cluster.objects.all()
1232 quotas = {}
1233 cluster_id_map = {}
1234 for cluster in clusters:
1235 quotas[cluster] = {
1236 'default': 1,
1237 'ram': cluster.ram,
1238 'disk': cluster.disk,
1239 'virtual_cpus': cluster.virtual_cpus,
1241 cluster_id_map[cluster.id] = cluster
1243 # get user's custom queries if any
1244 if user is not None:
1245 qs = Quota.objects.filter(cluster__in=clusters, user=user)
1246 values = qs.values('ram', 'disk', 'virtual_cpus', 'cluster__id')
1248 for custom in values:
1249 try:
1250 cluster = cluster_id_map[custom['cluster__id']]
1251 except KeyError:
1252 continue
1253 custom['default'] = 0
1254 del custom['cluster__id']
1255 quotas[cluster] = custom
1257 return quotas
1259 def sync_virtual_machines(self, remove=False):
1261 Synchronizes the VirtualMachines in the database with the information
1262 this ganeti cluster has:
1263 * VMs no longer in ganeti are deleted
1264 * VMs missing from the database are added
1266 ganeti = self.instances()
1267 db = self.virtual_machines.all().values_list('hostname', flat=True)
1269 # add VMs missing from the database
1270 for hostname in filter(lambda x: unicode(x) not in db, ganeti):
1271 vm = VirtualMachine.objects.create(cluster=self, hostname=hostname)
1272 vm.refresh()
1274 # deletes VMs that are no longer in ganeti
1275 if remove:
1276 missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
1277 if missing_ganeti:
1278 self.virtual_machines \
1279 .filter(hostname__in=missing_ganeti).delete()
1281 # Get up to date data on all VMs
1282 self.refresh_virtual_machines()
1284 def refresh_virtual_machines(self):
1285 for vm in self.virtual_machines.all():
1286 vm.refresh()
1288 def sync_nodes(self, remove=False):
1290 Synchronizes the Nodes in the database with the information
1291 this ganeti cluster has:
1292 * Nodes no longer in ganeti are deleted
1293 * Nodes missing from the database are added
1295 ganeti = self.rapi.GetNodes()
1296 db = self.nodes.all().values_list('hostname', flat=True)
1298 # add Nodes missing from the database
1299 for hostname in filter(lambda x: unicode(x) not in db, ganeti):
1300 node = Node.objects.create(cluster=self, hostname=hostname)
1301 node.refresh()
1303 # deletes Nodes that are no longer in ganeti
1304 if remove:
1305 missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
1306 if missing_ganeti:
1307 self.nodes.filter(hostname__in=missing_ganeti).delete()
1309 # Get up to date data for all Nodes
1310 self.refresh_nodes()
1312 def refresh_nodes(self):
1313 for node in self.nodes.all():
1314 node.refresh()
1316 @property
1317 def missing_in_ganeti(self):
1319 Returns a list of VirtualMachines that are missing from the Ganeti
1320 cluster but present in the database.
1322 ganeti = self.instances()
1323 qs = self.virtual_machines.exclude(template__isnull=False)
1324 db = qs.values_list('hostname', flat=True)
1325 return [x for x in db if str(x) not in ganeti]
1327 @property
1328 def missing_in_db(self):
1330 Returns list of VirtualMachines that are missing from the database, but
1331 present in ganeti
1333 ganeti = self.instances()
1334 db = self.virtual_machines.all().values_list('hostname', flat=True)
1335 return [x for x in ganeti if unicode(x) not in db]
1337 @property
1338 def nodes_missing_in_db(self):
1340 Returns list of Nodes that are missing from the database, but present
1341 in ganeti.
1343 try:
1344 ganeti = self.rapi.GetNodes()
1345 except GanetiApiError:
1346 ganeti = []
1347 db = self.nodes.all().values_list('hostname', flat=True)
1348 return [x for x in ganeti if unicode(x) not in db]
1350 @property
1351 def nodes_missing_in_ganeti(self):
1353 Returns list of Nodes that are missing from the ganeti cluster
1354 but present in the database
1356 try:
1357 ganeti = self.rapi.GetNodes()
1358 except GanetiApiError:
1359 ganeti = []
1360 db = self.nodes.all().values_list('hostname', flat=True)
1361 return filter(lambda x: str(x) not in ganeti, db)
1363 @property
1364 def available_ram(self):
1365 """ returns dict of free and total ram """
1366 nodes = self.nodes.exclude(ram_total=-1) \
1367 .aggregate(total=Sum('ram_total'), free=Sum('ram_free'))
1368 total = max(nodes.get("total", 0), 0)
1369 free = max(nodes.get("free", 0), 0)
1370 used = total - free
1371 values = self.virtual_machines \
1372 .filter(status='running') \
1373 .exclude(ram=-1).order_by() \
1374 .aggregate(used=Sum('ram'))
1376 if values.get("used") is None:
1377 allocated = 0
1378 else:
1379 allocated = values["used"]
1381 free = max(total - allocated, 0)
1383 return {
1384 'total': total,
1385 'free': free,
1386 'allocated': allocated,
1387 'used': used,
1390 @property
1391 def available_disk(self):
1392 """ returns dict of free and total disk space """
1393 nodes = self.nodes.exclude(disk_total=-1) \
1394 .aggregate(total=Sum('disk_total'), free=Sum('disk_free'))
1395 total = max(nodes.get("total", 0), 0)
1396 free = max(nodes.get("free", 0), 0)
1397 used = total - free
1398 values = self.virtual_machines \
1399 .exclude(disk_size=-1).order_by() \
1400 .aggregate(used=Sum('disk_size'))
1402 if values.get("used") is None:
1403 allocated = 0
1404 else:
1405 allocated = values["used"]
1407 free = max(total - allocated, 0)
1409 return {
1410 'total': total,
1411 'free': free,
1412 'allocated': allocated,
1413 'used': used,
1416 def _refresh(self):
1417 return self.rapi.GetInfo()
1419 def instances(self, bulk=False):
1420 """Gets all VMs which reside under the Cluster
1421 Calls the rapi client for all instances.
1423 try:
1424 return self.rapi.GetInstances(bulk=bulk)
1425 except GanetiApiError:
1426 return []
1428 def instance(self, instance):
1429 """Get a single Instance
1430 Calls the rapi client for a specific instance.
1432 try:
1433 return self.rapi.GetInstance(instance)
1434 except GanetiApiError:
1435 return None
1437 def redistribute_config(self):
1439 Redistribute config from cluster's master node to all
1440 other nodes.
1442 # no exception handling, because it's being done in a view
1443 id = self.rapi.RedistributeConfig()
1444 job = Job.objects.create(job_id=id, obj=self, cluster_id=self.id)
1445 self.last_job = job
1446 Cluster.objects.filter(pk=self.id) \
1447 .update(last_job=job, ignore_cache=True)
1448 return job
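# Illustrative sketch (not part of the original module): per-user quotas on a
# cluster. The slug and user name are hypothetical. Without a Quota row,
# get_quota() falls back to the cluster-wide defaults (``default: 1``); passing
# None to set_quota() deletes the custom quota and restores that behaviour.
def _example_cluster_quota():  # never called; for illustration only
    cluster = Cluster.objects.get(slug='example')   # hypothetical row
    owner = ClusterUser.objects.get(name='jdoe')    # hypothetical row
    cluster.set_quota(owner, {'ram': 4096, 'disk': 80000, 'virtual_cpus': 4})
    custom = cluster.get_quota(owner)   # {'default': 0, 'ram': 4096, ...}
    cluster.set_quota(owner, None)      # remove the Quota row again
    return custom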
1451 class VirtualMachineTemplate(models.Model):
1453 Virtual Machine Template holds all the values for the create virtual
1454 machine form so that they can automatically be used or edited by a user.
1457 template_name = models.CharField(max_length=255, default="")
1458 temporary = BooleanField(verbose_name=_("Temporary"), default=False)
1459 description = models.CharField(max_length=255, default="")
1460 cluster = models.ForeignKey(Cluster, related_name="templates", null=True,
1461 blank=True)
1462 start = models.BooleanField(verbose_name=_('Start up After Creation'),
1463 default=True)
1464 no_install = models.BooleanField(verbose_name=_('Do not install OS'),
1465 default=False)
1466 ip_check = BooleanField(verbose_name=_("IP Check"), default=True)
1467 name_check = models.BooleanField(verbose_name=_('DNS Name Check'),
1468 default=True)
1469 iallocator = models.BooleanField(verbose_name=_('Automatic Allocation'),
1470 default=False)
1471 iallocator_hostname = LowerCaseCharField(max_length=255, blank=True)
1472 disk_template = models.CharField(verbose_name=_('Disk Template'),
1473 max_length=16)
1474 # XXX why aren't these FKs?
1475 pnode = models.CharField(verbose_name=_('Primary Node'), max_length=255,
1476 default="")
1477 snode = models.CharField(verbose_name=_('Secondary Node'), max_length=255,
1478 default="")
1479 os = models.CharField(verbose_name=_('Operating System'), max_length=255)
1481 # Backend parameters (BEPARAMS)
1482 vcpus = models.IntegerField(verbose_name=_('Virtual CPUs'),
1483 validators=[MinValueValidator(1)], null=True,
1484 blank=True)
1485 # XXX do we really want the minimum memory to be 100MiB? This isn't
1486 # strictly necessary AFAICT.
1487 memory = models.IntegerField(verbose_name=_('Memory'),
1488 validators=[MinValueValidator(100)],
1489 null=True, blank=True)
1490 minmem = models.IntegerField(verbose_name=_('Minimum Memory'),
1491 validators=[MinValueValidator(100)],
1492 null=True, blank=True)
1493 disks = PickleField(verbose_name=_('Disks'), null=True, blank=True)
1494 # XXX why isn't this an enum?
1495 disk_type = models.CharField(verbose_name=_('Disk Type'), max_length=255,
1496 default="")
1497 nics = PickleField(verbose_name=_('NICs'), null=True, blank=True)
1498 # XXX why isn't this an enum?
1499 nic_type = models.CharField(verbose_name=_('NIC Type'), max_length=255,
1500 default="")
1502 # Hypervisor parameters (HVPARAMS)
1503 kernel_path = models.CharField(verbose_name=_('Kernel Path'),
1504 max_length=255, default="", blank=True)
1505 root_path = models.CharField(verbose_name=_('Root Path'), max_length=255,
1506 default='/', blank=True)
1507 serial_console = models.BooleanField(
1508 verbose_name=_('Enable Serial Console'))
1509 boot_order = models.CharField(verbose_name=_('Boot Device'),
1510 max_length=255, default="")
1511 cdrom_image_path = models.CharField(verbose_name=_('CD-ROM Image Path'),
1512 max_length=512, blank=True)
1513 cdrom2_image_path = models.CharField(
1514 verbose_name=_('CD-ROM 2 Image Path'),
1515 max_length=512, blank=True)
1517 class Meta:
1518 unique_together = (("cluster", "template_name"),)
1520 def __unicode__(self):
1521 if self.temporary:
1522 return u'(temporary)'
1523 else:
1524 return self.template_name
1526 def set_name(self, name):
1528 Set this template's name.
1530 If the name is blank, this template will become temporary and its name
1531 will be set to a unique timestamp.
1534 if name:
1535 self.template_name = name
1536 else:
1537 # The template is temporary and will be removed by the VM when the
1538 # VM successfully comes into existence.
1539 self.temporary = True
1540 # Give it a temporary name. Something unique. This is the number
1541 # of microseconds since the epoch; I figure that it'll work out
1542 # alright.
1543 self.template_name = str(int(time.time() * (10 ** 6)))
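# Illustrative sketch (not part of the original module): how set_name() decides
# between a reusable, named template and a temporary one. Field values here are
# hypothetical and incomplete; only the naming behaviour is being shown.
def _example_template_naming():  # never called; for illustration only
    named = VirtualMachineTemplate(os='image+debian', disk_template='plain')
    named.set_name('web-server')   # keeps the given name, stays non-temporary
    anonymous = VirtualMachineTemplate(os='image+debian', disk_template='plain')
    anonymous.set_name('')         # becomes temporary with a timestamp name
    return named.template_name, anonymous.temporary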
1546 class GanetiError(models.Model):
1548 Class for storing errors which occurred in Ganeti
1550 cluster = models.ForeignKey(Cluster, related_name="errors")
1551 msg = models.TextField()
1552 code = models.PositiveIntegerField(blank=True, null=True)
1554 # XXX could be fixed with django-model-util's TimeStampedModel
1555 timestamp = models.DateTimeField()
1557 # determines whether the error still appears or not
1558 cleared = models.BooleanField(default=False)
1560 # cluster object (cluster, VM, Node) affected by the error (if any)
1561 obj_type = models.ForeignKey(ContentType, related_name="ganeti_errors")
1562 obj_id = models.PositiveIntegerField()
1563 obj = GenericForeignKey("obj_type", "obj_id")
1565 objects = QuerySetManager()
1567 class Meta:
1568 ordering = ("-timestamp", "code", "msg")
1570 def __unicode__(self):
1571 base = u"[%s] %s" % (self.timestamp, self.msg)
1572 return base
1574 class QuerySet(QuerySet):
1576 def clear_errors(self, obj=None):
1578 Clear errors instead of deleting them.
1581 qs = self.filter(cleared=False)
1583 if obj:
1584 qs = qs.get_errors(obj)
1586 return qs.update(cleared=True)
1588 def get_errors(self, obj):
1590 Manager method used for getting QuerySet of all errors depending
1591 on passed arguments.
1593 @param obj affected object (itself or just QuerySet)
1596 if obj is None:
1597 raise RuntimeError("Implementation error calling get_errors()"
1598 "with None")
1600 # Create base query of errors to return.
1602 # if it's a Cluster or a queryset for Clusters, then we need to
1603 # get all errors from the Clusters. Do this by filtering on
1604 # GanetiError.cluster instead of obj_id.
1605 if isinstance(obj, (Cluster,)):
1606 return self.filter(cluster=obj)
1608 elif isinstance(obj, (QuerySet,)):
1609 if obj.model == Cluster:
1610 return self.filter(cluster__in=obj)
1611 else:
1612 ct = ContentType.objects.get_for_model(obj.model)
1613 return self.filter(obj_type=ct, obj_id__in=obj)
1615 else:
1616 ct = ContentType.objects.get_for_model(obj.__class__)
1617 return self.filter(obj_type=ct, obj_id=obj.pk)
1619 def __repr__(self):
1620 return "<GanetiError '%s'>" % self.msg
1622 @classmethod
1623 def store_error(cls, msg, obj, code, **kwargs):
1625 Create and save an error with the given information.
1627 @param msg error's message
1628 @param obj object (i.e. cluster or vm) affected by the error
1629 @param code error's code number
1631 ct = ContentType.objects.get_for_model(obj.__class__)
1632 is_cluster = isinstance(obj, Cluster)
1634 # 401 -- bad permissions
1635 # 401 is a cluster-specific error and thus shouldn't appear on any other
1636 # object.
1637 if code == 401:
1638 if not is_cluster:
1639 # NOTE: what we do here is almost like:
1640 # return self.store_error(msg=msg, code=code, obj=obj.cluster)
1641 # we just omit the recursiveness
1642 obj = obj.cluster
1643 ct = ContentType.objects.get_for_model(Cluster)
1644 is_cluster = True
1646 # 404 -- object not found
1647 # 404 can occur on any object, but when it occurs on a cluster, then
1648 # any of its children must not see the error again
1649 elif code == 404:
1650 if not is_cluster:
1651 # return if the error exists for cluster
1652 try:
1653 c_ct = ContentType.objects.get_for_model(Cluster)
1654 return cls.objects.filter(msg=msg, obj_type=c_ct,
1655 code=code,
1656 obj_id=obj.cluster_id,
1657 cleared=False)[0]
1659 except (cls.DoesNotExist, IndexError):
1660 # we want to proceed when the error is not
1661 # cluster-specific
1662 pass
1664 # XXX use a try/except instead of get_or_create(). get_or_create()
1665 # does not allow us to set cluster_id. This means we'd have to query
1666 # the cluster object to create the error. we can't guarantee the
1667 # cluster will already be queried so use create() instead which does
1668 # allow cluster_id
1669 try:
1670 return cls.objects.filter(msg=msg, obj_type=ct, obj_id=obj.pk,
1671 code=code, **kwargs)[0]
1673 except (cls.DoesNotExist, IndexError):
1674 cluster_id = obj.pk if is_cluster else obj.cluster_id
1676 return cls.objects.create(timestamp=datetime.now(), msg=msg,
1677 obj_type=ct, obj_id=obj.pk,
1678 cluster_id=cluster_id, code=code,
1679 **kwargs)
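# Illustrative sketch (not part of the original module): the error bookkeeping
# used by CachedClusterObject.refresh(). The cluster slug and message are made
# up. store_error() returns the existing row for a repeated identical error
# instead of creating a duplicate, and clear_errors() marks rows cleared rather
# than deleting them.
def _example_error_tracking():  # never called; for illustration only
    cluster = Cluster.objects.get(slug='example')   # hypothetical row
    GanetiError.store_error('connection refused', obj=cluster, code=500)
    GanetiError.store_error('connection refused', obj=cluster, code=500)  # reused
    GanetiError.objects.clear_errors(obj=cluster)   # sets cleared=True
    return GanetiError.objects.get_errors(obj=cluster).count()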
1682 class ClusterUser(models.Model):
1684 Base class for objects that may interact with a Cluster or VirtualMachine.
1687 name = models.CharField(max_length=128)
1688 real_type = models.ForeignKey(ContentType, related_name="+",
1689 editable=False, null=True, blank=True)
1691 def __repr__(self):
1692 return "<%s: %s>" % (str(self.real_type), self.name)
1694 def __unicode__(self):
1695 return self.name
1697 def save(self, *args, **kwargs):
1698 if not self.id:
1699 self.real_type = self._get_real_type()
1700 super(ClusterUser, self).save(*args, **kwargs)
1702 def get_absolute_url(self):
1703 return self.cast().get_absolute_url()
1705 @property
1706 def permissable(self):
1707 """ returns an object that can be granted permissions """
1708 return self.cast().permissable
1710 @classmethod
1711 def _get_real_type(cls):
1712 return ContentType.objects.get_for_model(cls)
1714 def cast(self):
1715 return self.real_type.get_object_for_this_type(pk=self.pk)
1717 def used_resources(self, cluster=None, only_running=True):
1719 Return dictionary of total resources used by VMs that this ClusterUser
1720 has perms to.
1721 @param cluster if set, get only VMs from specified cluster
1722 @param only_running if set, get only running VMs
1724 # XXX - order_by must be cleared or it breaks annotation grouping since
1725 # the default order_by field is also added to the group_by clause
1726 base = self.virtual_machines.all().order_by()
1728 # XXX - use a custom aggregate for ram and vcpu count when filtering by
1729 # running. this allows us to execute a single query.
1731 # XXX - quotes must be used in this order. postgresql quirk
1732 if only_running:
1733 sum_ram = SumIf('ram', condition="status='running'")
1734 sum_vcpus = SumIf('virtual_cpus', condition="status='running'")
1735 else:
1736 sum_ram = Sum('ram')
1737 sum_vcpus = Sum('virtual_cpus')
1739 base = base.exclude(ram=-1, disk_size=-1, virtual_cpus=-1)
1741 if cluster:
1742 base = base.filter(cluster=cluster)
1743 result = base.aggregate(ram=sum_ram, disk=Sum('disk_size'),
1744 virtual_cpus=sum_vcpus)
1746 # repack with zeros instead of Nones
1747 if result['disk'] is None:
1748 result['disk'] = 0
1749 if result['ram'] is None:
1750 result['ram'] = 0
1751 if result['virtual_cpus'] is None:
1752 result['virtual_cpus'] = 0
1753 return result
1755 else:
1756 base = base.values('cluster').annotate(uram=sum_ram,
1757 udisk=Sum('disk_size'),
1758 uvirtual_cpus=sum_vcpus)
1760 # repack as dictionary
1761 result = {}
1762 for used in base:
1763 # repack with zeros instead of Nones, change index names
1764 used["ram"] = used.pop("uram") or 0
1765 used["disk"] = used.pop("udisk") or 0
1766 used["virtual_cpus"] = used.pop("uvirtual_cpus") or 0
1767 result[used.pop('cluster')] = used
1769 return result
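# Illustrative sketch (not part of the original module): comparing a user's
# current consumption against the quota on one cluster. Names are hypothetical;
# per the Quota docstring, the absence of a limit means unlimited usage.
def _example_usage_vs_quota():  # never called; for illustration only
    owner = ClusterUser.objects.get(name='jdoe')    # hypothetical row
    cluster = Cluster.objects.get(slug='example')   # hypothetical row
    used = owner.used_resources(cluster=cluster, only_running=True)
    quota = cluster.get_quota(owner)
    # e.g. used  == {'ram': 2048, 'disk': 40960, 'virtual_cpus': 2}
    #      quota == {'default': 0, 'ram': 4096, 'disk': 80000, 'virtual_cpus': 4}
    return used, quota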
1772 class Profile(ClusterUser):
1774 Profile associated with a django.contrib.auth.User object.
1776 user = models.OneToOneField(User)
1778 def get_absolute_url(self):
1779 return self.user.get_absolute_url()
1781 def grant(self, perm, obj):
1782 self.user.grant(perm, obj)
1784 def set_perms(self, perms, obj):
1785 self.user.set_perms(perms, obj)
1787 def get_objects_any_perms(self, *args, **kwargs):
1788 return self.user.get_objects_any_perms(*args, **kwargs)
1790 def has_perm(self, *args, **kwargs):
1791 return self.user.has_perm(*args, **kwargs)
1793 @property
1794 def permissable(self):
1795 """ returns an object that can be granted permissions """
1796 return self.user
1799 class Organization(ClusterUser):
1801 An organization is used for grouping Users.
1803 Organizations are matched with an instance of contrib.auth.models.Group.
1804 This model exists so that each contrib.auth.models.Group has a 1:1 relation
1805 with a ClusterUser on which quotas and permissions can be assigned.
1808 group = models.OneToOneField(Group, related_name='organization')
1810 def get_absolute_url(self):
1811 return self.group.get_absolute_url()
1813 def grant(self, perm, object):
1814 self.group.grant(perm, object)
1816 def set_perms(self, perms, object):
1817 self.group.set_perms(perms, object)
1819 def get_objects_any_perms(self, *args, **kwargs):
1820 return self.group.get_objects_any_perms(*args, **kwargs)
1822 def has_perm(self, *args, **kwargs):
1823 return self.group.has_perm(*args, **kwargs)
1825 @property
1826 def permissable(self):
1827 """ returns an object that can be granted permissions """
1828 return self.group
1831 class Quota(models.Model):
1833 A resource limit imposed on a ClusterUser for a given Cluster. The
1834 attributes of this model represent maximum values the ClusterUser can
1835 consume. The absence of a Quota indicates unlimited usage.
1837 user = models.ForeignKey(ClusterUser, related_name='quotas')
1838 cluster = models.ForeignKey(Cluster, related_name='quotas')
1840 ram = models.IntegerField(default=0, null=True, blank=True)
1841 disk = models.IntegerField(default=0, null=True, blank=True)
1842 virtual_cpus = models.IntegerField(default=0, null=True, blank=True)
1845 class SSHKey(models.Model):
1847 Model representing a user's SSH public key. Virtual machines may rely on
1848 many SSH keys.
1850 key = models.TextField(validators=[validate_sshkey])
1851 #filename = models.CharField(max_length=128) # saves key file's name
1852 user = models.ForeignKey(User, related_name='ssh_keys')
1855 def create_profile(sender, instance, **kwargs):
1857 Create a profile object whenever a new user is created; also keep the
1858 profile name synchronized with the username.
1860 try:
1861 profile, new = Profile.objects.get_or_create(user=instance)
1862 if profile.name != instance.username:
1863 profile.name = instance.username
1864 profile.save()
1865 except DatabaseError:
1866 # XXX - since we're using south to track migrations the Profile table
1867 # won't be available the first time syncdb is run. Catch the error
1868 # here and let the south migration handle it.
1869 pass
1872 def update_cluster_hash(sender, instance, **kwargs):
1874 Updates the Cluster hash for all of its VirtualMachines, Nodes, and Jobs
1876 instance.virtual_machines.all().update(cluster_hash=instance.hash)
1877 instance.jobs.all().update(cluster_hash=instance.hash)
1878 instance.nodes.all().update(cluster_hash=instance.hash)
1881 def update_organization(sender, instance, **kwargs):
1883 Creates an Organization whenever a contrib.auth.models.Group is created
1885 org, new = Organization.objects.get_or_create(group=instance)
1886 org.name = instance.name
1887 org.save()
1889 post_save.connect(create_profile, sender=User)
1890 post_save.connect(update_cluster_hash, sender=Cluster)
1891 post_save.connect(update_organization, sender=Group)
1893 # Disconnect create_default_site from django.contrib.sites so that
1894 # the useless table for sites is not created. This will be
1895 # reconnected for other apps to use in update_sites_module.
1896 post_syncdb.disconnect(create_default_site, sender=sites_app)
1897 post_syncdb.connect(management.update_sites_module, sender=sites_app,
1898 dispatch_uid="ganeti.management.update_sites_module")
1901 def regenerate_cu_children(sender, **kwargs):
1903 Resets may destroy Profiles and/or Organizations. We need to regenerate
1904 them.
1907 # So. What are we actually doing here?
1908 # Whenever a User or Group is saved, the associated Profile or
1909 # Organization is also updated. This means that, if a Profile for a User
1910 # is absent, it will be created.
1911 # More importantly, *why* might a Profile be missing? Simple. Resets of
1912 # the ganeti app destroy them. This shouldn't happen in production, and
1913 # only occasionally in development, but it's good to explicitly handle
1914 # this particular case so that missing Profiles not resulting from a reset
1915 # are easier to diagnose.
1916 try:
1917 for user in User.objects.filter(profile__isnull=True):
1918 user.save()
1919 for group in Group.objects.filter(organization__isnull=True):
1920 group.save()
1921 except DatabaseError:
1922 # XXX - since we're using south to track migrations the Profile table
1923 # won't be available the first time syncdb is run. Catch the error
1924 # here and let the south migration handle it.
1925 pass
1927 post_syncdb.connect(regenerate_cu_children)
1930 def log_group_create(sender, editor, **kwargs):
1931 """ log group creation signal """
1932 log_action('CREATE', editor, sender)
1935 def log_group_edit(sender, editor, **kwargs):
1936 """ log group edit signal """
1937 log_action('EDIT', editor, sender)
1940 muddle_user_signals.view_group_created.connect(log_group_create)
1941 muddle_user_signals.view_group_edited.connect(log_group_edit)
1944 def refresh_objects(sender, **kwargs):
1946 This was originally the code in the 0009
1947 and then 0010 'force_object_refresh' migration
1949 Force a refresh of all Cluster, Nodes, and VirtualMachines, and
1950 import any new Nodes.
1953 if kwargs.get('app', False) and kwargs['app'] == 'ganeti_web':
1954 Cluster.objects.all().update(mtime=None)
1955 Node.objects.all().update(mtime=None)
1956 VirtualMachine.objects.all().update(mtime=None)
1958 write = sys.stdout.write
1959 flush = sys.stdout.flush
1961 def wf(str, newline=False):
1962 if newline:
1963 write('\n')
1964 write(str)
1965 flush()
1967 wf('- Refresh Cached Cluster Objects')
1968 wf(' > Synchronizing Cluster Nodes ', True)
1969 flush()
1970 for cluster in Cluster.objects.all().iterator():
1971 try:
1972 cluster.sync_nodes()
1973 wf('.')
1974 except GanetiApiError:
1975 wf('E')
1977 wf(' > Refreshing Node Caches ', True)
1978 for node in Node.objects.all().iterator():
1979 try:
1980 wf('.')
1981 except GanetiApiError:
1982 wf('E')
1984 wf(' > Refreshing Instance Caches ', True)
1985 for instance in VirtualMachine.objects.all().iterator():
1986 try:
1987 wf('.')
1988 except GanetiApiError:
1989 wf('E')
1990 wf('\n')
1993 # Set this as post_migrate hook.
1994 post_migrate.connect(refresh_objects)
1996 # Register permissions on our models.
1997 # These are part of the DB schema and should not be changed without serious
1998 # forethought.
1999 # You *must* syncdb after you change these.
2000 register(permissions.CLUSTER_PARAMS, Cluster, 'ganeti_web')
2001 register(permissions.VIRTUAL_MACHINE_PARAMS, VirtualMachine, 'ganeti_web')
2004 # register log actions
2005 register_log_actions()