3 # Copyright (C) 2010 Oregon State University et al.
4 # Copyright (C) 2010 Greek Research and Technology Network
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
import binascii
import cPickle
import random
import re
import string
from datetime import datetime, timedelta
from hashlib import sha1

from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites import models as sites_app
from django.contrib.sites.management import create_default_site
from django.core.validators import RegexValidator, MinValueValidator
from django.db import models
from django.db.models import BooleanField, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, post_syncdb
from django.db.utils import DatabaseError
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _

from django_fields.fields import PickleField

from ganeti_web.logs import register_log_actions

from object_log.models import LogItem
log_action = LogItem.objects.log_action

from object_permissions.registration import register

from muddle_users import signals as muddle_user_signals

from ganeti_web import constants, management, permissions
from ganeti_web.fields import (PatchedEncryptedCharField,
                               PreciseDateTimeField, SumIf)
from ganeti_web.util import client
from ganeti_web.util.client import GanetiApiError, REPLACE_DISK_AUTO

from south.signals import post_migrate

if settings.VNC_PROXY:
    # NOTE(review): the vapclient import list was truncated in the source;
    # request_ssh is used by VirtualMachine.setup_ssh_forwarding -- confirm.
    from ganeti_web.util.vncdaemon.vapclient import (request_forwarding,
                                                     request_ssh)
class QuerySetManager(models.Manager):
    """
    Useful if you want to define manager methods that need to chain. In this
    case create a QuerySet class within your model and add all of your methods
    directly to the queryset. Example:

    class Foo(models.Model):
        enabled = fields.BooleanField()
        dirty = fields.BooleanField()

        class QuerySet:
            def active(self):
                return self.filter(enabled=True)

            def clean(self):
                return self.filter(dirty=False)

    Foo.objects.active().clean()
    """

    def __getattr__(self, name, *args):
        # Cull under/dunder names to avoid certain kinds of recursion. Django
        # isn't super-bright here.
        if name.startswith('_'):
            # Refuse to proxy private names to the queryset; without this the
            # attribute lookup recurses through get_query_set().
            raise AttributeError(name)
        return getattr(self.get_query_set(), name, *args)

    def get_query_set(self):
        # Delegate to a QuerySet class defined on the model itself.
        return self.model.QuerySet(self.model)
def generate_random_password(length=12):
    """
    Generate a random alphanumeric sequence of the specified length.

    @param length: number of characters to generate (default 12)
    @return: str of random letters and digits (no repeated characters,
        since random.sample draws without replacement)
    """
    # string.ascii_letters instead of the Python-2-only, locale-dependent
    # string.letters: identical output in the default locale, but portable
    # and deterministic across locales.
    return "".join(random.sample(string.ascii_letters + string.digits,
                                 length))
#: Job statuses after which a job will never change state again.
FINISHED_JOBS = 'success', 'unknown', 'error'

#: Cache of Ganeti RAPI clients, keyed by credentials hash.  Restored here
#: because get_rapi()/clear_rapi_cache() below read it.
RAPI_CACHE = {}
#: Maps cluster id -> the hash currently cached for it, so a stale client
#: can be evicted when credentials change.
RAPI_CACHE_HASHES = {}
def get_rapi(hash, cluster):
    """
    Retrieves the cached Ganeti RAPI client for a given hash. The Hash is
    derived from the connection credentials required for a cluster. If the
    client is not yet cached, it will be created and added.

    If a hash does not correspond to any cluster then Cluster.DoesNotExist
    will be raised.

    @param cluster - either a cluster object, or ID of object. This is used
    for resolving the cluster if the client is not already found. The id is
    used rather than the hash, because the hash is mutable.

    @return a Ganeti RAPI client.
    """
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]

    # always look up the instance, even if we were given a Cluster instance
    # it ensures we are retrieving the latest credentials. This helps avoid
    # stale credentials. Retrieve only the values because we don't actually
    # need another Cluster instance here.
    if isinstance(cluster, (Cluster,)):
        cluster = cluster.id  # NOTE(review): reconstructed line -- confirm
    (credentials,) = Cluster.objects.filter(id=cluster) \
        .values_list('hash', 'hostname', 'port', 'username', 'password')
    hash, host, port, user, password = credentials

    # XXX django-fields only stores str, convert to None if needed
    password = Cluster.decrypt_password(password) if password else None
    password = None if password in ('None', '') else password

    # now that we know hash is fresh, check cache again. The original hash
    # could have been stale. This avoids constructing a new RAPI that
    # already exists.
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]

    # delete any old version of the client that was cached.
    if cluster in RAPI_CACHE_HASHES:
        del RAPI_CACHE[RAPI_CACHE_HASHES[cluster]]

    # Set connect timeout in settings.py so that you do not learn patience.
    rapi = client.GanetiRapiClient(host, port, user, password,
                                   timeout=settings.RAPI_CONNECT_TIMEOUT)
    RAPI_CACHE[hash] = rapi
    RAPI_CACHE_HASHES[cluster] = hash
    return rapi
def clear_rapi_cache():
    """
    clears the rapi cache
    """
    # Both maps must be cleared together: RAPI_CACHE_HASHES indexes into
    # RAPI_CACHE, so clearing only one leaves dangling references.
    RAPI_CACHE.clear()
    RAPI_CACHE_HASHES.clear()
#: Matches "ssh-rsa <base64 blob> <comment>" style public keys (also dsa/dss).
ssh_public_key_re = re.compile(
    r'^ssh-(rsa|dsa|dss) [A-Z0-9+/=]+ .+$', re.IGNORECASE)
ssh_public_key_error = _("Enter a valid RSA or DSA SSH key.")
# NOTE(review): the third RegexValidator argument (error code) was lost in
# the source; "invalid" reconstructed -- confirm against upstream.
validate_sshkey = RegexValidator(ssh_public_key_re, ssh_public_key_error,
                                 "invalid")
class CachedClusterObject(models.Model):
    """
    Parent class for objects which belong to Ganeti but have cached data in
    the database.

    The main point of this class is to permit saving lots of data from Ganeti
    so that we don't have to look things up constantly. The Ganeti RAPI is
    slow, so avoiding it as much as possible is a good idea.

    This class provides transparent caching for all of the data that it
    serializes; no explicit cache accesses are required.

    This model is abstract and may not be instantiated on its own.
    """

    serialized_info = models.TextField(default="", editable=False)
    mtime = PreciseDateTimeField(null=True, editable=False)
    cached = PreciseDateTimeField(null=True, editable=False)
    ignore_cache = models.BooleanField(default=False)

    # Transient, per-instance state (not persisted).
    # NOTE(review): these class attributes were in a gap of the source and
    # are reconstructed from how the methods below use them -- confirm.
    last_job_id = None   # read by check_job_status()
    __info = None        # lazily deserialized copy of serialized_info
    error = None         # last RAPI error message, if any
    ctime = None         # set by parse_transient_info()
    deleted = False

    class Meta:
        # docstring above says this model is abstract
        abstract = True

    def save(self, *args, **kwargs):
        """
        overridden to ensure info is serialized prior to save
        """
        if not self.serialized_info:
            self.serialized_info = cPickle.dumps(self.__info)
        super(CachedClusterObject, self).save(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(CachedClusterObject, self).__init__(*args, **kwargs)
        # NOTE(review): body truncated in source; loading cached info on
        # instantiation matches the lazy-update design described on
        # VirtualMachine -- confirm.
        self.load_info()

    @property
    def info(self):
        """
        A dictionary of metadata for this object.

        This is a proxy for the ``serialized_info`` field. Reads from this
        property lazily access the field, and writes to this property will be
        lazily saved.

        Writes to this property do *not* force serialization.
        """
        if self.__info is None:
            if self.serialized_info:
                self.__info = cPickle.loads(str(self.serialized_info))
        return self.__info

    def _set_info(self, value):
        self.__info = value
        if value is not None:
            self.parse_info()
            # force re-serialization on the next save()
            self.serialized_info = ""

    info = info.setter(_set_info)

    def load_info(self):
        """
        Load cached info retrieved from the ganeti cluster. This function
        includes a lazy cache mechanism that uses a timer to decide whether or
        not to refresh the cached information with new information from the
        ganeti cluster.

        This will ignore the cache when self.ignore_cache is True
        """
        epsilon = timedelta(0, 0, 0, settings.LAZY_CACHE_REFRESH)

        if self.id:
            if (self.ignore_cache
                    or self.cached is None
                    or datetime.now() > self.cached + epsilon):
                self.refresh()
            elif self.info:
                self.parse_transient_info()
            else:
                self.error = 'No Cached Info'

    def parse_info(self):
        """
        Parse all of the attached metadata, and attach it to this object.
        """
        self.parse_transient_info()
        data = self.parse_persistent_info(self.info)
        for k in data:
            setattr(self, k, data[k])

    def refresh(self):
        """
        Retrieve and parse info from the ganeti cluster. If successfully
        retrieved and parsed, this method will also call save().

        If communication with Ganeti fails, an error will be stored in
        self.error.
        """
        job_data = self.check_job_status()
        for k, v in job_data.items():
            setattr(self, k, v)

        # XXX this try/except is far too big; see if we can pare it down.
        try:
            info_ = self._refresh()
            if info_:
                if info_['mtime']:
                    mtime = datetime.fromtimestamp(info_['mtime'])
                else:
                    mtime = None
                self.cached = datetime.now()
            else:
                # no info retrieved, use current mtime
                mtime = self.mtime

            if self.id and (self.mtime is None or mtime > self.mtime):
                # there was an update. Set info and save the object
                self.info = info_
                self.save()
            else:
                # There was no change on the server. Only update the cache
                # time. This bypasses the info serialization mechanism and
                # uses a smaller query.
                if job_data:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached, **job_data)
                elif self.id is not None:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached)

        except GanetiApiError as e:
            # Use regular expressions to match the quoted message
            # given by GanetiApiError. '\\1' is a group substitution
            # which places the first group '('|\")' in it's place.
            comp = re.compile("('|\")(?P<msg>.*)\\1")
            err = comp.search(str(e))
            # Any search that has 0 results will just return None.
            # That is why we must check for err before proceeding.
            if err:
                msg = err.groupdict()['msg']
            else:
                msg = str(e)
            self.error = msg
            GanetiError.store_error(msg, obj=self, code=e.code)

        else:
            # success path: clear any previously recorded error
            if self.error:
                self.error = None
                GanetiError.objects.clear_errors(obj=self)

    def _refresh(self):
        """
        Fetch raw data from the Ganeti cluster.

        This must be implemented by children of this class.
        """
        raise NotImplementedError

    def check_job_status(self):
        """
        Check the status of any pending jobs for this object and process
        completed ones.

        @returns dict of fields to update on this model.
        """
        if not self.last_job_id:
            return {}

        ct = ContentType.objects.get_for_model(self)
        qs = Job.objects.filter(content_type=ct, object_id=self.pk)
        jobs = qs.order_by("job_id")

        updates = {}
        for job in jobs:
            status = 'unknown'
            op = None

            try:
                data = self.rapi.GetJobStatus(job.job_id)
                op = data.get('ops', None)
                if op:
                    # NOTE(review): reduction of the ops list to a single
                    # OP_ID was lost in the source; last op used -- confirm
                    op = op[-1]['OP_ID']
                status = data['status']
            except GanetiApiError:
                pass

            if status in ('success', 'error'):
                for k, v in Job.parse_persistent_info(data).items():
                    setattr(job, k, v)

            if status == 'unknown':
                job.status = "unknown"
                job.ignore_cache = False

            if status in ('success', 'error', 'unknown'):
                _updates = self._complete_job(self.cluster_id,
                                              self.hostname, op, status)
                # XXX if the delete flag is set in updates then delete this
                # model this happens here because _complete_job cannot delete
                # this model
                if _updates:
                    if 'deleted' in _updates:
                        # Delete ourselves. Also delete the job that caused us
                        # to delete ourselves; see #8439 for "fun" details.
                        # Order matters; the job's deletion cascades over us.
                        # Revisit that when we finally nuke all this caching
                        job.delete()
                        self.delete()
                        del _updates['deleted']
                    updates.update(_updates)

            # we only care about the very last job for resetting the cache
            # flags
            if status in ('success', 'error', 'unknown') or not jobs:
                updates['ignore_cache'] = False
                updates['last_job'] = None

        return updates

    @classmethod
    def _complete_job(cls, cluster_id, hostname, op, status):
        """
        Process a completed job. This method will make any updates to related
        classes (like deleting an instance template) and return any data that
        should be updated. This is a class method so that this processing can
        be done without a full instance.

        @returns dict of updated values
        """
        pass

    def parse_transient_info(self):
        """
        Parse properties from cached info that is stored on the class but not
        in the database.

        These properties will be loaded every time the object is instantiated.
        Properties stored on the class cannot be search efficiently via the
        django query api.

        This method is specific to the child object.
        """
        info_ = self.info
        # XXX ganeti 2.1 ctime is always None
        # XXX this means that we could nuke the conditionals!
        if info_['ctime'] is not None:
            self.ctime = datetime.fromtimestamp(info_['ctime'])

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse properties from cached info that are stored in the database.

        These properties will be searchable by the django query api.

        This method is specific to the child object.
        """
        # mtime is sometimes None if object has never been modified
        if info['mtime'] is None:
            return {'mtime': None}
        return {'mtime': datetime.fromtimestamp(info['mtime'])}
class JobManager(models.Manager):
    """
    Custom manager for Ganeti Jobs model
    """

    def create(self, **kwargs):
        """ helper method for creating a job with disabled cache """
        job = Job(ignore_cache=True, **kwargs)
        # force_insert: this is always a brand-new row, never an update
        job.save(force_insert=True)
        return job
class Job(CachedClusterObject):
    """
    model representing a job being run on a ganeti Cluster. This includes
    operations such as creating or deleting a virtual machine.

    Jobs are a special type of CachedClusterObject. Jobs run once then become
    immutable. The lazy cache is modified to become permanent once a complete
    status (success/error) has been detected. The cache can be disabled by
    setting ignore_cache=True.
    """

    job_id = models.IntegerField()
    content_type = models.ForeignKey(ContentType, related_name="+")
    object_id = models.IntegerField()
    obj = GenericForeignKey('content_type', 'object_id')
    cluster = models.ForeignKey('Cluster', related_name='jobs',
                                editable=False)
    cluster_hash = models.CharField(max_length=40, editable=False)

    finished = models.DateTimeField(null=True, blank=True)
    status = models.CharField(max_length=10)
    op = models.CharField(max_length=50)

    objects = JobManager()

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances
        """
        if self.id is None or self.cluster_hash == '':
            self.cluster_hash = self.cluster.hash

        super(Job, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        job = '%s/job/(?P<job_id>\d+)' % self.cluster
        return 'ganeti_web.views.jobs.detail', (), {'job': job}

    @property
    def rapi(self):
        return get_rapi(self.cluster_hash, self.cluster_id)

    def _refresh(self):
        return self.rapi.GetJobStatus(self.job_id)

    def load_info(self):
        """
        Load info for class. This will load from ganeti if ignore_cache==True,
        otherwise this will always load from the cache.
        """
        if self.id and (self.ignore_cache or self.info is None):
            try:
                self.refresh()
            except GanetiApiError as e:
                # if the Job has been archived then we don't know whether it
                # was successful or not. Mark it as unknown.
                if e.code == 404:
                    self.status = 'unknown'
                    self.save()
                else:
                    # its possible the cluster or credentials are bad. fail
                    # silently
                    pass

    def refresh(self):
        self.info = self._refresh()
        self.save()

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse status and turn off cache bypass flag if job has finished
        """
        data = {'status': info['status'],
                'op': info['ops'][-1]['OP_ID']}
        if data['status'] in ('error', 'success'):
            data['ignore_cache'] = False
        if 'end_ts' in info:
            data['finished'] = cls.parse_end_timestamp(info)
        return data

    @staticmethod
    def parse_end_timestamp(info):
        # end_ts is a (seconds, microseconds) pair
        sec, micro = info['end_ts']
        return datetime.fromtimestamp(sec + (micro / 1000000.0))

    def parse_transient_info(self):
        # Jobs have no transient properties to parse.
        pass

    @property
    def current_operation(self):
        """
        Jobs may consist of multiple commands/operations. This helper
        method will return the operation that is currently running or errored
        out, or the last operation if all operations have completed

        @returns raw name of the current operation
        """
        info = self.info
        index = 0
        for i in range(len(info['opstatus'])):
            if info['opstatus'][i] != 'success':
                index = i
                break
        return info['ops'][index]['OP_ID']

    @property
    def operation(self):
        """
        Returns the last operation, which is generally the primary operation.
        """
        return self.info['ops'][-1]['OP_ID']

    def __repr__(self):
        return "<Job %d (%d), status %r>" % (self.id, self.job_id,
                                             self.status)

    __unicode__ = __repr__
class VirtualMachine(CachedClusterObject):
    """
    The VirtualMachine (VM) model represents VMs within a Ganeti cluster.

    The majority of properties are a cache for data stored in the cluster.
    All data retrieved via the RAPI is stored in VirtualMachine.info, and
    serialized automatically into VirtualMachine.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within VirtualMachine.info.

    This object uses a lazy update mechanism on instantiation. If the cached
    info from the Ganeti cluster has expired, it will trigger an update. This
    allows the cache to function in the absence of a periodic update mechanism
    such as Cron, Celery, or Threads.

    XXX Serialized_info can possibly be changed to a CharField if an upper
    limit can be determined. (Later Date, if it will optimize db)
    """

    cluster = models.ForeignKey('Cluster', related_name='virtual_machines',
                                editable=False, default=0)
    hostname = models.CharField(max_length=128, db_index=True)
    owner = models.ForeignKey('ClusterUser', related_name='virtual_machines',
                              null=True, blank=True,
                              on_delete=models.SET_NULL)
    virtual_cpus = models.IntegerField(default=-1)
    disk_size = models.IntegerField(default=-1)
    ram = models.IntegerField(default=-1)
    minram = models.IntegerField(default=-1)
    cluster_hash = models.CharField(max_length=40, editable=False)
    operating_system = models.CharField(max_length=128)
    status = models.CharField(max_length=14)

    # node relations
    primary_node = models.ForeignKey('Node', related_name='primary_vms',
                                     null=True, blank=True)
    secondary_node = models.ForeignKey('Node', related_name='secondary_vms',
                                       null=True, blank=True)

    # The last job reference indicates that there is at least one pending job
    # for this virtual machine. There may be more than one job, and that can
    # never be prevented. This just indicates that job(s) are pending and the
    # job related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', related_name="+", null=True,
                                 blank=True)

    # deleted flag indicates a VM is being deleted, but the job has not
    # completed yet. VMs that have pending_delete are still displayed in lists
    # and counted in quotas, but only so status can be checked.
    pending_delete = models.BooleanField(default=False)
    deleted = False

    # Template temporarily stores parameters used to create this virtual
    # machine. This template is used to recreate the values entered into the
    # form.
    template = models.ForeignKey("VirtualMachineTemplate",
                                 related_name="instances", null=True,
                                 blank=True)

    class Meta:
        ordering = ["hostname"]
        unique_together = (("cluster", "hostname"),)

    def __unicode__(self):
        return self.hostname

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash

        # NOTE(review): the owner-tag synchronization below had several
        # lines missing in the source and is reconstructed -- confirm.
        info_ = self.info
        if info_:
            found = False
            remove = []
            if self.cluster.username:
                for tag in info_['tags']:
                    # Update owner Tag. Make sure the tag is set to the owner
                    # that is set in webmgr.
                    if tag.startswith(constants.OWNER_TAG):
                        id = int(tag[len(constants.OWNER_TAG):])
                        # Since there is no 'update tag' delete old tag and
                        # replace with tag containing correct owner id.
                        if id == self.owner_id:
                            found = True
                        else:
                            remove.append(tag)
                if remove:
                    self.rapi.DeleteInstanceTags(self.hostname, remove)
                    for tag in remove:
                        info_['tags'].remove(tag)
                if self.owner_id and not found:
                    tag = '%s%s' % (constants.OWNER_TAG, self.owner_id)
                    self.rapi.AddInstanceTags(self.hostname, [tag])
                    self.info['tags'].append(tag)

        super(VirtualMachine, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        """
        Return absolute url for this instance.
        """
        return 'instance-detail', (), {'cluster_slug': self.cluster.slug,
                                       'instance': self.hostname}

    @property
    def rapi(self):
        return get_rapi(self.cluster_hash, self.cluster_id)

    @property
    def is_running(self):
        return self.status == 'running'

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, included persistent properties that
        are stored in the database
        """
        data = super(VirtualMachine, cls).parse_persistent_info(info)

        # Parse resource properties
        data['ram'] = info['beparams']['memory']
        data['virtual_cpus'] = info['beparams']['vcpus']
        # Sum up the size of each disk used by the VM
        disk_size = 0
        for disk in info['disk.sizes']:
            disk_size += disk
        data['disk_size'] = disk_size
        data['operating_system'] = info['os']
        data['status'] = info['status']

        primary = info['pnode']
        if primary:
            try:
                data['primary_node'] = Node.objects.get(hostname=primary)
            except Node.DoesNotExist:
                # node is not created yet. fail silently
                data['primary_node'] = None
        else:
            data['primary_node'] = None

        secondary = info['snodes']
        if len(secondary):
            secondary = secondary[0]
            try:
                data['secondary_node'] = Node.objects.get(hostname=secondary)
            except Node.DoesNotExist:
                # node is not created yet. fail silently
                data['secondary_node'] = None
        else:
            data['secondary_node'] = None

        return data

    @classmethod
    def _complete_job(cls, cluster_id, hostname, op, status):
        """
        if the cache bypass is enabled then check the status of the last job
        when the job is complete we can reenable the cache.

        @returns - dictionary of values that were updates
        """
        if status == 'unknown':
            # unknown status, the job was archived before it's final status
            # was polled. Impossible to tell what happened. Clear the job
            # so it is no longer polled.
            #
            # XXX This VM might be added by the CLI and be in an invalid
            # pending_delete state. clearing pending_delete prevents this
            # but will result in "missing" vms in some cases.
            return dict(pending_delete=False)

        base = VirtualMachine.objects.filter(cluster=cluster_id,
                                             hostname=hostname)
        if op == 'OP_INSTANCE_REMOVE':
            if status == 'success':
                # XXX can't actually delete here since it would cause a
                # recursion error; let the caller delete the model.
                return dict(deleted=True)

        elif op == 'OP_INSTANCE_CREATE' and status == 'success':
            # XXX must update before deleting the template to maintain
            # referential integrity. as a consequence return no other
            # updates.
            base.update(template=None)
            VirtualMachineTemplate.objects \
                .filter(instances__hostname=hostname,
                        instances__cluster=cluster_id) \
                .delete()
            return dict(template=None)

    def _refresh(self):
        # XXX if delete is pending then no need to refresh this object.
        if self.pending_delete or self.template_id:
            return None
        return self.rapi.GetInstance(self.hostname)

    def shutdown(self, timeout=None):
        if timeout is None:
            id = self.rapi.ShutdownInstance(self.hostname)
        else:
            id = self.rapi.ShutdownInstance(self.hostname, timeout=timeout)

        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def startup(self):
        id = self.rapi.StartupInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def reboot(self):
        id = self.rapi.RebootInstance(self.hostname)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def migrate(self, mode='live', cleanup=False):
        """
        Migrates this VirtualMachine to another node.

        Only works if the disk type is DRDB.

        @param mode: live or non-live
        @param cleanup: clean up a previous migration, default is False
        """
        id = self.rapi.MigrateInstance(self.hostname, mode, cleanup)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def replace_disks(self, mode=REPLACE_DISK_AUTO, disks=None, node=None,
                      iallocator=None):
        id = self.rapi.ReplaceInstanceDisks(self.hostname, disks, mode, node,
                                            iallocator)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def setup_ssh_forwarding(self, sport=0):
        """
        Poke a proxy to start SSH forwarding.

        Returns None if no proxy is configured, or if there was an error
        contacting the proxy.
        """
        command = self.rapi.GetInstanceConsole(self.hostname)["command"]

        if settings.VNC_PROXY:
            proxy_server = settings.VNC_PROXY.split(":")
            password = generate_random_password()
            sport = request_ssh(proxy_server, sport, self.info["pnode"],
                                self.info["network_port"], password, command)

            if sport:
                return proxy_server[0], sport, password

    def setup_vnc_forwarding(self, sport=0, tls=False):
        """
        Obtain VNC forwarding information, optionally configuring a proxy.

        Returns None if a proxy is configured and there was an error
        contacting the proxy.
        """
        password = ''
        info_ = self.info
        port = info_['network_port']
        node = info_['pnode']

        # use proxy for VNC connection
        if settings.VNC_PROXY:
            proxy_server = settings.VNC_PROXY.split(":")
            password = generate_random_password()
            result = request_forwarding(proxy_server, node, port, password,
                                        sport=sport, tls=tls)
            if result:
                return proxy_server[0], int(result), password
            else:
                return None
        else:
            return node, port, password

    def __repr__(self):
        return "<VirtualMachine: '%s'>" % self.hostname
class Node(CachedClusterObject):
    """
    The Node model represents nodes within a Ganeti cluster.

    The majority of properties are a cache for data stored in the cluster.
    All data retrieved via the RAPI is stored in VirtualMachine.info, and
    serialized automatically into VirtualMachine.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within VirtualMachine.info.
    """

    # Materialized as a tuple: the original generator expression would be
    # exhausted after the first iteration, breaking repeated form rendering.
    ROLE_CHOICES = tuple((k, v) for k, v in constants.NODE_ROLE_MAP.items())

    cluster = models.ForeignKey('Cluster', related_name='nodes')
    hostname = models.CharField(max_length=128, unique=True)
    cluster_hash = models.CharField(max_length=40, editable=False)
    offline = models.BooleanField()
    role = models.CharField(max_length=1, choices=ROLE_CHOICES)
    ram_total = models.IntegerField(default=-1)
    ram_free = models.IntegerField(default=-1)
    disk_total = models.IntegerField(default=-1)
    disk_free = models.IntegerField(default=-1)
    cpus = models.IntegerField(null=True, blank=True)

    # The last job reference indicates that there is at least one pending job
    # for this virtual machine. There may be more than one job, and that can
    # never be prevented. This just indicates that job(s) are pending and the
    # job related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', related_name="+", null=True,
                                 blank=True)

    def __unicode__(self):
        return self.hostname

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash
        super(Node, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        """
        Return absolute url for this node.
        """
        return 'node-detail', (), {'cluster_slug': self.cluster.slug,
                                   'host': self.hostname}

    def _refresh(self):
        """ returns node info from the ganeti server """
        return self.rapi.GetNode(self.hostname)

    @property
    def rapi(self):
        return get_rapi(self.cluster_hash, self.cluster_id)

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, included persistent properties that
        are stored in the database
        """
        data = super(Node, cls).parse_persistent_info(info)

        # Parse resource properties
        data['ram_total'] = info.get("mtotal") or 0
        data['ram_free'] = info.get("mfree") or 0
        data['disk_total'] = info.get("dtotal") or 0
        data['disk_free'] = info.get("dfree") or 0
        data['cpus'] = info.get("csockets")
        data['offline'] = info['offline']
        data['role'] = info['role']
        return data

    @property
    def ram(self):
        """ returns dict of free and total ram """
        values = VirtualMachine.objects \
            .filter(Q(primary_node=self) | Q(secondary_node=self)) \
            .filter(status='running') \
            .exclude(ram=-1).order_by() \
            .aggregate(used=Sum('ram'))

        total = self.ram_total
        used = total - self.ram_free
        allocated = values.get("used") or 0
        free = total - allocated if allocated >= 0 and total >= 0 else -1

        return {
            'total': total,
            'free': free,
            'allocated': allocated,
            'used': used,
        }

    @property
    def disk(self):
        """ returns dict of free and total disk space """
        values = VirtualMachine.objects \
            .filter(Q(primary_node=self) | Q(secondary_node=self)) \
            .exclude(disk_size=-1).order_by() \
            .aggregate(used=Sum('disk_size'))

        total = self.disk_total
        used = total - self.disk_free
        allocated = values.get("used") or 0
        free = total - allocated if allocated >= 0 and total >= 0 else -1

        return {
            'total': total,
            'free': free,
            'allocated': allocated,
            'used': used,
        }

    @property
    def allocated_cpus(self):
        values = VirtualMachine.objects \
            .filter(primary_node=self, status='running') \
            .exclude(virtual_cpus=-1).order_by() \
            .aggregate(cpus=Sum('virtual_cpus'))
        return values.get("cpus") or 0

    def set_role(self, role, force=False):
        """
        Sets the role for this node

        @param role - one of the following choices:
            * master
            * master-candidate
            * regular
            * drained
            * offline
        """
        id = self.rapi.SetNodeRole(self.hostname, role, force)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk).update(ignore_cache=True,
                                               last_job=job)
        return job

    def evacuate(self, iallocator=None, node=None):
        """
        migrates all secondary instances off this node
        """
        id = self.rapi.EvacuateNode(self.hostname, iallocator=iallocator,
                                    remote_node=node)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk) \
            .update(ignore_cache=True, last_job=job)
        return job

    def migrate(self, mode=None):
        """
        migrates all primary instances off this node
        """
        id = self.rapi.MigrateNode(self.hostname, mode)
        job = Job.objects.create(job_id=id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        Node.objects.filter(pk=self.pk).update(ignore_cache=True,
                                               last_job=job)
        return job

    def __repr__(self):
        return "<Node: '%s'>" % self.hostname
1066 class Cluster(CachedClusterObject
):
1068 A Ganeti cluster that is being tracked by this manager tool
1070 hostname
= models
.CharField(_('hostname'), max_length
=128, unique
=True)
1071 slug
= models
.SlugField(_('slug'), max_length
=50, unique
=True,
1073 port
= models
.PositiveIntegerField(_('port'), default
=5080)
1074 description
= models
.CharField(_('description'), max_length
=128,
1076 username
= models
.CharField(_('username'), max_length
=128, blank
=True)
1077 password
= PatchedEncryptedCharField(_('password'), default
="",
1078 max_length
=128, blank
=True)
1079 hash = models
.CharField(_('hash'), max_length
=40, editable
=False)
1082 virtual_cpus
= models
.IntegerField(_('Virtual CPUs'), null
=True,
1084 disk
= models
.IntegerField(_('disk'), null
=True, blank
=True)
1085 ram
= models
.IntegerField(_('ram'), null
=True, blank
=True)
1087 # The last job reference indicates that there is at least one pending job
1088 # for this virtual machine. There may be more than one job, and that can
1089 # never be prevented. This just indicates that job(s) are pending and the
1090 # job related code should be run (status, cleanup, etc).
1091 last_job
= models
.ForeignKey('Job', related_name
='cluster_last_job',
1092 null
=True, blank
=True)
1095 ordering
= ["hostname", "description"]
1097 def __unicode__(self
):
1098 return self
.hostname
1100 def save(self
, *args
, **kwargs
):
1101 self
.hash = self
.create_hash()
1102 super(Cluster
, self
).save(*args
, **kwargs
)
1105 def get_absolute_url(self
):
1106 return 'cluster-detail', (), {'cluster_slug': self
.slug
}
1110 def cluster_id(self
):
    @classmethod
    def decrypt_password(cls, value):
        """
        Convenience method for decrypting a password without an instance.
        This was partly cribbed from django-fields which only allows decrypting
        from a model instance.

        If the password appears to be encrypted, this method will decrypt it;
        otherwise, it will return the password unchanged.

        This method is bonghits.
        """
        # get_field_by_name() returns a 4-tuple; only the field is needed.
        field, chaff, chaff, chaff = cls._meta.get_field_by_name('password')

        if value.startswith(field.prefix):
            ciphertext = value[len(field.prefix):]
            plaintext = field.cipher.decrypt(binascii.a2b_hex(ciphertext))
            # Decrypted payload is NUL-padded; keep only the leading password.
            password = plaintext.split('\0')[0]
        else:
            # NOTE(review): else-branch lines lost in source -- reconstructed
            # from the docstring ("return the password unchanged").
            password = value

        return force_unicode(password)
    @property  # NOTE(review): decorator/def lines lost in source -- reconstructed
    def rapi(self):
        """
        retrieves the rapi client for this cluster.
        """
        # XXX always pass self in. not only does it avoid querying this object
        # from the DB a second time, it also prevents a recursion loop caused
        # by __init__ fetching info from the Cluster
        return get_rapi(self.hash, self)
    def create_hash(self):
        """
        Creates a hash for this cluster based on credentials required for
        connecting to the server
        """
        s = '%s%s%s%s' % (self.username, self.password, self.hostname,
                          self.port)  # NOTE(review): 4th component lost in source -- reconstructed as port
        return sha1(s).hexdigest()
    def get_default_quota(self):
        """
        Returns the default quota for this cluster
        """
        # NOTE(review): only the virtual_cpus entry survived in the source --
        # remaining dict entries reconstructed; confirm keys/values.
        return {
            "default": 1,
            "ram": self.ram,
            "virtual_cpus": self.virtual_cpus,
            "disk": self.disk,
        }
    def get_quota(self, user=None):
        """
        Get the quota for a ClusterUser

        @return user's quota, default quota, or none
        """
        if user is None:  # NOTE(review): guard line lost in source -- reconstructed
            return self.get_default_quota()

        # attempt to query user specific quota first. if it does not exist
        # then fall back to the default quota
        query = Quota.objects.filter(cluster=self, user=user)
        quotas = query.values('ram', 'disk', 'virtual_cpus')
        if quotas:  # NOTE(review): lines lost in source -- reconstructed
            quota = quotas[0]
            quota['default'] = 0
            return quota

        return self.get_default_quota()
    def set_quota(self, user, data):
        """
        Set the quota for a ClusterUser.

        If data is None, the quota will be removed.

        @param values: dictionary of values, or None to delete the quota
        """
        kwargs = {'cluster': self, 'user': user}
        if data is None:  # NOTE(review): branch keywords lost in source -- reconstructed
            Quota.objects.filter(**kwargs).delete()
        else:
            quota, new = Quota.objects.get_or_create(**kwargs)
            # Bulk-apply the quota values; keys must match field names.
            quota.__dict__.update(data)
            quota.save()  # NOTE(review): line lost in source -- reconstructed
    @classmethod
    def get_quotas(cls, clusters=None, user=None):
        """ retrieve a bulk list of cluster quotas """
        if clusters is None:
            clusters = Cluster.objects.all()

        # NOTE(review): several lines lost in source -- dict initialization
        # reconstructed from the surviving per-cluster default entries.
        quotas = {}
        cluster_id_map = {}
        for cluster in clusters:
            # Seed each cluster with its default quota values.
            quotas[cluster] = {
                'default': 1,
                'ram': cluster.ram,
                'disk': cluster.disk,
                'virtual_cpus': cluster.virtual_cpus,
            }
            cluster_id_map[cluster.id] = cluster

        # get user's custom queries if any
        if user is not None:
            qs = Quota.objects.filter(cluster__in=clusters, user=user)
            values = qs.values('ram', 'disk', 'virtual_cpus', 'cluster__id')

            for custom in values:
                cluster = cluster_id_map[custom['cluster__id']]
                # Custom quota overrides the cluster default wholesale.
                custom['default'] = 0
                del custom['cluster__id']
                quotas[cluster] = custom

        return quotas
    def sync_virtual_machines(self, remove=False):
        """
        Synchronizes the VirtualMachines in the database with the information
        this ganeti cluster has:
        * VMs no longer in ganeti are deleted
        * VMs missing from the database are added
        """
        ganeti = self.instances()
        db = self.virtual_machines.all().values_list('hostname', flat=True)

        # add VMs missing from the database
        for hostname in filter(lambda x: unicode(x) not in db, ganeti):
            vm = VirtualMachine.objects.create(cluster=self, hostname=hostname)
            vm.refresh()  # NOTE(review): line lost in source -- reconstructed

        # deletes VMs that are no longer in ganeti
        if remove:  # NOTE(review): guard lost in source -- reconstructed from the remove parameter
            missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
            if missing_ganeti:
                self.virtual_machines \
                    .filter(hostname__in=missing_ganeti).delete()
    def sync_nodes(self, remove=False):
        """
        Synchronizes the Nodes in the database with the information
        this ganeti cluster has:
        * Nodes no longer in ganeti are deleted
        * Nodes missing from the database are added
        """
        ganeti = self.rapi.GetNodes()
        db = self.nodes.all().values_list('hostname', flat=True)

        # add Nodes missing from the database
        for hostname in filter(lambda x: unicode(x) not in db, ganeti):
            node = Node.objects.create(cluster=self, hostname=hostname)
            node.refresh()  # NOTE(review): line lost in source -- reconstructed

        # deletes Nodes that are no longer in ganeti
        if remove:  # NOTE(review): guard lost in source -- reconstructed from the remove parameter
            missing_ganeti = filter(lambda x: str(x) not in ganeti, db)
            if missing_ganeti:
                self.nodes.filter(hostname__in=missing_ganeti).delete()
    @property  # NOTE(review): decorator line lost in source -- reconstructed
    def missing_in_ganeti(self):
        """
        Returns a list of VirtualMachines that are missing from the Ganeti
        cluster but present in the database.
        """
        ganeti = self.instances()
        # Exclude VMs that only exist as templates (not yet deployed).
        qs = self.virtual_machines.exclude(template__isnull=False)
        db = qs.values_list('hostname', flat=True)
        return [x for x in db if str(x) not in ganeti]
    @property  # NOTE(review): decorator line lost in source -- reconstructed
    def missing_in_db(self):
        """
        Returns list of VirtualMachines that are missing from the database, but
        present in ganeti.
        """
        ganeti = self.instances()
        db = self.virtual_machines.all().values_list('hostname', flat=True)
        return [x for x in ganeti if unicode(x) not in db]
    @property  # NOTE(review): decorator line lost in source -- reconstructed
    def nodes_missing_in_db(self):
        """
        Returns list of Nodes that are missing from the database, but present
        in ganeti.
        """
        try:
            ganeti = self.rapi.GetNodes()
        except GanetiApiError:
            # NOTE(review): fallback line lost in source -- reconstructed;
            # unreachable cluster yields an empty node list.
            ganeti = []
        db = self.nodes.all().values_list('hostname', flat=True)
        return [x for x in ganeti if unicode(x) not in db]
    @property  # NOTE(review): decorator line lost in source -- reconstructed
    def nodes_missing_in_ganeti(self):
        """
        Returns list of Nodes that are missing from the ganeti cluster
        but present in the database
        """
        try:
            ganeti = self.rapi.GetNodes()
        except GanetiApiError:
            # NOTE(review): fallback line lost in source -- reconstructed
            ganeti = []
        db = self.nodes.all().values_list('hostname', flat=True)
        return filter(lambda x: str(x) not in ganeti, db)
    @property  # NOTE(review): decorator line lost in source -- reconstructed
    def available_ram(self):
        """ returns dict of free and total ram """
        # Nodes reporting -1 have unknown totals and are excluded.
        nodes = self.nodes.exclude(ram_total=-1) \
            .aggregate(total=Sum('ram_total'), free=Sum('ram_free'))
        total = max(nodes.get("total", 0), 0)
        free = max(nodes.get("free", 0), 0)
        used = total - free  # NOTE(review): line lost in source -- reconstructed

        # Sum RAM allocated to running VMs; order_by() clears default ordering
        # so aggregation is not affected by the model's Meta ordering.
        values = self.virtual_machines \
            .filter(status='running') \
            .exclude(ram=-1).order_by() \
            .aggregate(used=Sum('ram'))

        if values.get("used") is None:
            allocated = 0  # NOTE(review): line lost in source -- reconstructed
        else:
            allocated = values["used"]

        free = max(total - allocated, 0)
        # NOTE(review): return-dict lines partially lost -- reconstructed
        return {
            'total': total,
            'allocated': allocated,
            'used': used,
            'free': free,
        }
    @property  # NOTE(review): decorator line lost in source -- reconstructed
    def available_disk(self):
        """ returns dict of free and total disk space """
        # Nodes reporting -1 have unknown totals and are excluded.
        nodes = self.nodes.exclude(disk_total=-1) \
            .aggregate(total=Sum('disk_total'), free=Sum('disk_free'))
        total = max(nodes.get("total", 0), 0)
        free = max(nodes.get("free", 0), 0)
        used = total - free  # NOTE(review): line lost in source -- reconstructed

        # Sum disk allocated to VMs; order_by() clears default ordering.
        values = self.virtual_machines \
            .exclude(disk_size=-1).order_by() \
            .aggregate(used=Sum('disk_size'))

        if values.get("used") is None:
            allocated = 0  # NOTE(review): line lost in source -- reconstructed
        else:
            allocated = values["used"]

        free = max(total - allocated, 0)
        # NOTE(review): return-dict lines partially lost -- reconstructed
        return {
            'total': total,
            'allocated': allocated,
            'used': used,
            'free': free,
        }
    def _refresh(self):
        # NOTE(review): def line lost in source -- reconstructed as the
        # cache-refresh hook returning raw cluster info from the RAPI; confirm.
        return self.rapi.GetInfo()
    def instances(self, bulk=False):
        """Gets all VMs which reside under the Cluster
        Calls the rapi client for all instances.
        """
        try:
            return self.rapi.GetInstances(bulk=bulk)
        except GanetiApiError:
            # NOTE(review): fallback value lost in source -- reconstructed;
            # unreachable cluster yields an empty instance list.
            return []
    def instance(self, instance):
        """Get a single Instance
        Calls the rapi client for a specific instance.
        """
        try:
            return self.rapi.GetInstance(instance)
        except GanetiApiError:
            # NOTE(review): fallback value lost in source -- reconstructed
            return None
    def redistribute_config(self):
        """
        Redistribute config from cluster's master node to all
        other nodes.
        """
        # no exception handling, because it's being done in a view
        id = self.rapi.RedistributeConfig()
        job = Job.objects.create(job_id=id, obj=self, cluster_id=self.id)
        self.last_job = job  # NOTE(review): line lost in source -- reconstructed
        # ignore_cache marks the cluster so cached info is refetched once the
        # job completes.
        Cluster.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job  # NOTE(review): line lost in source -- reconstructed
class VirtualMachineTemplate(models.Model):
    """
    Virtual Machine Template holds all the values for the create virtual
    machine form so that they can automatically be used or edited by a user.
    """
    # NOTE(review): many field default/kwarg lines were lost in the source;
    # values below marked "confirm" are reconstructed.

    template_name = models.CharField(max_length=255, default="")
    temporary = BooleanField(verbose_name=_("Temporary"), default=False)
    description = models.CharField(max_length=255, default="")
    cluster = models.ForeignKey(Cluster, related_name="templates", null=True,
                                blank=True)  # confirm blank=True
    start = models.BooleanField(verbose_name=_('Start up After Creation'),
                                default=True)  # confirm default
    no_install = models.BooleanField(verbose_name=_('Do not install OS'),
                                     default=False)  # confirm default
    ip_check = BooleanField(verbose_name=_("IP Check"), default=True)
    name_check = models.BooleanField(verbose_name=_('DNS Name Check'),
                                     default=True)  # confirm default
    iallocator = models.BooleanField(verbose_name=_('Automatic Allocation'),
                                     default=False)  # confirm default
    iallocator_hostname = models.CharField(max_length=255, blank=True)
    disk_template = models.CharField(verbose_name=_('Disk Template'),
                                     max_length=16)  # confirm max_length
    # XXX why aren't these FKs?
    pnode = models.CharField(verbose_name=_('Primary Node'), max_length=255,
                             default="")  # confirm default
    snode = models.CharField(verbose_name=_('Secondary Node'), max_length=255,
                             default="")  # confirm default
    os = models.CharField(verbose_name=_('Operating System'), max_length=255)

    # Backend parameters (BEPARAMS)
    vcpus = models.IntegerField(verbose_name=_('Virtual CPUs'),
                                validators=[MinValueValidator(1)], null=True,
                                blank=True)  # confirm blank
    # XXX do we really want the minimum memory to be 100MiB? This isn't
    # strictly necessary AFAICT.
    memory = models.IntegerField(verbose_name=_('Memory'),
                                 validators=[MinValueValidator(100)],
                                 null=True, blank=True)
    minmem = models.IntegerField(verbose_name=_('Minimum Memory'),
                                 validators=[MinValueValidator(100)],
                                 null=True, blank=True)
    disks = PickleField(verbose_name=_('Disks'), null=True, blank=True)
    # XXX why isn't this an enum?
    disk_type = models.CharField(verbose_name=_('Disk Type'), max_length=255,
                                 default="")  # confirm default
    nics = PickleField(verbose_name=_('NICs'), null=True, blank=True)
    # XXX why isn't this an enum?
    nic_type = models.CharField(verbose_name=_('NIC Type'), max_length=255,
                                default="")  # confirm default

    # Hypervisor parameters (HVPARAMS)
    kernel_path = models.CharField(verbose_name=_('Kernel Path'),
                                   max_length=255, default="", blank=True)
    root_path = models.CharField(verbose_name=_('Root Path'), max_length=255,
                                 default='/', blank=True)
    serial_console = models.BooleanField(
        verbose_name=_('Enable Serial Console'))
    boot_order = models.CharField(verbose_name=_('Boot Device'),
                                  max_length=255, default="")
    cdrom_image_path = models.CharField(verbose_name=_('CD-ROM Image Path'),
                                        max_length=512, blank=True)
    cdrom2_image_path = models.CharField(
        verbose_name=_('CD-ROM 2 Image Path'),
        max_length=512, blank=True)

    class Meta:  # NOTE(review): "class Meta:" line lost in source -- reconstructed
        unique_together = (("cluster", "template_name"),)

    def __unicode__(self):
        if self.temporary:  # NOTE(review): guard lost in source -- reconstructed
            return u'(temporary)'
        else:
            return self.template_name

    def set_name(self, name):
        """
        Set this template's name.

        If the name is blank, this template will become temporary and its name
        will be set to a unique timestamp.
        """
        if name:  # NOTE(review): guard lost in source -- reconstructed
            self.template_name = name
        else:
            # The template is temporary and will be removed by the VM when the
            # VM successfully comes into existence.
            self.temporary = True
            # Give it a temporary name. Something unique. This is the number
            # of microseconds since the epoch; I figure that it'll work out.
            self.template_name = str(int(time.time() * (10 ** 6)))
class GanetiError(models.Model):
    """
    Class for storing errors which occurred in Ganeti
    """
    cluster = models.ForeignKey(Cluster, related_name="errors")
    msg = models.TextField()
    code = models.PositiveIntegerField(blank=True, null=True)

    # XXX could be fixed with django-model-util's TimeStampedModel
    timestamp = models.DateTimeField()

    # determines if the errors still appears or not
    cleared = models.BooleanField(default=False)

    # cluster object (cluster, VM, Node) affected by the error (if any)
    obj_type = models.ForeignKey(ContentType, related_name="ganeti_errors")
    obj_id = models.PositiveIntegerField()
    obj = GenericForeignKey("obj_type", "obj_id")

    objects = QuerySetManager()

    class Meta:  # NOTE(review): "class Meta:" line lost in source -- reconstructed
        ordering = ("-timestamp", "code", "msg")

    def __unicode__(self):
        base = u"[%s] %s" % (self.timestamp, self.msg)
        return base  # NOTE(review): line lost in source -- reconstructed

    class QuerySet(QuerySet):

        def clear_errors(self, obj=None):
            """
            Clear errors instead of deleting them.
            """
            qs = self.filter(cleared=False)
            if obj:  # NOTE(review): guard lost in source -- reconstructed
                qs = qs.get_errors(obj)
            return qs.update(cleared=True)

        def get_errors(self, obj):
            """
            Manager method used for getting QuerySet of all errors depending
            on passed arguments.

            @param obj affected object (itself or just QuerySet)
            """
            if obj is None:  # NOTE(review): guard lost in source -- reconstructed
                raise RuntimeError("Implementation error calling get_errors()"
                                   " with None")  # NOTE(review): continuation lost -- reconstructed

            # Create base query of errors to return.
            #
            # if it's a Cluster or a queryset for Clusters, then we need to
            # get all errors from the Clusters. Do this by filtering on
            # GanetiError.cluster instead of obj_id.
            if isinstance(obj, (Cluster,)):
                return self.filter(cluster=obj)

            elif isinstance(obj, (QuerySet,)):
                if obj.model == Cluster:
                    return self.filter(cluster__in=obj)
                else:  # NOTE(review): else line lost in source -- reconstructed
                    ct = ContentType.objects.get_for_model(obj.model)
                    return self.filter(obj_type=ct, obj_id__in=obj)

            else:  # NOTE(review): else line lost in source -- reconstructed
                ct = ContentType.objects.get_for_model(obj.__class__)
                return self.filter(obj_type=ct, obj_id=obj.pk)

    def __repr__(self):  # NOTE(review): def line lost in source -- reconstructed
        return "<GanetiError '%s'>" % self.msg

    @classmethod
    def store_error(cls, msg, obj, code, **kwargs):
        """
        Create and save an error with the given information.

        @param msg error's message
        @param obj object (i.e. cluster or vm) affected by the error
        @param code error's code number
        """
        ct = ContentType.objects.get_for_model(obj.__class__)
        is_cluster = isinstance(obj, Cluster)

        # 401 -- bad permissions
        # 401 is cluster-specific error and thus shouldn't appear on any other
        # object.
        if code == 401:  # NOTE(review): branch structure lost -- reconstructed
            if not is_cluster:
                # NOTE: what we do here is almost like:
                #   return self.store_error(msg=msg, code=code, obj=obj.cluster)
                # we just omit the recursiveness
                obj = obj.cluster
                ct = ContentType.objects.get_for_model(Cluster)
                is_cluster = True

        # 404 -- object not found
        # 404 can occur on any object, but when it occurs on a cluster, then
        # any of its children must not see the error again
        elif code == 404:  # NOTE(review): branch structure lost -- reconstructed
            if not is_cluster:
                # return if the error exists for cluster
                try:
                    c_ct = ContentType.objects.get_for_model(Cluster)
                    return cls.objects.filter(msg=msg, obj_type=c_ct,
                                              code=code,
                                              obj_id=obj.cluster_id,
                                              cleared=False)[0]
                except (cls.DoesNotExist, IndexError):
                    # we want to proceed when the error is not
                    # cluster-specific
                    pass

        # XXX use a try/except instead of get_or_create(). get_or_create()
        # does not allow us to set cluster_id. This means we'd have to query
        # the cluster object to create the error. we can't guaranteee the
        # cluster will already be queried so use create() instead which does
        # allow cluster_id
        try:  # NOTE(review): try line lost in source -- reconstructed
            return cls.objects.filter(msg=msg, obj_type=ct, obj_id=obj.pk,
                                      code=code, **kwargs)[0]

        except (cls.DoesNotExist, IndexError):
            cluster_id = obj.pk if is_cluster else obj.cluster_id

            return cls.objects.create(timestamp=datetime.now(), msg=msg,
                                      obj_type=ct, obj_id=obj.pk,
                                      cluster_id=cluster_id, code=code,
                                      **kwargs)
class ClusterUser(models.Model):
    """
    Base class for objects that may interact with a Cluster or VirtualMachine.
    """

    name = models.CharField(max_length=128)
    # Concrete subclass ContentType; set on first save and used by cast().
    real_type = models.ForeignKey(ContentType, related_name="+",
                                  editable=False, null=True, blank=True)

    def __unicode__(self):
        return self.name  # NOTE(review): body lost in source -- reconstructed

    def save(self, *args, **kwargs):
        if not self.id:  # NOTE(review): guard lost in source -- reconstructed
            self.real_type = self._get_real_type()
        super(ClusterUser, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # Delegate to the concrete subclass (Profile or Organization).
        return self.cast().get_absolute_url()

    @property
    def permissable(self):
        """ returns an object that can be granted permissions """
        return self.cast().permissable

    @classmethod
    def _get_real_type(cls):
        return ContentType.objects.get_for_model(cls)

    def cast(self):  # NOTE(review): def line lost in source -- reconstructed
        # Downcast to the concrete subclass instance recorded in real_type.
        return self.real_type.get_object_for_this_type(pk=self.pk)

    def used_resources(self, cluster=None, only_running=True):
        """
        Return dictionary of total resources used by VMs that this ClusterUser
        has.
        @param cluster if set, get only VMs from specified cluster
        @param only_running if set, get only running VMs
        """
        # XXX - order_by must be cleared or it breaks annotation grouping since
        # the default order_by field is also added to the group_by clause
        base = self.virtual_machines.all().order_by()

        # XXX - use a custom aggregate for ram and vcpu count when filtering by
        # running. this allows us to execute a single query.
        #
        # XXX - quotes must be used in this order. postgresql quirk
        if only_running:  # NOTE(review): guard lost in source -- reconstructed
            sum_ram = SumIf('ram', condition="status='running'")
            sum_vcpus = SumIf('virtual_cpus', condition="status='running'")
        else:
            sum_ram = Sum('ram')
            sum_vcpus = Sum('virtual_cpus')

        base = base.exclude(ram=-1, disk_size=-1, virtual_cpus=-1)

        if cluster:  # NOTE(review): guard lost in source -- reconstructed
            base = base.filter(cluster=cluster)
            result = base.aggregate(ram=sum_ram, disk=Sum('disk_size'),
                                    virtual_cpus=sum_vcpus)

            # repack with zeros instead of Nones
            if result['disk'] is None:
                result['disk'] = 0  # NOTE(review): line lost -- reconstructed
            if result['ram'] is None:
                result['ram'] = 0  # NOTE(review): line lost -- reconstructed
            if result['virtual_cpus'] is None:
                result['virtual_cpus'] = 0

            return result  # NOTE(review): line lost -- reconstructed

        else:
            base = base.values('cluster').annotate(uram=sum_ram,
                                                   udisk=Sum('disk_size'),
                                                   uvirtual_cpus=sum_vcpus)

            # repack as dictionary
            result = {}  # NOTE(review): lines lost -- loop reconstructed
            for used in base:
                # repack with zeros instead of Nones, change index names
                used["ram"] = used.pop("uram") or 0
                used["disk"] = used.pop("udisk") or 0
                used["virtual_cpus"] = used.pop("uvirtual_cpus") or 0
                result[used.pop('cluster')] = used

            return result  # NOTE(review): line lost -- reconstructed
class Profile(ClusterUser):
    """
    Profile associated with a django.contrib.auth.User object.
    """
    user = models.OneToOneField(User)

    def get_absolute_url(self):
        return self.user.get_absolute_url()

    # Permission helpers delegate to the underlying auth User
    # (object_permissions adds grant/set_perms/etc. to User).
    def grant(self, perm, obj):
        self.user.grant(perm, obj)

    def set_perms(self, perms, obj):
        self.user.set_perms(perms, obj)

    def get_objects_any_perms(self, *args, **kwargs):
        return self.user.get_objects_any_perms(*args, **kwargs)

    def has_perm(self, *args, **kwargs):
        return self.user.has_perm(*args, **kwargs)

    @property
    def permissable(self):
        """ returns an object that can be granted permissions """
        return self.user  # NOTE(review): line lost in source -- reconstructed
class Organization(ClusterUser):
    """
    An organization is used for grouping Users.

    Organizations are matched with an instance of contrib.auth.models.Group.
    This model exists so that contrib.auth.models.Group have a 1:1 relation
    with a ClusterUser on which quotas and permissions can be assigned.
    """
    group = models.OneToOneField(Group, related_name='organization')

    def get_absolute_url(self):
        return self.group.get_absolute_url()

    # Permission helpers delegate to the underlying auth Group
    # (object_permissions adds grant/set_perms/etc. to Group).
    def grant(self, perm, object):
        self.group.grant(perm, object)

    def set_perms(self, perms, object):
        self.group.set_perms(perms, object)

    def get_objects_any_perms(self, *args, **kwargs):
        return self.group.get_objects_any_perms(*args, **kwargs)

    def has_perm(self, *args, **kwargs):
        return self.group.has_perm(*args, **kwargs)

    @property
    def permissable(self):
        """ returns an object that can be granted permissions """
        return self.group  # NOTE(review): line lost in source -- reconstructed
class Quota(models.Model):
    """
    A resource limit imposed on a ClusterUser for a given Cluster. The
    attributes of this model represent maximum values the ClusterUser can
    consume. The absence of a Quota indicates unlimited usage.
    """
    user = models.ForeignKey(ClusterUser, related_name='quotas')
    cluster = models.ForeignKey(Cluster, related_name='quotas')

    # Null presumably means unlimited for the given resource -- confirm
    # against get_quota()/get_default_quota() consumers.
    ram = models.IntegerField(default=0, null=True, blank=True)
    disk = models.IntegerField(default=0, null=True, blank=True)
    virtual_cpus = models.IntegerField(default=0, null=True, blank=True)
class SSHKey(models.Model):
    """
    Model representing user's SSH public key. Virtual machines rely on
    it.
    """
    # Public key material, validated by validate_sshkey (defined elsewhere).
    key = models.TextField(validators=[validate_sshkey])
    #filename = models.CharField(max_length=128) # saves key file's name
    user = models.ForeignKey(User, related_name='ssh_keys')
def create_profile(sender, instance, **kwargs):
    """
    Create a profile object whenever a new user is created, also keeps the
    profile name synchronized with the username
    """
    try:  # NOTE(review): try line lost in source -- reconstructed
        profile, new = Profile.objects.get_or_create(user=instance)
        if profile.name != instance.username:
            profile.name = instance.username
            profile.save()  # NOTE(review): line lost in source -- reconstructed
    except DatabaseError:
        # XXX - since we're using south to track migrations the Profile table
        # won't be available the first time syncdb is run. Catch the error
        # here and let the south migration handle it.
        pass
def update_cluster_hash(sender, instance, **kwargs):
    """
    Updates the Cluster hash for all of it's VirtualMachines, Nodes, and Jobs
    """
    # Push the recomputed hash to every related object that caches it, in the
    # same order as before: VMs, then jobs, then nodes.
    for manager in (instance.virtual_machines, instance.jobs, instance.nodes):
        manager.all().update(cluster_hash=instance.hash)
def update_organization(sender, instance, **kwargs):
    """
    Creates a Organizations whenever a contrib.auth.models.Group is created
    """
    org, new = Organization.objects.get_or_create(group=instance)
    # Keep the organization name in sync with the group name.
    org.name = instance.name
    org.save()  # NOTE(review): line lost in source -- reconstructed
# Keep Profiles, Organizations, and cached cluster hashes in sync with
# their source objects.
post_save.connect(create_profile, sender=User)
post_save.connect(update_cluster_hash, sender=Cluster)
post_save.connect(update_organization, sender=Group)

# Disconnect create_default_site from django.contrib.sites so that
# the useless table for sites is not created. This will be
# reconnected for other apps to use in update_sites_module.
post_syncdb.disconnect(create_default_site, sender=sites_app)
post_syncdb.connect(management.update_sites_module, sender=sites_app,
                    dispatch_uid="ganeti.management.update_sites_module")
def regenerate_cu_children(sender, **kwargs):
    """
    Resets may destroy Profiles and/or Organizations. We need to regenerate
    them.
    """

    # So. What are we actually doing here?
    # Whenever a User or Group is saved, the associated Profile or
    # Organization is also updated. This means that, if a Profile for a User
    # is absent, it will be created.
    # More importantly, *why* might a Profile be missing? Simple. Resets of
    # the ganeti app destroy them. This shouldn't happen in production, and
    # only occasionally in development, but it's good to explicitly handle
    # this particular case so that missing Profiles not resulting from a reset
    # are easier to diagnose.
    try:  # NOTE(review): try line lost in source -- reconstructed
        for user in User.objects.filter(profile__isnull=True):
            # save() fires create_profile via the post_save signal.
            user.save()  # NOTE(review): line lost in source -- reconstructed
        for group in Group.objects.filter(organization__isnull=True):
            # save() fires update_organization via the post_save signal.
            group.save()  # NOTE(review): line lost in source -- reconstructed
    except DatabaseError:
        # XXX - since we're using south to track migrations the Profile table
        # won't be available the first time syncdb is run. Catch the error
        # here and let the south migration handle it.
        pass


post_syncdb.connect(regenerate_cu_children)
def log_group_create(sender, editor, **kwargs):
    """Record a group-creation event in the object log."""
    # `sender` is the created group; `editor` is the acting user.
    log_action('CREATE', editor, sender)
def log_group_edit(sender, editor, **kwargs):
    """Record a group-edit event in the object log."""
    # `sender` is the edited group; `editor` is the acting user.
    log_action('EDIT', editor, sender)
# Wire group lifecycle signals from muddle_users into the object log.
muddle_user_signals.view_group_created.connect(log_group_create)
muddle_user_signals.view_group_edited.connect(log_group_edit)
def refresh_objects(sender, **kwargs):
    """
    This was originally the code in the 0009
    and then 0010 'force_object_refresh' migration

    Force a refresh of all Cluster, Nodes, and VirtualMachines, and
    import any new Nodes.
    """
    # Only run when this app's tables were just migrated.
    if kwargs.get('app', False) and kwargs['app'] == 'ganeti_web':
        # Clearing mtime invalidates the cache so the next access refetches
        # fresh state from ganeti.
        Cluster.objects.all().update(mtime=None)
        Node.objects.all().update(mtime=None)
        VirtualMachine.objects.all().update(mtime=None)

        write = sys.stdout.write
        flush = sys.stdout.flush

        def wf(str, newline=False):
            # NOTE(review): body lost in source -- reconstructed progress
            # writer; confirm exact output formatting.
            if newline:
                write('\n')
            write(str)
            flush()

        wf('- Refresh Cached Cluster Objects')
        wf(' > Synchronizing Cluster Nodes ', True)
        for cluster in Cluster.objects.all().iterator():
            try:  # NOTE(review): try line lost in source -- reconstructed
                cluster.sync_nodes()
                wf('.')  # NOTE(review): line lost -- reconstructed
            except GanetiApiError:
                wf('x')  # NOTE(review): line lost -- reconstructed

        wf(' > Refreshing Node Caches ', True)
        for node in Node.objects.all().iterator():
            try:  # NOTE(review): try line lost in source -- reconstructed
                node.refresh()  # NOTE(review): line lost -- reconstructed
                wf('.')
            except GanetiApiError:
                wf('x')  # NOTE(review): line lost -- reconstructed

        wf(' > Refreshing Instance Caches ', True)
        for instance in VirtualMachine.objects.all().iterator():
            try:  # NOTE(review): try line lost in source -- reconstructed
                instance.refresh()  # NOTE(review): line lost -- reconstructed
                wf('.')
            except GanetiApiError:
                wf('x')  # NOTE(review): line lost -- reconstructed
        wf('\n')  # NOTE(review): line lost -- reconstructed


# Set this as post_migrate hook.
post_migrate.connect(refresh_objects)
# Register permissions on our models.
# These are part of the DB schema and should not be changed without serious
# forethought.
# You *must* syncdb after you change these.
register(permissions.CLUSTER_PARAMS, Cluster, 'ganeti_web')
register(permissions.VIRTUAL_MACHINE_PARAMS, VirtualMachine, 'ganeti_web')


# register log actions
register_log_actions()