#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.


#This class is an example of a Scheduler module.
#It is used for the configuration phase AND the running one.

import cPickle
import shutil

#This text is printed at import time
print "Detected module : Pickle retention file for Scheduler"


#Module properties, read by the modules manager when the module is loaded
properties = {
    'type' : 'pickle_retention_file',
    'phases' : ['retention'],
    }


#Called by the modules manager to get an instance of this module
def get_instance(plugin):
    print "Get a pickle retention scheduler module for plugin %s" % plugin.get_name()
    # The retention file path comes from the module configuration (plugin) object
    path = plugin.path
    instance = Pickle_retention_scheduler(plugin.get_name(), path)
    return instance
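
# For illustration only: a module of this type is usually declared in the
# Shinken configuration and attached to a scheduler, roughly like the sketch
# below. Treat the directive names as an assumption (they depend on the
# Shinken version); the 'path' directive is what ends up as plugin.path above.
#
#   define module {
#       module_name    PickleRetention
#       module_type    pickle_retention_file
#       path           /var/lib/shinken/retention.dat
#   }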


#The retention module itself: it saves and reloads the hosts/services running data
class Pickle_retention_scheduler:
    def __init__(self, name, path):
        self.name = name
        self.path = path

    #Called by Scheduler to say 'let's prepare yourself guy'
    def init(self):
        print "Initialization of the Pickle file retention scheduler module"
        #self.return_queue = self.properties['from_queue']

    #Ok, main function that is called in the retention creation pass
    def update_retention_objects(self, sched, log_mgr):
        print "[PickleRetention] asking me to update the retention objects"
        #Now the flat file method
        try:
            # Open a file near the path, with a .tmp extension,
            # so in case of problem we do not lose the old one
            f = open(self.path+'.tmp', 'wb')

            #Just put hosts/services because checks and notifications
            #are already linked to them
            #all_data = {'hosts' : sched.hosts, 'services' : sched.services}

            # We create an all_data dict holding, for each host and service,
            # a dict of its retention-useful data
            all_data = {'hosts' : {}, 'services' : {}}
            for h in sched.hosts:
                d = {}
                running_properties = h.__class__.running_properties
                for prop in running_properties:
                    entry = running_properties[prop]
                    if entry.retention:
                        d[prop] = getattr(h, prop)
                    # if prop == 'notifications_in_progress':
                    #     v = getattr(h, prop)
                    #     print "DUMP", getattr(h, prop)
                    #     for n in v.values():
                all_data['hosts'][h.host_name] = d

            #Now same for services
            for s in sched.services:
                d = {}
                running_properties = s.__class__.running_properties
                for prop in running_properties:
                    entry = running_properties[prop]
                    if entry.retention:
                        d[prop] = getattr(s, prop)
                all_data['services'][(s.host.host_name, s.service_description)] = d

            #s = cPickle.dumps(all_data)
            #s_compress = zlib.compress(s)
            cPickle.dump(all_data, f)
            f.close()

            # Now move the .tmp file to the real path
            shutil.move(self.path+'.tmp', self.path)
        except IOError, exp:
            log_mgr.log("Error: retention file creation failed, %s" % str(exp))
            return
        log_mgr.log("Updating retention_file %s" % self.path)
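
    # For illustration only: the structure pickled above looks roughly like the
    # sketch below. The exact keys come from the hosts'/services'
    # running_properties, so the property names shown here ('state', 'last_chk')
    # are just examples and vary with the Shinken version.
    #
    #   {'hosts':    {'srv-web-1': {'state': 'UP', 'last_chk': 1302010101, ...}},
    #    'services': {('srv-web-1', 'HTTP'): {'state': 'OK', ...}}}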

    #Should return whether it succeeded in loading the retention data or not
    def load_retention_objects(self, sched, log_mgr):
        print "[PickleRetention] asking me to load the retention objects"
        #Now the old flat file way :(
        log_mgr.log("[PickleRetention] Reading from retention_file %s" % self.path)
        try:
            f = open(self.path, 'rb')
            all_data = cPickle.load(f)
            f.close()
        except EOFError, exp:
            print exp
            return False
        except ValueError, exp:
            print exp
            return False
        except IOError, exp:
            print exp
            return False
        except IndexError, exp:
            s = "WARNING: Sorry, the resource file is not compatible"
            log_mgr.log(s)
            return False
        except TypeError, exp:
            s = "WARNING: Sorry, the resource file is not compatible"
            log_mgr.log(s)
            return False

        #Now load the interesting properties into hosts/services.
        #Properties tagged retention=False are not loaded directly.
        #Items get their status back, but are not marked as in checking, so
        #a new check will be launched like at a normal (randomly distributed)
        #start.

        ret_hosts = all_data['hosts']
        for ret_h_name in ret_hosts:
            #We take the dict of the values to load
            d = all_data['hosts'][ret_h_name]
            h = sched.hosts.find_by_name(ret_h_name)
            if h is not None:
                running_properties = h.__class__.running_properties
                for prop in running_properties:
                    entry = running_properties[prop]
                    if entry.retention:
                        # Maybe the save was not done with this value,
                        # so we just bypass it
                        if prop in d:
                            setattr(h, prop, d[prop])
                for a in h.notifications_in_progress.values():
                    # print "AA,", a.__dict__
                    # Re-link the notification to its (newly loaded) host
                    a.ref = h
                h.update_in_checking()
                #And also add downtimes and comments
                for dt in h.downtimes:
                    dt.ref = h
                    dt.extra_comment.ref = h

        #Now the same for services
        ret_services = all_data['services']
        for (ret_s_h_name, ret_s_desc) in ret_services:
            #We take the dict of the values to load
            d = all_data['services'][(ret_s_h_name, ret_s_desc)]
            s = sched.services.find_srv_by_name_and_hostname(ret_s_h_name, ret_s_desc)
            if s is not None:
                running_properties = s.__class__.running_properties
                for prop in running_properties:
                    entry = running_properties[prop]
                    if entry.retention:
                        # Maybe the save was not done with this value,
                        # so we just bypass it
                        if prop in d:
                            setattr(s, prop, d[prop])
                for a in s.notifications_in_progress.values():
                    # Re-link the notification to its (newly loaded) service
                    a.ref = s
                s.update_in_checking()
                #And also add downtimes and comments
                for dt in s.downtimes:
                    dt.ref = s
                    dt.extra_comment.ref = s

        log_mgr.log("[PickleRetention] OK, we've loaded data from the retention file")
        return True