#!/usr/bin/env python
# Copyright (C) 2009-2010 :
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.


""" This class is the service one, so it manages all service-specific things.
For the scheduling part, look at the SchedulingItem class. """

import time

try:
    from ClusterShell.NodeSet import NodeSet, NodeSetParseRangeError
except ImportError:
    NodeSet = None

from shinken.autoslots import AutoSlots
from shinken.item import Items
from shinken.schedulingitem import SchedulingItem
from shinken.util import to_int, to_char, to_split, to_bool, to_float, strip_and_uniq, format_t_into_dhms_format, to_svc_hst_distinct_lists, get_key_value_sequence, GET_KEY_VALUE_SEQUENCE_ERROR_NOERROR, GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX, GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT, GET_KEY_VALUE_SEQUENCE_ERROR_NODE, to_list_string_of_names, expand_with_macros
from shinken.property import UnusedProp, BoolProp, IntegerProp, FloatProp, CharProp, StringProp, ListProp
from shinken.macroresolver import MacroResolver
from shinken.eventhandler import EventHandler
from shinken.log import logger


class Service(SchedulingItem):
    # AutoSlots creates the __slots__ from the properties and
    # running_properties names
    __metaclass__ = AutoSlots

    # Every service has a unique ID, and 0 is always special in
    # databases and co...
    id = 1
    # The host and the service do not have the same "all good" value ('UP' vs 'OK')
    ok_up = 'OK'
    # used by the Item class to format type-specific values, like for Broks
    my_type = 'service'

    # Properties defined by the configuration
    # required : is required in conf
    # default : default value if not set in conf
    # pythonize : function to call when transforming a string into a python object
    # fill_brok : if set, send to the broker. There are two categories:
    #             full_status for initial and update status, check_result for check results
    # no_slots : do not take this property for __slots__
    properties = {
        'host_name' : StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
        'hostgroup_name' : StringProp(default='', fill_brok=['full_status']),
        'service_description' : StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
        'display_name' : StringProp(default='', fill_brok=['full_status']),
        'servicegroups' : StringProp(default='', fill_brok=['full_status'], brok_transformation=to_list_string_of_names),
        'is_volatile' : BoolProp(default='0', fill_brok=['full_status']),
        'check_command' : StringProp(fill_brok=['full_status']),
        'initial_state' : CharProp(default='o', fill_brok=['full_status']),
        'max_check_attempts' : IntegerProp(fill_brok=['full_status']),
        'check_interval' : IntegerProp(fill_brok=['full_status']),
        'retry_interval' : IntegerProp(fill_brok=['full_status']),
        'active_checks_enabled' : BoolProp(default='1', fill_brok=['full_status']),
        'passive_checks_enabled' : BoolProp(default='1', fill_brok=['full_status']),
        'check_period' : StringProp(fill_brok=['full_status']),
        'obsess_over_service' : BoolProp(default='0', fill_brok=['full_status']),
        'check_freshness' : BoolProp(default='0', fill_brok=['full_status']),
        'freshness_threshold' : IntegerProp(default='0', fill_brok=['full_status']),
        'event_handler' : StringProp(default='', fill_brok=['full_status']),
        'event_handler_enabled' : BoolProp(default='0', fill_brok=['full_status']),
        'low_flap_threshold' : IntegerProp(default='-1', fill_brok=['full_status']),
        'high_flap_threshold' : IntegerProp(default='-1', fill_brok=['full_status']),
        'flap_detection_enabled' : BoolProp(default='1', fill_brok=['full_status']),
        'flap_detection_options' : ListProp(default='o,w,c,u', fill_brok=['full_status']),
        'process_perf_data' : BoolProp(default='1', fill_brok=['full_status']),
        'retain_status_information' : BoolProp(default='1', fill_brok=['full_status']),
        'retain_nonstatus_information' : BoolProp(default='1', fill_brok=['full_status']),
        'notification_interval' : IntegerProp(default='60', fill_brok=['full_status']),
        'first_notification_delay' : IntegerProp(default='0', fill_brok=['full_status']),
        'notification_period' : StringProp(fill_brok=['full_status']),
        'notification_options' : ListProp(default='w,u,c,r,f,s', fill_brok=['full_status']),
        'notifications_enabled' : BoolProp(default='1', fill_brok=['full_status']),
        'contacts' : StringProp(fill_brok=['full_status']),
        'contact_groups' : StringProp(fill_brok=['full_status']),
        'stalking_options' : ListProp(default='', fill_brok=['full_status']),
        'notes' : StringProp(default='', fill_brok=['full_status']),
        'notes_url' : StringProp(default='', fill_brok=['full_status']),
        'action_url' : StringProp(default='', fill_brok=['full_status']),
        'icon_image' : StringProp(default='', fill_brok=['full_status']),
        'icon_image_alt' : StringProp(default='', fill_brok=['full_status']),
        'failure_prediction_enabled' : BoolProp(default='0', fill_brok=['full_status']),
        'parallelize_check' : BoolProp(default='1', fill_brok=['full_status']),

        # Shinken specific
        'poller_tag' : StringProp(default=None),

        'resultmodulations' : StringProp(default=''),
        'escalations' : StringProp(default='', fill_brok=['full_status']),
        'maintenance_period' : StringProp(default='', fill_brok=['full_status']),

        # Service generator
        'duplicate_foreach' : StringProp(default=''),
        'default_value' : StringProp(default=''),

        # Criticity value
        'criticity' : IntegerProp(default='3', fill_brok=['full_status']),
    }
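
    # Illustrative note (not from the original file): the two "service generator"
    # properties above drive Service.duplicate() further down. As a hedged sketch,
    # a host carrying a custom macro such as
    #     _disks    C,D,E
    # combined with a service defining
    #     duplicate_foreach     _disks
    #     service_description   Disk $KEY$
    # would be expanded into one registered service per key (Disk C, Disk D,
    # Disk E). The macro name and entry syntax here are illustrative assumptions;
    # see duplicate() and get_key_value_sequence() for the actual parsing.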

    # Properties used in the running state
    running_properties = {
        'last_chk' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'next_chk' : IntegerProp(default=0, fill_brok=['full_status', 'next_schedule']),
        'in_checking' : BoolProp(default=False, fill_brok=['full_status', 'check_result', 'next_schedule'], retention=True),
        'latency' : FloatProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'attempt' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'state' : StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
        'state_id' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'current_event_id' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'last_event_id' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'last_state' : StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
        'last_state_id' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'last_state_change' : FloatProp(default=time.time(), fill_brok=['full_status'], retention=True),
        'last_hard_state_change' : FloatProp(default=time.time(), fill_brok=['full_status'], retention=True),
        'last_hard_state' : StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
        'last_hard_state_id' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'last_time_ok' : IntegerProp(default=int(time.time()), fill_brok=['full_status', 'check_result'], retention=True),
        'last_time_warning' : IntegerProp(default=int(time.time()), fill_brok=['full_status', 'check_result'], retention=True),
        'last_time_critical' : IntegerProp(default=int(time.time()), fill_brok=['full_status', 'check_result'], retention=True),
        'last_time_unknown' : IntegerProp(default=int(time.time()), fill_brok=['full_status', 'check_result'], retention=True),
        'duration_sec' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'state_type' : StringProp(default='HARD', fill_brok=['full_status'], retention=True),
        'state_type_id' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'output' : StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
        'long_output' : StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
        'is_flapping' : BoolProp(default=False, fill_brok=['full_status'], retention=True),
        # Dependencies for actions like notifications or event handlers,
        # so AFTER the check returns
        'act_depend_of' : ListProp(default=[]),
        # Dependencies for check raising, so BEFORE the checks
        'chk_depend_of' : ListProp(default=[]),
        # Elements that depend on me for actions, so the reverse of the above
        'act_depend_of_me' : ListProp(default=[]),
        # Elements that depend on me for checks
        'chk_depend_of_me' : ListProp(default=[]),

        'last_state_update' : FloatProp(default=time.time(), fill_brok=['full_status'], retention=True),
        'checks_in_progress' : ListProp(default=[]), # no brok because checks are too linked
        'notifications_in_progress' : ListProp(default={}, retention=True), # no brok because notifications are too linked
        'downtimes' : ListProp(default=[], fill_brok=['full_status'], retention=True),
        'comments' : ListProp(default=[], fill_brok=['full_status'], retention=True),
        'flapping_changes' : ListProp(default=[], fill_brok=['full_status'], retention=True),
        'flapping_comment_id' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'percent_state_change' : FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
        'problem_has_been_acknowledged' : BoolProp(default=False, fill_brok=['full_status'], retention=True),
        'acknowledgement' : StringProp(default=None, retention=True),
        'acknowledgement_type' : IntegerProp(default=1, fill_brok=['full_status', 'check_result'], retention=True),
        'check_type' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'has_been_checked' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'should_be_scheduled' : IntegerProp(default=1, fill_brok=['full_status'], retention=True),
        'last_problem_id' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'current_problem_id' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'execution_time' : FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
        'last_notification' : FloatProp(default=time.time(), fill_brok=['full_status'], retention=True),
        'current_notification_number' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'current_notification_id' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'check_flapping_recovery_notification' : BoolProp(default=True, fill_brok=['full_status'], retention=True),
        'scheduled_downtime_depth' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'pending_flex_downtime' : IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'timeout' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'start_time' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'end_time' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'early_timeout' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'return_code' : IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
        'perf_data' : StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
        'last_perf_data' : StringProp(default='', retention=True),
        'host' : StringProp(default=None),
        'customs' : ListProp(default={}, fill_brok=['full_status']),
        'notified_contacts' : ListProp(default=set()), # used to keep all contacts we have notified
        'in_scheduled_downtime' : BoolProp(default=False, retention=True),
        'in_scheduled_downtime_during_last_check' : BoolProp(default=False, retention=True),
        'actions' : ListProp(default=[]), # put here the checks and notifications raised
        'broks' : ListProp(default=[]), # and here the broks raised

        # All errors and warnings raised during the configuration parsing,
        # which will raise real warnings/errors during the is_correct check
        'configuration_warnings' : ListProp(default=[]),
        'configuration_errors' : ListProp(default=[]),

        # Problem/impact part
        'is_problem' : BoolProp(default=False, fill_brok=['full_status']),
        'is_impact' : BoolProp(default=False, fill_brok=['full_status']),
        # The saved value of our criticity for "problems"
        'my_own_criticity' : IntegerProp(default=-1),
        # List of problems that make us an impact
        'source_problems' : ListProp(default=[], fill_brok=['full_status'], brok_transformation=to_svc_hst_distinct_lists),
        # List of the impacts I'm the cause of
        'impacts' : ListProp(default=[], fill_brok=['full_status'], brok_transformation=to_svc_hst_distinct_lists),
        # Keep a trace of the old state before becoming an impact
        'state_before_impact' : StringProp(default='PENDING'),
        # Keep a trace of the old state id before becoming an impact
        'state_id_before_impact' : IntegerProp(default=0),
        # If the state changed since the impact, we know it, so we do not revert it
        'state_changed_since_impact' : BoolProp(default=False),

        # Easy service dep definition
        'service_dependencies' : ListProp(default=''), # TODO : find a way to brok it?

        # BUSINESS CORRELATOR PART
        # Say if we are a business-rule based service or not
        'got_business_rule' : BoolProp(default=False),
        # Our dependency node for the business rule
        'business_rule' : StringProp(default=None),

        # Here are the elements we depend on,
        # so our parents as a network relation, or a host
        # we depend on in a hostdependency,
        # or even a business-rule parent.
        'parent_dependencies' : StringProp(
            brok_transformation=to_svc_hst_distinct_lists,
            default=[],
            fill_brok=['full_status']),
        # Here are the ones that depend on us, so it's the exact
        # opposite of parent_dependencies
        'child_dependencies' : StringProp(
            brok_transformation=to_svc_hst_distinct_lists,
            default=[],
            fill_brok=['full_status']),
    }

    # Mapping between macros and properties (can be a prop or a function)
    macros = {
        'SERVICEDESC' : 'service_description',
        'SERVICEDISPLAYNAME' : 'display_name',
        'SERVICESTATE' : 'state',
        'SERVICESTATEID' : 'state_id',
        'LASTSERVICESTATE' : 'last_state',
        'LASTSERVICESTATEID' : 'last_state_id',
        'SERVICESTATETYPE' : 'state_type',
        'SERVICEATTEMPT' : 'attempt',
        'MAXSERVICEATTEMPTS' : 'max_check_attempts',
        'SERVICEISVOLATILE' : 'is_volatile',
        'SERVICEEVENTID' : 'current_event_id',
        'LASTSERVICEEVENTID' : 'last_event_id',
        'SERVICEPROBLEMID' : 'current_problem_id',
        'LASTSERVICEPROBLEMID' : 'last_problem_id',
        'SERVICELATENCY' : 'latency',
        'SERVICEEXECUTIONTIME' : 'execution_time',
        'SERVICEDURATION' : 'get_duration',
        'SERVICEDURATIONSEC' : 'get_duration_sec',
        'SERVICEDOWNTIME' : 'get_downtime',
        'SERVICEPERCENTCHANGE' : 'percent_state_change',
        'SERVICEGROUPNAME' : 'get_groupname',
        'SERVICEGROUPNAMES' : 'get_groupnames',
        'LASTSERVICECHECK' : 'last_chk',
        'LASTSERVICESTATECHANGE' : 'last_state_change',
        'LASTSERVICEOK' : 'last_time_ok',
        'LASTSERVICEWARNING' : 'last_time_warning',
        'LASTSERVICEUNKNOWN' : 'last_time_unknown',
        'LASTSERVICECRITICAL' : 'last_time_critical',
        'SERVICEOUTPUT' : 'output',
        'LONGSERVICEOUTPUT' : 'long_output',
        'SERVICEPERFDATA' : 'perf_data',
        'LASTSERVICEPERFDATA' : 'last_perf_data',
        'SERVICECHECKCOMMAND' : 'get_check_command',
        'SERVICEACKAUTHOR' : 'get_ack_author_name',
        'SERVICEACKAUTHORNAME' : 'get_ack_author_name',
        'SERVICEACKAUTHORALIAS' : 'get_ack_author_name',
        'SERVICEACKCOMMENT' : 'get_ack_comment',
        'SERVICEACTIONURL' : 'action_url',
        'SERVICENOTESURL' : 'notes_url',
        'SERVICENOTES' : 'notes',
    }
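
    # Illustrative sketch (not from the original file): MacroResolver uses this
    # mapping to expand $SERVICExxx$ macros in command lines. For example, a
    # hypothetical notification command_line such as
    #     /usr/bin/printf "%b" "$SERVICEDESC$ is $SERVICESTATE$: $SERVICEOUTPUT$"
    # would be resolved against this service's 'service_description', 'state'
    # and 'output' attributes, while entries like 'get_duration' name methods
    # that are called instead of reading an attribute (a prop or a function,
    # as the comment above says).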

    # This table is used to transform old parameter names into new ones,
    # i.e. from the Nagios 2 format to the Nagios 3 one
    old_properties = {
        'normal_check_interval' : 'check_interval',
        'retry_check_interval' : 'retry_interval',
    }
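
    # Illustrative note (not from the original file): with this table, a legacy
    # Nagios 2 style definition using
    #     normal_check_interval  5
    #     retry_check_interval   1
    # is renamed by old_properties_names_to_new() so the rest of the code only
    # sees check_interval=5 and retry_interval=1.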

    # Give a nice name output
    def get_name(self):
        if not self.is_tpl():
            return self.service_description
        else:
            return self.name


    # Get the servicegroups names
    def get_groupnames(self):
        return ','.join([sg.get_name() for sg in self.servicegroups])


    # Need the whole name for debugging purposes
    def get_dbg_name(self):
        return "%s/%s" % (self.host.host_name, self.service_description)


    # Called by pickle to serialize the service.
    # We build a dict because lists are too dangerous for
    # retention saves and co :( even if it's more
    # expensive.
    # The __setstate__ function does the inverse
    def __getstate__(self):
        # print "Asking a getstate for service", self.get_dbg_name()
        cls = self.__class__
        # id is not in *_properties
        res = {'id' : self.id}
        for prop in cls.properties:
            if hasattr(self, prop):
                res[prop] = getattr(self, prop)
        for prop in cls.running_properties:
            if hasattr(self, prop):
                res[prop] = getattr(self, prop)
        return res


    # Inverse function of __getstate__
    def __setstate__(self, state):
        cls = self.__class__
        self.id = state['id']
        for prop in cls.properties:
            if prop in state:
                setattr(self, prop, state[prop])
        for prop in cls.running_properties:
            if prop in state:
                setattr(self, prop, state[prop])

    # Check if the required props are set:
    # templates are always correct
    # contacts OR contact_groups is needed
    def is_correct(self):
        state = True # guilty or not? :)
        cls = self.__class__

        special_properties = ['contacts', 'contact_groups', 'check_period', \
                              'notification_interval', 'host_name', \
                              'hostgroup_name']
        for prop in cls.properties:
            if prop not in special_properties:
                if not hasattr(self, prop) and cls.properties[prop].required:
                    logger.log('%s : I do not have %s' % (self.get_name(), prop))
                    state = False # Bad boy...

        # Raise all previously seen errors, like unknown contacts and co
        if self.configuration_errors != []:
            state = False
            for err in self.configuration_errors:
                logger.log(err)

        # Ok now we manage the special cases...
        if not hasattr(self, 'contacts') \
                and not hasattr(self, 'contact_groups') \
                and self.notifications_enabled == True:
            logger.log('%s : I do not have contacts nor contact_groups' % self.get_name())
            state = False
        if not hasattr(self, 'check_command'):
            logger.log("%s : I've got no check_command" % self.get_name())
            state = False
        # Ok we got a command, but maybe it's invalid
        else:
            if not self.check_command.is_valid():
                logger.log("%s : my check_command %s is invalid" % (self.get_name(), self.check_command.command))
                state = False
        if not hasattr(self, 'notification_interval') \
                and self.notifications_enabled == True:
            logger.log("%s : I've got no notification_interval but I've got notifications enabled" % self.get_name())
            state = False
        if not hasattr(self, 'host') or self.host == None:
            logger.log("%s : I do not have a host" % self.get_name())
            state = False
        if not hasattr(self, 'check_period'):
            self.check_period = None
        if hasattr(self, 'service_description'):
            for c in cls.illegal_object_name_chars:
                if c in self.service_description:
                    logger.log("%s : My service_description has the character %s that is not allowed." % (self.get_name(), c))
                    state = False
        return state

    # The service depends on its host ("daddy" dep).
    # Must be called AFTER linkify
    def fill_daddy_dependancy(self):
        # Depends on the host, for all statuses, it's a network dep,
        # has no timeperiod, and follows the parent's deps
        if self.host is not None:
            # I add the dep in MY list
            self.act_depend_of.append( (self.host,
                                        ['d', 'u', 's', 'f'],
                                        'network_dep',
                                        None, True) )
            # I add the dep in Daddy's list
            self.host.act_depend_of_me.append( (self,
                                                ['d', 'u', 's', 'f'],
                                                'network_dep',
                                                None, True) )

            # And in the parent/child dep lists too
            self.host.register_son_in_parent_child_dependencies(self)
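
    # Illustrative note (not from the original file): entries of act_depend_of /
    # act_depend_of_me and chk_depend_of / chk_depend_of_me are plain tuples of
    # the form
    #     (element, statuses, dep_type, timeperiod, inherits_parent)
    # e.g. the network dep registered above is
    #     (self.host, ['d', 'u', 's', 'f'], 'network_dep', None, True)
    # where statuses lists the short state codes of the father that trigger the
    # dependency, timeperiod optionally restricts when it applies, and
    # inherits_parent says whether the father's own deps are followed too.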

    # Register the dependency between 2 services for actions (notifications etc.)
    def add_service_act_dependancy(self, srv, status, timeperiod, inherits_parent):
        # first I add the one I depend on to MY list
        self.act_depend_of.append( (srv, status, 'logic_dep',
                                    timeperiod, inherits_parent) )
        # then I register myself in the other service's dep list
        srv.act_depend_of_me.append( (self, status, 'logic_dep',
                                      timeperiod, inherits_parent) )

        # And in the parent/child dep lists too
        srv.register_son_in_parent_child_dependencies(self)


    # Register the dependency between 2 services for actions (notifications etc.)
    # but based on a BUSINESS rule, so on facts:
    # ERP depends on the database, so we only fill database.act_depend_of_me,
    # because we still want the ERP mails to go out! So call this
    # on the database service with srv=the ERP service
    def add_business_rule_act_dependancy(self, srv, status, timeperiod, inherits_parent):
        # I only register so it knows that I WILL be an impact
        self.act_depend_of_me.append( (srv, status, 'business_dep',
                                       timeperiod, inherits_parent) )

        # And in the parent/child dep lists too
        self.register_son_in_parent_child_dependencies(srv)


    # Register the dependency between 2 services for checks
    def add_service_chk_dependancy(self, srv, status, timeperiod, inherits_parent):
        # first I add the one I depend on to MY list
        self.chk_depend_of.append( (srv, status, 'logic_dep',
                                    timeperiod, inherits_parent) )
        # then I register myself in the other service's dep list
        srv.chk_depend_of_me.append( (self, status, 'logic_dep',
                                      timeperiod, inherits_parent) )

        # And in the parent/child dep lists too
        srv.register_son_in_parent_child_dependencies(self)

    # Set unreachable : our host is DOWN, but it means nothing for a service
    def set_unreachable(self):
        pass


    # We just got an impact, so we go unreachable,
    # but only if it's enabled in the configuration
    def set_impact_state(self):
        cls = self.__class__
        if cls.enable_problem_impacts_states_change:
            # Keep a trace of the old state (in case the problem goes away before
            # a new check)
            self.state_before_impact = self.state
            self.state_id_before_impact = self.state_id
            # this flag tells us if a new check overrode the impact state
            self.state_changed_since_impact = False
            self.state = 'UNKNOWN' # exit code UNDETERMINED
            self.state_id = 3


    # Ok, we are no longer an impact; if no new checks
    # overrode the impact state, we come back to the old
    # state.
    # And only if the state change for impacts is enabled
    def unset_impact_state(self):
        cls = self.__class__
        if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
            self.state = self.state_before_impact
            self.state_id = self.state_id_before_impact

    # Set the state from the status returned by the check
    # and update the flapping state
    def set_state_from_exit_status(self, status):
        now = time.time()
        self.last_state_update = now

        # We should put the right last state into last_state:
        # if the state was not changed by a problem/impact,
        # we can take the current state. But if it was, the
        # real old state is self.state_before_impact (it's the TRUE
        # state in fact),
        # but only if the global conf has enabled the impact state change
        cls = self.__class__
        if cls.enable_problem_impacts_states_change \
                and self.is_impact \
                and not self.state_changed_since_impact:
            self.last_state = self.state_before_impact
        else: # standard case
            self.last_state = self.state

        if status == 0:
            self.state = 'OK'
            self.state_id = 0
            self.last_time_ok = int(self.last_state_update)
            state_code = 'o'
        elif status == 1:
            self.state = 'WARNING'
            self.state_id = 1
            self.last_time_warning = int(self.last_state_update)
            state_code = 'w'
        elif status == 2:
            self.state = 'CRITICAL'
            self.state_id = 2
            self.last_time_critical = int(self.last_state_update)
            state_code = 'c'
        elif status == 3:
            self.state = 'UNKNOWN'
            self.state_id = 3
            self.last_time_unknown = int(self.last_state_update)
            state_code = 'u'
        else:
            self.state = 'CRITICAL' # exit code UNDETERMINED
            self.state_id = 2
            self.last_time_critical = int(self.last_state_update)
            state_code = 'c'

        if state_code in self.flap_detection_options:
            self.add_flapping_change(self.state != self.last_state)

        if self.state != self.last_state:
            self.last_state_change = self.last_state_update

        self.duration_sec = now - self.last_state_change
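
    # Illustrative summary (not from the original file): the mapping above
    # follows the standard plugin return-code convention,
    #     0 -> OK, 1 -> WARNING, 2 -> CRITICAL, 3 -> UNKNOWN,
    # and any other exit code is treated as CRITICAL. Flapping history is only
    # updated when the new short state code is listed in flap_detection_options.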

    # Return True if status matches the state (like 'OK') or its short form (like 'o')
    def is_state(self, status):
        if status == self.state:
            return True
        # Now the short-form statuses
        elif status == 'o' and self.state == 'OK':
            return True
        elif status == 'c' and self.state == 'CRITICAL':
            return True
        elif status == 'w' and self.state == 'WARNING':
            return True
        elif status == 'u' and self.state == 'UNKNOWN':
            return True
        return False


    # The last time the state was not OK
    def last_time_non_ok_or_up(self):
        non_ok_times = filter(lambda x: x > self.last_time_ok, [self.last_time_warning,
                                                                self.last_time_critical,
                                                                self.last_time_unknown])
        if len(non_ok_times) == 0:
            last_time_non_ok = 0 # program_start would be better
        else:
            last_time_non_ok = min(non_ok_times)
        return last_time_non_ok

    # Add a log entry with a SERVICE ALERT like:
    # SERVICE ALERT: server;Load;UNKNOWN;HARD;1;I don't know what to say...
    def raise_alert_log_entry(self):
        logger.log('SERVICE ALERT: %s;%s;%s;%s;%d;%s' % (self.host.get_name(),
                                                         self.get_name(),
                                                         self.state,
                                                         self.state_type,
                                                         self.attempt,
                                                         self.output))


    # Add a log entry with a freshness alert like:
    # Warning: The results of host 'Server' are stale by 0d 0h 0m 58s (threshold=0d 1h 0m 0s).
    # I'm forcing an immediate check of the host.
    def raise_freshness_log_entry(self, t_stale_by, t_threshold):
        logger.log("Warning: The results of service '%s' on host '%s' are stale by %s (threshold=%s). I'm forcing an immediate check of the service." \
                       % (self.get_name(), self.host.get_name(), format_t_into_dhms_format(t_stale_by), format_t_into_dhms_format(t_threshold)))


    # Raise a log entry with a notification alert like:
    # SERVICE NOTIFICATION: superadmin;server;Load;OK;notify-by-rss;no output
    def raise_notification_log_entry(self, n):
        contact = n.contact
        command = n.command_call
        if n.type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED',
                      'CUSTOM', 'ACKNOWLEDGEMENT', 'FLAPPINGSTART',
                      'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
            state = '%s (%s)' % (n.type, self.state)
        else:
            state = self.state
        if self.__class__.log_notifications:
            logger.log("SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s" % (contact.get_name(),
                                                                    self.host.get_name(),
                                                                    self.get_name(), state,
                                                                    command.get_name(), self.output))


    # Raise a log entry with an event handler alert like:
    # SERVICE EVENT HANDLER: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
    def raise_event_handler_log_entry(self, command):
        if self.__class__.log_event_handlers:
            logger.log("SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s" % (self.host.get_name(),
                                                                     self.get_name(),
                                                                     self.state,
                                                                     self.state_type,
                                                                     self.attempt,
                                                                     command.get_name()))


    # Raise a log entry with a FLAPPING START alert like:
    # SERVICE FLAPPING ALERT: server;LOAD;STARTED; Service appears to have started flapping (50.6% change >= 50.0% threshold)
    def raise_flapping_start_log_entry(self, change_ratio, threshold):
        # Note: '%%' escapes a literal '%' in the format string
        logger.log("SERVICE FLAPPING ALERT: %s;%s;STARTED; Service appears to have started flapping (%.1f%% change >= %.1f%% threshold)" % \
                       (self.host.get_name(), self.get_name(), change_ratio, threshold))


    # Raise a log entry with a FLAPPING STOP alert like:
    # SERVICE FLAPPING ALERT: server;LOAD;STOPPED; Service appears to have stopped flapping (23.0% change < 25.0% threshold)
    def raise_flapping_stop_log_entry(self, change_ratio, threshold):
        logger.log("SERVICE FLAPPING ALERT: %s;%s;STOPPED; Service appears to have stopped flapping (%.1f%% change < %.1f%% threshold)" % \
                       (self.host.get_name(), self.get_name(), change_ratio, threshold))


    # If there is no valid time for the next check, raise a log entry
    def raise_no_next_check_log_entry(self):
        logger.log("Warning : I cannot schedule the check for the service '%s' on host '%s' because there is no future valid time" % \
                       (self.get_name(), self.host.get_name()))


    # Raise a log entry when a downtime begins
    # SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STARTED; Service has entered a period of scheduled downtime
    def raise_enter_downtime_log_entry(self):
        logger.log("SERVICE DOWNTIME ALERT: %s;%s;STARTED; Service has entered a period of scheduled downtime" % \
                       (self.host.get_name(), self.get_name()))


    # Raise a log entry when a downtime has finished
    # SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STOPPED; Service has exited from a period of scheduled downtime
    def raise_exit_downtime_log_entry(self):
        logger.log("SERVICE DOWNTIME ALERT: %s;%s;STOPPED; Service has exited from a period of scheduled downtime" % \
                       (self.host.get_name(), self.get_name()))


    # Raise a log entry when a downtime prematurely ends
    # SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;CANCELLED; Scheduled downtime for service has been cancelled.
    def raise_cancel_downtime_log_entry(self):
        logger.log("SERVICE DOWNTIME ALERT: %s;%s;CANCELLED; Scheduled downtime for service has been cancelled." % \
                       (self.host.get_name(), self.get_name()))

    # Is stalking needed?
    # Triggered if the check is in waitconsume (== first time seen)
    # and if c.exit_status is in self.stalking_options
    def manage_stalking(self, c):
        need_stalk = False
        if c.status == 'waitconsume':
            if c.exit_status == 0 and 'o' in self.stalking_options:
                need_stalk = True
            elif c.exit_status == 1 and 'w' in self.stalking_options:
                need_stalk = True
            elif c.exit_status == 2 and 'c' in self.stalking_options:
                need_stalk = True
            elif c.exit_status == 3 and 'u' in self.stalking_options:
                need_stalk = True
            if c.output == self.output:
                need_stalk = False
        if need_stalk:
            logger.log("Stalking %s : %s" % (self.get_name(), c.output))


    # Give data for the checks' macros
    def get_data_for_checks(self):
        return [self.host, self]


    # Give data for the event handlers' macros
    def get_data_for_event_handler(self):
        return [self.host, self]


    # Give data for the notifications' macros
    def get_data_for_notifications(self, contact, n):
        return [self.host, self, contact, n]


    # See if the notification is launchable (time is OK and contact is OK too)
    def notification_is_blocked_by_contact(self, n, contact):
        return not contact.want_service_notification(self.last_chk, self.state, n.type, self.criticity)

    def get_duration_sec(self):
        return str(int(self.duration_sec))


    def get_duration(self):
        m, s = divmod(self.duration_sec, 60)
        h, m = divmod(m, 60)
        return "%02dh %02dm %02ds" % (h, m, s)


    def get_ack_author_name(self):
        if self.acknowledgement == None:
            return ''
        return self.acknowledgement.author


    def get_ack_comment(self):
        if self.acknowledgement == None:
            return ''
        return self.acknowledgement.comment


    def get_check_command(self):
        return self.check_command.get_name()

    # Check if a notification for this service is suppressed at this time
    def notification_is_blocked_by_item(self, type, t_wished = None):
        if t_wished == None:
            t_wished = time.time()

        # TODO
        # forced notification
        # pass if this is a custom notification

        # Block if notifications are program-wide disabled
        if not self.enable_notifications:
            return True

        # Does the notification period allow sending out this notification?
        if not self.notification_period.is_time_valid(t_wished):
            return True

        # Block if notifications are disabled for this service
        if not self.notifications_enabled:
            return True

        # Block if the current state is not covered by notification_options (w,u,c,r,f,s)
        if 'n' in self.notification_options:
            return True
        if type in ('PROBLEM', 'RECOVERY'):
            if self.state == 'UNKNOWN' and not 'u' in self.notification_options:
                return True
            if self.state == 'WARNING' and not 'w' in self.notification_options:
                return True
            if self.state == 'CRITICAL' and not 'c' in self.notification_options:
                return True
            if self.state == 'OK' and not 'r' in self.notification_options:
                return True
        if (type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED')
                and not 'f' in self.notification_options):
            return True
        if (type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED')
                and not 's' in self.notification_options):
            return True

        # Acknowledgements make no sense when the status is ok/up
        if type == 'ACKNOWLEDGEMENT':
            if self.state == self.ok_up:
                return True

        # When in downtime, only allow end-of-downtime notifications
        if self.scheduled_downtime_depth > 1 and type not in ('DOWNTIMEEND', 'DOWNTIMECANCELLED'):
            return True

        # Block if the host is in a scheduled downtime
        if self.host.scheduled_downtime_depth > 0:
            return True

        # Block if in a scheduled downtime and a problem or recovery arises
        if self.scheduled_downtime_depth > 0 and type in ('PROBLEM', 'RECOVERY'):
            return True

        # Block if the state type is SOFT
        if self.state_type == 'SOFT' and type == 'PROBLEM':
            return True

        # Block if the problem has already been acknowledged
        if self.problem_has_been_acknowledged and type != 'ACKNOWLEDGEMENT':
            return True

        # Block if flapping
        if self.is_flapping:
            return True

        # Block if the host is down
        if self.host.state != self.host.ok_up:
            return True

        return False
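
    # Illustrative example (not from the original file): with
    # notification_options set to 'c,r', a PROBLEM notification for a WARNING
    # state is blocked by the checks above ('w' is missing), while CRITICAL
    # problems and RECOVERY notifications pass this filter and are then still
    # subject to the downtime, acknowledgement, flapping and host-state blocks.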

    # Get an oc*p command if the item has obsess_over_*
    # enabled. It must be enabled both locally and globally
    def get_obsessive_compulsive_processor_command(self):
        cls = self.__class__
        if not cls.obsess_over or not self.obsess_over_service:
            return

        m = MacroResolver()
        data = self.get_data_for_event_handler()
        cmd = m.resolve_command(cls.ocsp_command, data)
        e = EventHandler(cmd, timeout=cls.ocsp_timeout)

        # ok we can put it in our temp action queue
        self.actions.append(e)

    # Create duplicate services from a "generator" service, one per key
    # found in the host custom macro named by duplicate_foreach
    def duplicate(self, host):
        duplicates = []

        # In macros, it's all in UPPER case
        prop = self.duplicate_foreach.strip().upper()
        # If the host does not have the property, we just return an empty list
        if prop in host.customs:
            entry = host.customs[prop]

            default_value = getattr(self, 'default_value', None)
            # Transform the generator string into a list.
            # Missing values are filled with the default value
            (key_values, errcode) = get_key_value_sequence(entry, default_value)

            if key_values:
                for key_value in key_values:
                    key = key_value['KEY']
                    value = key_value['VALUE']
                    new_s = self.copy()
                    new_s.host_name = host.get_name()
                    if self.is_tpl(): # if it's a template, the new one is not
                        new_s.register = 1
                    for key in key_value:
                        if key == 'KEY':
                            if hasattr(self, 'service_description'):
                                new_s.service_description = self.service_description.replace('$'+key+'$', key_value[key])
                        if hasattr(self, 'check_command'):
                            # here we can replace VALUE, VALUE1, VALUE2, ...
                            new_s.check_command = new_s.check_command.replace('$'+key+'$', key_value[key])
                    # And then add this new service to our list
                    duplicates.append(new_s)
            else:
                if errcode == GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX:
                    err = "The custom property '%s' of the host '%s' is not a valid entry %s for a service generator" % (self.duplicate_foreach.strip(), host.get_name(), entry)
                    self.configuration_errors.append(err)
                elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT:
                    err = "The custom property '%s' of the host '%s' has empty values %s but the service %s has no default_value" % (self.duplicate_foreach.strip(), host.get_name(), entry, self.service_description)
                    self.configuration_errors.append(err)
                elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODE:
                    err = "The custom property '%s' of the host '%s' has an invalid node range %s" % (self.duplicate_foreach.strip(), host.get_name(), entry)
                    self.configuration_errors.append(err)
        return duplicates
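
    # Illustrative sketch (not from the original file), assuming the usual
    # generator syntax: for a host custom macro entry like
    #     _disks    C$(80%!90%)$,D,E
    # get_key_value_sequence() is expected to return one dict per element, each
    # holding at least 'KEY' and 'VALUE' (e.g. {'KEY': 'C', 'VALUE': '80%!90%'}),
    # with elements carrying no explicit value falling back to default_value.
    # The exact entry syntax is an assumption here; only the KEY/VALUE dict
    # shape is taken from the loop above.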


class Services(Items):
    inner_class = Service # used to know what is in the items

    # Create the reversed list to speed up searches by (host_name, service_description).
    # We also tag services already in the list: they are twins, which is a bad thing.
    # Hostgroup services have a higher ID than host services, so if we tag
    # an id that is already in the list, this service already exists and comes
    # from a host, or from a hostgroup defined before this one.
    def create_reversed_list(self):
        self.reversed_list = {}
        self.twins = []
        for s in self:
            if hasattr(s, 'service_description') and hasattr(s, 'host_name'):
                s_desc = getattr(s, 'service_description')
                s_host_name = getattr(s, 'host_name')
                key = (s_host_name, s_desc)
                if key not in self.reversed_list:
                    self.reversed_list[key] = s.id
                else:
                    self.twins.append(s.id)
        # For services, the reversed_list is not used for
        # searching, so we delete it
        del self.reversed_list

    # TODO : finish the search so it uses the reversed list
    # Search a service id by its service_description and host_name
    def find_srv_id_by_name_and_hostname(self, host_name, name):
        # key = (host_name, name)
        # if key in self.reversed_list:
        #     return self.reversed_list[key]

        # if not, maybe in the whole list?
        for s in self:
            # Runtime first, available only after linkify
            if hasattr(s, 'service_description') and hasattr(s, 'host'):
                if s.service_description == name and s.host == host_name:
                    return s.id
            # At config time, available before linkify
            if hasattr(s, 'service_description') and hasattr(s, 'host_name'):
                if s.service_description == name and s.host_name == host_name:
                    return s.id
        return None


    # Search a service by its service_description and host_name
    def find_srv_by_name_and_hostname(self, host_name, name):
        if hasattr(self, 'hosts'):
            h = self.hosts.find_by_name(host_name)
            if h == None:
                return None
            return h.find_service_by_name(name)

        id = self.find_srv_id_by_name_and_hostname(host_name, name)
        if id is not None:
            return self.items[id]
        else:
            return None

    # Make the links between elements:
    # service -> host
    # service -> command
    # service -> timeperiods
    # service -> contacts
    def linkify(self, hosts, commands, timeperiods, contacts,
                resultmodulations, escalations, servicegroups):
        self.linkify_with_timeperiods(timeperiods, 'notification_period')
        self.linkify_with_timeperiods(timeperiods, 'check_period')
        self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
        self.linkify_s_by_hst(hosts)
        self.linkify_s_by_sg(servicegroups)
        self.linkify_one_command_with_commands(commands, 'check_command')
        self.linkify_one_command_with_commands(commands, 'event_handler')
        self.linkify_with_contacts(contacts)
        self.linkify_with_resultmodulations(resultmodulations)
        # WARNING: not all escalations are linked here
        # (just the plain escalations, not serviceescalations or hostescalations).
        # Those are linked in the escalations' own linkify.
        self.linkify_with_escalations(escalations)


    # We can link services with hosts so
    # we can search in O(hosts) instead
    # of O(services) for common cases
    def optimize_service_search(self, hosts):
        self.hosts = hosts


    # For each service we look up its host by name,
    # replace the name with the host object,
    # and inform the host that we are one of its services
    def linkify_s_by_hst(self, hosts):
        for s in self:
            try:
                hst_name = s.host_name
                # The new member list, in id
                hst = hosts.find_by_name(hst_name)
                s.host = hst
                # Let the host know we are its service
                if s.host is not None:
                    hst.add_service_link(s)
            except AttributeError, exp:
                pass # Will be caught at the is_correct step

    # We look for the servicegroups property in services and
    # link them
    def linkify_s_by_sg(self, servicegroups):
        for s in self:
            if not s.is_tpl():
                new_servicegroups = []
                if hasattr(s, 'servicegroups') and s.servicegroups != '':
                    sgs = s.servicegroups.split(',')
                    for sg_name in sgs:
                        sg_name = sg_name.strip()
                        sg = servicegroups.find_by_name(sg_name)
                        if sg != None:
                            new_servicegroups.append(sg)
                        else:
                            err = "Error : the servicegroup '%s' of the service '%s' is unknown" % (sg_name, s.get_dbg_name())
                            s.configuration_errors.append(err)
                s.servicegroups = new_servicegroups


    # Delete services by ids
    def delete_services_by_id(self, ids):
        for id in ids:
            del self.items[id]

    # It's used to change old Nagios 2 names to
    # Nagios 3 ones
    def old_properties_names_to_new(self):
        for s in self:
            s.old_properties_names_to_new()


    # Apply implicit inheritance for special properties:
    # contact_groups, notification_interval, notification_period, ...
    # So the service will take the info from its host if necessary
    def apply_implicit_inheritance(self, hosts):
        for prop in ['contacts', 'contact_groups', 'notification_interval', \
                     'notification_period', 'resultmodulations', 'escalations', \
                     'poller_tag', 'check_period', 'criticity']:
            for s in self:
                if not s.is_tpl():
                    if not hasattr(s, prop) and hasattr(s, 'host_name'):
                        h = hosts.find_by_name(s.host_name)
                        if h is not None and hasattr(h, prop):
                            setattr(s, prop, getattr(h, prop))
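
    # Illustrative example (not from the original file): if a service definition
    # omits 'contacts' and 'notification_period' but its host defines them, the
    # loop above copies the host's values onto the service, so host-level
    # notification settings implicitly apply to its services unless overridden.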

    # Apply inheritance for all properties
    def apply_inheritance(self, hosts):
        # For every service property we check if the service has it;
        # if not, we check all its templates for a value
        for prop in Service.properties:
            self.apply_partial_inheritance(prop)

        # Then implicit inheritance
        # self.apply_implicit_inheritance(hosts)
        for s in self:
            s.get_customs_properties_by_inheritance(self)


    # Create the dependencies for services (the "daddy" host ones)
    def apply_dependancies(self):
        for s in self:
            s.fill_daddy_dependancy()

    # Add to our queue services created from another one. Special case:
    # if it's a template, hname is a template name, so we need to get all
    # hosts that inherit from it.
    def copy_create_service_from_another(self, hosts, s, hname):
        for_hosts_to_create = []
        # if we are not a template, it's easy: copy for all host_names
        # because they are our final host_names after all
        if not s.is_tpl():
            for_hosts_to_create.append(hname)

        # But for a template it's more tricky: we've got a template name,
        # not a real host_name. So we must get the list of host_names
        # that use this template
        else:
            hosts_from_tpl = hosts.find_hosts_that_use_template(hname)
            # And now copy our real services
            for n in hosts_from_tpl:
                for_hosts_to_create.append(n)

        # Now really create the services
        for name in for_hosts_to_create:
            if not hasattr(s, 'duplicate_foreach') or s.duplicate_foreach == '':
                new_s = s.copy()
                new_s.host_name = name
                if s.is_tpl(): # if it's a template, the new one is not
                    new_s.register = 1
                self.items[new_s.id] = new_s
            else: # the generator case, we must create several new services
                # we must find our host and get all the key:value pairs we need
                h = hosts.find_by_name(name.strip())
                if h != None:
                    for new_s in s.duplicate(h):
                        self.items[new_s.id] = new_s
                else: # TODO : raise an error?
                    pass

    # We create new services if necessary (host groups and co)
    def explode(self, hosts, hostgroups, contactgroups,
                servicegroups, servicedependencies):
        # The "old" services will be removed. All services with
        # more than one host or a host group will be in it
        srv_to_remove = []

        # items::explode_host_groups_into_hosts
        # take all hosts from our hostgroup_name into our host_name property
        self.explode_host_groups_into_hosts(hosts, hostgroups)

        # items::explode_contact_groups_into_contacts
        # take all contacts from our contact_groups into our contacts property
        self.explode_contact_groups_into_contacts(contactgroups)

        # Then for every host create a copy of the service with just that host.
        # Because we are adding services, we can't just loop over self.items
        service_to_check = self.items.keys()

        for id in service_to_check:
            s = self.items[id]
            duplicate_for_hosts = [] # the list of our host_names if there is more than 1
            not_hosts = [] # the list of !host_name entries so we can remove them afterwards

            # print "Looking for s", s
            # if hasattr(s, 'duplicate_foreach'):
            #     print s.duplicate_foreach

            # if not s.is_tpl(): # Exploding a template is useless
            # Explode for a real service or a template with a host_name
            if hasattr(s, 'host_name'):
                hnames = s.host_name.split(',')
                hnames = strip_and_uniq(hnames)
                # We will duplicate if we have multiple host_names
                # or if we are a template (so a clean service)
                # print "WHERE", len(hnames) >= 2 or s.is_tpl()
                if len(hnames) >= 2 or s.is_tpl() \
                        or (hasattr(s, 'duplicate_foreach') and s.duplicate_foreach != ''):
                    for hname in hnames:
                        hname = hname.strip()

                        # If the name begins with a !, we put it in
                        # the not list
                        if len(hname) > 0 and hname[0] == '!':
                            not_hosts.append(hname[1:])
                        else: # the standard list
                            duplicate_for_hosts.append(hname)

                    # Ok now we remove from duplicate_for_hosts all hosts
                    # of the not list
                    for hname in not_hosts:
                        if hname in duplicate_for_hosts:
                            duplicate_for_hosts.remove(hname)

                    # Now we duplicate the service for all host_names
                    for hname in duplicate_for_hosts:
                        self.copy_create_service_from_another(hosts, s, hname)

                    # Multiple host_names -> the original service
                    # must be deleted. But templates are cleaned elsewhere,
                    # and only if the service has no error in its conf
                    if not s.is_tpl() and s.configuration_errors == []:
                        srv_to_remove.append(id)

                else: # Maybe hnames was full of the same host,
                      # so we must reset the name
                    for hname in hnames: # So even if len == 0, we are protected
                        s.host_name = hname

        # We clean all services that were for multiple hosts.
        self.delete_services_by_id(srv_to_remove)

        # The servicegroups property needs to be filled in to get the information,
        # and then we just register to this service_group
        for s in self:
            if not s.is_tpl():
                sname = s.service_description
                shname = s.host_name
                if hasattr(s, 'servicegroups'):
                    sgs = s.servicegroups.split(',')
                    for sg in sgs:
                        servicegroups.add_member(shname+','+sname, sg)

        # Now we explode service_dependencies into Servicedependency objects.
        # We just create the ServiceDep with the good values (as STRINGs!),
        # the link pass will be done afterwards
        for s in self:
            # Templates are useless here
            if not s.is_tpl():
                if hasattr(s, 'service_dependencies'):
                    if s.service_dependencies != '':
                        sdeps = s.service_dependencies.split(',')
                        # i % 2 == 0 entries are hosts, the others are service_descriptions
                        i = 0
                        hname = ''
                        for elt in sdeps:
                            if i % 2 == 0: # host
                                hname = elt
                            else: # description
                                desc = elt
                                # we can register it: (s) (depends on) -> (hname, desc)
                                # If we do not have enough data about s, it's no use
                                if hasattr(s, 'service_description') and hasattr(s, 'host_name'):
                                    servicedependencies.add_service_dependency(s.host_name, s.service_description, hname, desc)
                            i += 1
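
    # Illustrative note (not from the original file): service_dependencies is a
    # flat comma-separated list alternating host names and service descriptions,
    # e.g.
    #     service_dependencies   srv-db,Mysql,srv-db,Ssh
    # which the loop at the end of explode() turns into two ServiceDependency
    # entries: this service depends on (srv-db, Mysql) and on (srv-db, Ssh).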

    # Will create all the business rule trees for the
    # services
    def create_business_rules(self, hosts, services):
        for s in self:
            s.create_business_rules(hosts, services)


    # Will link all business services/hosts with their
    # deps for the problem/impact links
    def create_business_rules_dependencies(self):
        for s in self:
            s.create_business_rules_dependencies()