Fix : get back LiveStatus as default.
[shinken.git] / shinken / objects / config.py
blob37592560d7ca86bb2a7fed866271e6b76868a287
1 #!/usr/bin/env python
2 #Copyright (C) 2009-2010 :
3 # Gabes Jean, naparuba@gmail.com
4 # Gerhard Lausser, Gerhard.Lausser@consol.de
5 # Gregory Starck, g.starck@gmail.com
6 # Hartmut Goebel, h.goebel@goebel-consult.de
8 #This file is part of Shinken.
10 #Shinken is free software: you can redistribute it and/or modify
11 #it under the terms of the GNU Affero General Public License as published by
12 #the Free Software Foundation, either version 3 of the License, or
13 #(at your option) any later version.
15 #Shinken is distributed in the hope that it will be useful,
16 #but WITHOUT ANY WARRANTY; without even the implied warranty of
17 #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 #GNU Affero General Public License for more details.
20 #You should have received a copy of the GNU Affero General Public License
21 #along with Shinken. If not, see <http://www.gnu.org/licenses/>.
24 """ Config is the class to read, load and manipulate the user
25 configuration. It read a main cfg (nagios.cfg) and get all informations
26 from it. It create objects, make link between them, clean them, and cut
27 them into independant parts. The main user of this is Arbiter, but schedulers
28 use it too (but far less)"""
30 import re
31 import string
32 import copy
33 import os
34 import socket
35 import itertools
36 import time
37 import random
39 from shinken.objects import *
41 from shinken.arbiterlink import ArbiterLink, ArbiterLinks
42 from shinken.schedulerlink import SchedulerLink, SchedulerLinks
43 from shinken.reactionnerlink import ReactionnerLink, ReactionnerLinks
44 from shinken.brokerlink import BrokerLink, BrokerLinks
45 from shinken.pollerlink import PollerLink, PollerLinks
46 from shinken.graph import Graph
47 from shinken.log import logger
49 from shinken.property import UnusedProp, BoolProp, IntegerProp, CharProp, StringProp
# Shared explanation texts reused by many UnusedProp entries in Config.properties below.
no_longer_used_txt = 'This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'
not_interresting_txt = 'We do not think such an option is interesting to manage.'
56 class Config(Item):
    # File name used for the cached-objects dump and the type tag of this item.
    cache_path = "objects.cache"
    my_type = "config"
    # Properties:
    # *required : if True, there is no default and the config must provide a value
    # *default : if not set, take this value
    # *pythonize : function called to convert the raw string value
    # *class_inherit : (Service, 'blabla') : set this property on the
    #   Service class under the name blabla;
    #   (Service, None) : set it on the Service class under the same name
    # *unused : just to warn the user that the option he uses is no longer
    #   used in Shinken
    # *usage_text : if present, printed to explain why it's no more useful
    properties = {
        'prefix': StringProp(default='/usr/local/shinken/'),
        'log_file': UnusedProp(text=no_longer_used_txt),
        'object_cache_file': UnusedProp(text=no_longer_used_txt),
        'precached_object_file': UnusedProp(text='Shinken is faster enough to do not need precached object file.'),
        'resource_file': StringProp(default='/tmp/ressources.txt'),
        'temp_file': UnusedProp(text=' temporary files are not used in the shinken architecture.'),
        'status_file': UnusedProp(text=no_longer_used_txt),
        'status_update_interval': UnusedProp(text=no_longer_used_txt),
        'shinken_user': StringProp(default='shinken'),
        'shinken_group': StringProp(default='shinken'),
        'enable_notifications': BoolProp(default='1', class_inherit=[(Host, None), (Service, None), (Contact, None)]),
        'execute_service_checks': BoolProp(default='1', class_inherit=[(Service, 'execute_checks')]),
        'accept_passive_service_checks': BoolProp(default='1', class_inherit=[(Service, 'accept_passive_checks')]),
        'execute_host_checks': BoolProp(default='1', class_inherit=[(Host, 'execute_checks')]),
        'accept_passive_host_checks': BoolProp(default='1', class_inherit=[(Host, 'accept_passive_checks')]),
        'enable_event_handlers': BoolProp(default='1', class_inherit=[(Host, None), (Service, None)]),
        'log_rotation_method': CharProp(default='d'),
        'log_archive_path': StringProp(default='/usr/local/shinken/var/archives'),
        'check_external_commands': BoolProp(default='1'),
        'command_check_interval': UnusedProp(text='anoter value than look always the file is useless, so we fix it.'),
        'command_file': StringProp(default='/tmp/command.cmd'),
        'external_command_buffer_slots': UnusedProp(text='We do not limit the external command slot.'),
        'check_for_updates': UnusedProp(text='network administrators will never allow such communication between server and the external world. Use your distribution packet manager to know if updates are available or go to the http://www.shinken-monitoring.org website instead.'),
        'bare_update_checks': UnusedProp(text=None),
        'lock_file': StringProp(default='/usr/local/shinken/var/arbiterd.pid'),
        'retain_state_information': UnusedProp(text='sorry, retain state information will not be implemented because it is useless.'),
        'state_retention_file': StringProp(default=''),
        'retention_update_interval': IntegerProp(default='0'),
        'use_retained_program_state': UnusedProp(text=not_interresting_txt),
        'use_retained_scheduling_info': UnusedProp(text=not_interresting_txt),
        'retained_host_attribute_mask': UnusedProp(text=not_interresting_txt),
        'retained_service_attribute_mask': UnusedProp(text=not_interresting_txt),
        'retained_process_host_attribute_mask': UnusedProp(text=not_interresting_txt),
        'retained_process_service_attribute_mask': UnusedProp(text=not_interresting_txt),
        'retained_contact_host_attribute_mask': UnusedProp(text=not_interresting_txt),
        'retained_contact_service_attribute_mask': UnusedProp(text=not_interresting_txt),
        'use_syslog': BoolProp(default='0'),
        'log_notifications': BoolProp(default='1', class_inherit=[(Host, None), (Service, None)]),
        'log_service_retries': BoolProp(default='1', class_inherit=[(Service, 'log_retries')]),
        'log_host_retries': BoolProp(default='1', class_inherit=[(Host, 'log_retries')]),
        'log_event_handlers': BoolProp(default='1', class_inherit=[(Host, None), (Service, None)]),
        'log_initial_states': BoolProp(default='1'),
        'log_external_commands': BoolProp(default='1'),
        'log_passive_checks': BoolProp(default='1'),
        'global_host_event_handler': StringProp(default='', class_inherit=[(Host, 'global_event_handler')]),
        'global_service_event_handler': StringProp(default='', class_inherit=[(Service, 'global_event_handler')]),
        'sleep_time': UnusedProp(text='this deprecated option is useless in the shinken way of doing.'),
        'service_inter_check_delay_method': UnusedProp(text='This option is useless in the Shinken scheduling. The only way is the smart way.'),
        'max_service_check_spread': IntegerProp(default='30', class_inherit=[(Service, 'max_check_spread')]),
        'service_interleave_factor': UnusedProp(text='This option is useless in the Shinken scheduling because it use a random distribution for initial checks.'),
        'max_concurrent_checks': UnusedProp(text='Limiting the max concurrent checks is not helful to got a good running monitoring server.'),
        'check_result_reaper_frequency': UnusedProp(text='Shinken do not use reaper process.'),
        'max_check_result_reaper_time': UnusedProp(text='Shinken do not use reaper process.'),
        'check_result_path': UnusedProp(text='Shinken use in memory returns, not check results on flat file.'),
        'max_check_result_file_age': UnusedProp(text='Shinken do not use flat file check resultfiles.'),
        'host_inter_check_delay_method': UnusedProp(text='This option is unused in the Shinken scheduling because distribution of the initial check is a random one.'),
        'max_host_check_spread': IntegerProp(default='30', class_inherit=[(Host, 'max_check_spread')]),
        'interval_length': IntegerProp(default='60', class_inherit=[(Host, None), (Service, None)]),
        'auto_reschedule_checks': BoolProp(managed=False, default='1'),
        'auto_rescheduling_interval': IntegerProp(managed=False, default='1'),
        'auto_rescheduling_window': IntegerProp(managed=False, default='180'),
        'use_aggressive_host_checking': UnusedProp(text='Host agressive checking is an heritage from Nagios 1 and is really useless now.'),
        'translate_passive_host_checks': BoolProp(managed=False, default='1'),
        'passive_host_checks_are_soft': BoolProp(managed=False, default='1'),
        'enable_predictive_host_dependency_checks': BoolProp(managed=False, default='1', class_inherit=[(Host, 'enable_predictive_dependency_checks')]),
        # NOTE(review): declared StringProp while its host twin is BoolProp — presumably
        # both should be BoolProp; confirm before changing.
        'enable_predictive_service_dependency_checks': StringProp(managed=False, default='1'),
        'cached_host_check_horizon': IntegerProp(default='0', class_inherit=[(Host, 'cached_check_horizon')]),
        'cached_service_check_horizon': IntegerProp(default='0', class_inherit=[(Service, 'cached_check_horizon')]),
        'use_large_installation_tweaks': BoolProp(default='0', class_inherit=[(Host, None), (Service, None)]),
        'free_child_process_memory': UnusedProp(text='this option is automatic in Python processes'),
        'child_processes_fork_twice': UnusedProp(text='fork twice is not use.'),
        'enable_environment_macros': BoolProp(default='1', class_inherit=[(Host, None), (Service, None)]),
        'enable_flap_detection': BoolProp(default='1', class_inherit=[(Host, None), (Service, None)]),
        'low_service_flap_threshold': IntegerProp(default='25', class_inherit=[(Service, 'low_flap_threshold')]),
        'high_service_flap_threshold': IntegerProp(default='50', class_inherit=[(Service, 'high_flap_threshold')]),
        'low_host_flap_threshold': IntegerProp(default='25', class_inherit=[(Host, 'low_flap_threshold')]),
        'high_host_flap_threshold': IntegerProp(default='50', class_inherit=[(Host, 'high_flap_threshold')]),
        'soft_state_dependencies': BoolProp(managed=False, default='0'),
        'service_check_timeout': IntegerProp(default='10', class_inherit=[(Service, 'check_timeout')]),
        'host_check_timeout': IntegerProp(default='10', class_inherit=[(Host, 'check_timeout')]),
        'event_handler_timeout': IntegerProp(default='10', class_inherit=[(Host, None), (Service, None)]),
        'notification_timeout': IntegerProp(default='5', class_inherit=[(Host, None), (Service, None)]),
        'ocsp_timeout': IntegerProp(default='5', class_inherit=[(Service, None)]),
        'ochp_timeout': IntegerProp(default='5', class_inherit=[(Host, None)]),
        'perfdata_timeout': IntegerProp(default='2', class_inherit=[(Host, None), (Service, None)]),
        'obsess_over_services': BoolProp(default='0', class_inherit=[(Service, 'obsess_over')]),
        'ocsp_command': StringProp(default='', class_inherit=[(Service, None)]),
        'obsess_over_hosts': BoolProp(default='0', class_inherit=[(Host, 'obsess_over')]),
        'ochp_command': StringProp(default='', class_inherit=[(Host, None)]),
        'process_performance_data': BoolProp(default='1', class_inherit=[(Host, None), (Service, None)]),
        'host_perfdata_command': StringProp(default='', class_inherit=[(Host, 'perfdata_command')]),
        'service_perfdata_command': StringProp(default='', class_inherit=[(Service, 'perfdata_command')]),
        'host_perfdata_file': StringProp(default='', class_inherit=[(Host, 'perfdata_file')]),
        'service_perfdata_file': StringProp(default='', class_inherit=[(Service, 'perfdata_file')]),
        'host_perfdata_file_template': StringProp(default='/tmp/host.perf', class_inherit=[(Host, 'perfdata_file_template')]),
        # NOTE(review): same default '/tmp/host.perf' as the host template —
        # looks like a copy-paste; probably meant '/tmp/service.perf'. Confirm.
        'service_perfdata_file_template': StringProp(default='/tmp/host.perf', class_inherit=[(Service, 'perfdata_file_template')]),
        'host_perfdata_file_mode': CharProp(default='a', class_inherit=[(Host, 'perfdata_file_mode')]),
        'service_perfdata_file_mode': CharProp(default='a', class_inherit=[(Service, 'perfdata_file_mode')]),
        'host_perfdata_file_processing_interval': IntegerProp(managed=False, default='15'),
        'service_perfdata_file_processing_interval': IntegerProp(managed=False, default='15'),
        'host_perfdata_file_processing_command': StringProp(managed=False, default='', class_inherit=[(Host, 'perfdata_file_processing_command')]),
        'service_perfdata_file_processing_command': StringProp(managed=False, default=None),
        'check_for_orphaned_services': BoolProp(default='1', class_inherit=[(Service, 'check_for_orphaned')]),
        'check_for_orphaned_hosts': BoolProp(default='1', class_inherit=[(Host, 'check_for_orphaned')]),
        'check_service_freshness': BoolProp(default='1', class_inherit=[(Service, 'check_freshness')]),
        'service_freshness_check_interval': IntegerProp(default='60'),
        'check_host_freshness': BoolProp(default='1', class_inherit=[(Host, 'check_freshness')]),
        'host_freshness_check_interval': IntegerProp(default='60'),
        'additional_freshness_latency': IntegerProp(default='15', class_inherit=[(Host, None), (Service, None)]),
        'enable_embedded_perl': BoolProp(managed=False, default='1', help='It will surely never be managed, but it should not be useful with poller performances.'),
        'use_embedded_perl_implicitly': BoolProp(managed=False, default='0'),
        'date_format': StringProp(managed=False, default=None),
        'use_timezone': StringProp(default='', class_inherit=[(Host, None), (Service, None), (Contact, None)]),
        'illegal_object_name_chars': StringProp(default="""`~!$%^&*"|'<>?,()=""", class_inherit=[(Host, None), (Service, None), (Contact, None)]),
        'illegal_macro_output_chars': StringProp(default='', class_inherit=[(Host, None), (Service, None), (Contact, None)]),
        'use_regexp_matching': BoolProp(managed=False, default='0', help=' if you go some host or service definition like prod*, it will surely failed from now, sorry.'),
        'use_true_regexp_matching': BoolProp(managed=False, default=None),
        'admin_email': UnusedProp(text='sorry, not yet implemented.'),
        'admin_pager': UnusedProp(text='sorry, not yet implemented.'),
        'event_broker_options': UnusedProp(text='event broker are replaced by modules with a real configuration template.'),
        'broker_module': StringProp(default=''),
        'debug_file': UnusedProp(text=None),
        'debug_level': UnusedProp(text=None),
        'debug_verbosity': UnusedProp(text=None),
        'max_debug_file_size': UnusedProp(text=None),
        #'$USERn$ : {'required':False, 'default':''} # Added at run time in __init__

        # SHINKEN SPECIFIC
        'idontcareaboutsecurity': BoolProp(default='0'),
        'flap_history': IntegerProp(default='20', class_inherit=[(Host, None), (Service, None)]),
        'max_plugins_output_length': IntegerProp(default='8192', class_inherit=[(Host, None), (Service, None)]),

        # Enable or not the notice about old Nagios parameters
        'disable_old_nagios_parameters_whining': BoolProp(default='0'),

        # Now for problem/impact states changes
        'enable_problem_impacts_states_change': BoolProp(default='0', class_inherit=[(Host, None), (Service, None)]),

        # More a running value in fact
        'resource_macros_names': StringProp(default=[]),

        # SSL PART
        # global boolean to know whether we use ssl or not
        'use_ssl': BoolProp(default='0', class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
                                                        (BrokerLink, None), (PollerLink, None), (ArbiterLink, None)]),
        'certs_dir': StringProp(default='etc/certs'),
        'ca_cert': StringProp(default='etc/certs/ca.pem'),
        'server_cert': StringProp(default='etc/certs/server.pem'),
        'hard_ssl_name_check': BoolProp(default='0'),
    }
    # Mapping of Nagios-style $MACRO$ names to the config attribute that
    # backs them; an empty value means the macro exists only for
    # compatibility and resolves to nothing.
    macros = {
        'PREFIX': 'prefix',
        'MAINCONFIGFILE': '',
        'STATUSDATAFILE': '',
        'COMMENTDATAFILE': '',
        'DOWNTIMEDATAFILE': '',
        'RETENTIONDATAFILE': '',
        'OBJECTCACHEFILE': '',
        'TEMPFILE': '',
        'TEMPPATH': '',
        'LOGFILE': '',
        'RESOURCEFILE': '',
        'COMMANDFILE': '',
        'HOSTPERFDATAFILE': '',
        'SERVICEPERFDATAFILE': '',
        'ADMINEMAIL': '',
        'ADMINPAGER': ''
        #'USERn' : '$USERn$' # Added at run time
    }
    # We create a dict of objects.
    # Type: 'name in objects' : (Class of one object, Class of the
    # collection, 'attribute name set on self (the config)')
    types_creations = {
        'timeperiod': (Timeperiod, Timeperiods, 'timeperiods'),
        'service': (Service, Services, 'services'),
        'servicegroup': (Servicegroup, Servicegroups, 'servicegroups'),
        'command': (Command, Commands, 'commands'),
        'host': (Host, Hosts, 'hosts'),
        'hostgroup': (Hostgroup, Hostgroups, 'hostgroups'),
        'contact': (Contact, Contacts, 'contacts'),
        'contactgroup': (Contactgroup, Contactgroups, 'contactgroups'),
        'notificationway': (NotificationWay, NotificationWays, 'notificationways'),
        'servicedependency': (Servicedependency, Servicedependencies, 'servicedependencies'),
        'hostdependency': (Hostdependency, Hostdependencies, 'hostdependencies'),
        'arbiter': (ArbiterLink, ArbiterLinks, 'arbiterlinks'),
        'scheduler': (SchedulerLink, SchedulerLinks, 'schedulerlinks'),
        'reactionner': (ReactionnerLink, ReactionnerLinks, 'reactionners'),
        'broker': (BrokerLink, BrokerLinks, 'brokers'),
        'poller': (PollerLink, PollerLinks, 'pollers'),
        'realm': (Realm, Realms, 'realms'),
        'module': (Module, Modules, 'modules'),
        'resultmodulation': (Resultmodulation, Resultmodulations, 'resultmodulations'),
        'escalation': (Escalation, Escalations, 'escalations'),
        'serviceescalation': (Serviceescalation, Serviceescalations, 'serviceescalations'),
        'hostescalation': (Hostescalation, Hostescalations, 'hostescalations'),
    }
    # This table is used to translate old parameter names into new ones,
    # i.e. from the Nagios2 format to the Nagios3 one.
    old_properties = {
        'nagios_user': 'shinken_user',
        'nagios_group': 'shinken_group'
    }
278 def __init__(self):
279 self.params = {}
280 self.resource_macros_names = []
281 #By default the conf is correct
282 self.conf_is_correct = True
283 #We tag the conf with a magic_hash, a random value to
284 #idify this conf
285 random.seed(time.time())
286 self.magic_hash = random.randint(1, 100000)
289 # We've got macro in the resource file and we want
290 # to update our MACRO dict with it
291 def fill_resource_macros_names_macros(self):
292 """ fill the macro dict will all value
293 from self.resource_macros_names"""
294 properties = self.__class__.properties
295 macros = self.__class__.macros
296 for macro_name in self.resource_macros_names:
297 properties['$'+macro_name+'$'] = StringProp(default='')
298 macros[macro_name] = '$'+macro_name+'$'
301 def load_params(self, params):
302 for elt in params:
303 elts = elt.split('=')
304 if len(elts) == 1: #error, there is no = !
305 self.conf_is_correct = False
306 print "Error : the parameter %s is malformed! (no = sign)" % elts[0]
307 else:
308 self.params[elts[0]] = elts[1]
309 setattr(self, elts[0], elts[1])
310 #Maybe it's a variable as $USER$ or $ANOTHERVATRIABLE$
311 #so look at the first character. If it's a $, it's a variable
312 #and if it's end like it too
313 if elts[0][0] == '$' and elts[0][-1] == '$':
314 macro_name = elts[0][1:-1]
315 self.resource_macros_names.append(macro_name)
319 def _cut_line(self, line):
320 #punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
321 tmp = re.split("[" + string.whitespace + "]+" , line)
322 r = [elt for elt in tmp if elt != '']
323 return r
    def read_config(self, files):
        """Read the main configuration files and everything they include.

        Expands cfg_file, resource_file and cfg_dir directives as they are
        encountered and returns one big string buffer with all contents
        concatenated. Unreadable files flag the configuration as incorrect
        but reading continues with the remaining files.
        """
        # just a first pass to get the cfg_file and all files in a buf
        res = ''

        for file in files:
            # We add a \n (or \r\n) to be sure config files are separated
            # if the previous one does not finish with a line return
            res += os.linesep
            print "Opening configuration file", file
            try:
                # Open in Universal way for Windows, Mac, Linux
                fd = open(file, 'rU')
                buf = fd.readlines()
                fd.close()
                # Relative includes are resolved against the including file
                config_base_dir = os.path.dirname(file)
            except IOError, exp:
                logger.log("Error: Cannot open config file '%s' for reading: %s" % (file, exp))
                # The configuration is invalid because we have a bad file!
                self.conf_is_correct = False
                continue

            for line in buf:
                # Should not be useful anymore with the Universal open
                # if os.name != 'nt':
                #     line = line.replace("\r\n", "\n")
                res += line
                # Drop the trailing newline before matching directives
                line = line[:-1]
                if re.search("^cfg_file", line) or re.search("^resource_file", line):
                    elts = line.split('=')
                    if os.path.isabs(elts[1]):
                        cfg_file_name = elts[1]
                    else:
                        cfg_file_name = os.path.join(config_base_dir, elts[1])
                    cfg_file_name = cfg_file_name.strip()
                    try:
                        fd = open(cfg_file_name, 'rU')
                        logger.log("Processing object config file '%s'" % cfg_file_name)
                        res += fd.read()
                        # Be sure to add a line return so we won't mix files
                        res += '\n'
                        fd.close()
                    except IOError, exp:
                        logger.log("Error: Cannot open config file '%s' for reading: %s" % (cfg_file_name, exp))
                        # The configuration is invalid because we have a bad file!
                        self.conf_is_correct = False
                elif re.search("^cfg_dir", line):
                    elts = line.split('=')
                    if os.path.isabs(elts[1]):
                        cfg_dir_name = elts[1]
                    else:
                        cfg_dir_name = os.path.join(config_base_dir, elts[1])
                    # Ok, look if it's really a directory
                    if not os.path.isdir(cfg_dir_name):
                        logger.log("Error: Cannot open config dir '%s' for reading" % cfg_dir_name)
                        self.conf_is_correct = False
                    # Now recursively walk it, picking every *.cfg file
                    for root, dirs, files in os.walk(cfg_dir_name):
                        for file in files:
                            if re.search("\.cfg$", file):
                                logger.log("Processing object config file '%s'" % os.path.join(root, file))
                                try:
                                    fd = open(os.path.join(root, file), 'rU')
                                    res += fd.read()
                                    fd.close()
                                except IOError, exp:
                                    logger.log("Error: Cannot open config file '%s' for reading: %s" % (os.path.join(root, file), exp))
                                    # The configuration is invalid
                                    # because we have a bad file!
                                    self.conf_is_correct = False
        return res
    def read_config_buf(self, buf):
        """Parse the merged configuration buffer.

        Splits the buffer into global parameters (fed to load_params) and
        per-type lists of 'define' blocks, then returns a dict mapping each
        object type to a list of {property: value} dicts.
        """
        params = []
        # One bucket per known object type; unknown types get a bucket
        # created on the fly when their 'define' is met.
        objectscfg = {
            'void': [],
            'timeperiod': [],
            'command': [],
            'contactgroup': [],
            'hostgroup': [],
            'contact': [],
            'notificationway': [],
            'host': [],
            'service': [],
            'servicegroup': [],
            'servicedependency': [],
            'hostdependency': [],
            'arbiter': [],
            'scheduler': [],
            'reactionner': [],
            'broker': [],
            'poller': [],
            'realm': [],
            'module': [],
            'resultmodulation': [],
            'escalation': [],
            'serviceescalation': [],
            'hostescalation': [],
        }
        tmp = []                    # lines of the define block being read
        tmp_type = 'void'           # type of the block being read
        in_define = False
        continuation_line = False
        tmp_line = ''
        lines = buf.split('\n')
        for line in lines:
            # Strip inline comments (everything after ';')
            line = line.split(';')[0]
            # A trailing backslash means there is more to come
            if re.search("\\\s*$", line):
                continuation_line = True
                line = re.sub("\\\s*$", "", line)
                line = re.sub("^\s+", " ", line)
                tmp_line += line
                continue
            elif continuation_line:
                # Now the continuation line is complete
                line = re.sub("^\s+", "", line)
                line = tmp_line + line
                tmp_line = ''
                continuation_line = False
            # A closing brace ends the current define block
            if re.search("}", line):
                in_define = False
            # Skip comments, blank lines and lone closing braces
            if re.search("^\s*\t*#|^\s*$|^\s*}", line):
                pass

            # A 'define' must be caught and its type saved;
            # the previous block is flushed first
            elif re.search("^define", line):
                in_define = True
                if tmp_type not in objectscfg:
                    objectscfg[tmp_type] = []
                objectscfg[tmp_type].append(tmp)
                tmp = []
                # Get the new type (word after 'define', minus any '{')
                elts = re.split('\s', line)
                tmp_type = elts[1]
                tmp_type = tmp_type.split('{')[0]
            else:
                if in_define:
                    tmp.append(line)
                else:
                    params.append(line)
        # Flush the last pending block
        objectscfg[tmp_type].append(tmp)
        objects = {}

        self.load_params(params)
        # And then update our MACRO dict with the resource macros
        self.fill_resource_macros_names_macros()

        # Turn each block's raw lines into a {property: value} dict
        for type in objectscfg:
            objects[type] = []
            for items in objectscfg[type]:
                tmp = {}
                for line in items:
                    elts = self._cut_line(line)
                    if elts != []:
                        prop = elts[0]
                        value = ' '.join(elts[1:])
                        tmp[prop] = value
                if tmp != {}:
                    objects[type].append(tmp)

        return objects
495 # We need to have some ghost objects like
496 # the check_command bp_rule for business
497 # correlator rules
498 def add_ghost_objects(self, raw_objects):
499 bp_rule = {'command_name' : 'bp_rule', 'command_line' : 'bp_rule'}
500 raw_objects['command'].append(bp_rule)
501 host_up = {'command_name' : '_internal_host_up', 'command_line' : '_internal_host_up'}
502 raw_objects['command'].append(host_up)
505 #We've got raw objects in string, now create real Instances
506 def create_objects(self, raw_objects):
507 """ Create real 'object' from dicts of prop/value """
508 types_creations = self.__class__.types_creations
510 #some types are already created in this time
511 early_created_types = ['arbiter', 'module']
513 # Before really create the objects, we add
514 # ghost ones like the bp_rule for correlation
515 self.add_ghost_objects(raw_objects)
517 for t in types_creations:
518 if t not in early_created_types:
519 self.create_objects_for_type(raw_objects, t)
522 def create_objects_for_type(self, raw_objects, type):
523 types_creations = self.__class__.types_creations
524 t = type
525 #Ex: the above code do for timeperiods:
526 #timeperiods = []
527 #for timeperiodcfg in objects['timeperiod']:
528 # t = Timeperiod(timeperiodcfg)
529 # t.clean()
530 # timeperiods.append(t)
531 #self.timeperiods = Timeperiods(timeperiods)
533 (cls, clss, prop) = types_creations[t]
534 #List where we put objects
535 lst = []
536 for obj_cfg in raw_objects[t]:
537 #We create teh object
538 o = cls(obj_cfg)
539 o.clean()
540 lst.append(o)
541 #we create the objects Class and we set it in prop
542 setattr(self, prop, clss(lst))
    # Here arbiter and module objects should be prepared and linked
    # before all other types.
    def early_arbiter_linking(self):
        """ Prepare the arbiter for early operations """
        self.modules.create_reversed_list()

        # Without any configured arbiter, fall back to a default one on
        # this very host so the daemon can still start.
        if len(self.arbiterlinks) == 0:
            logger.log("Warning : there is no arbiter, I add one in localhost:7770")
            a = ArbiterLink({'arbiter_name': 'Default-Arbiter',
                             'host_name': socket.gethostname(),
                             'address': 'localhost', 'port': '7770',
                             'spare': '0'})
            self.arbiterlinks = ArbiterLinks([a])

        # First fill default values, then convert raw strings, then link
        # the arbiters with their modules.
        self.arbiterlinks.fill_default()
        self.arbiterlinks.pythonize()
        self.arbiterlinks.linkify(self.modules)
    # We use linkify to make the config more efficient: elements will be
    # linked like pointers. For example, a host will have its services
    # and contacts directly in its properties.
    # REMEMBER: linkify AFTER explode...
    def linkify(self):
        """ Make 'links' between elements, like a host got a services list
        with all it's services in it """

        # First linkify myself for some global commands
        self.linkify_one_command_with_commands(self.commands, 'ocsp_command')
        self.linkify_one_command_with_commands(self.commands, 'ochp_command')
        self.linkify_one_command_with_commands(self.commands, 'host_perfdata_command')
        self.linkify_one_command_with_commands(self.commands, 'service_perfdata_command')

        # link hosts with timeperiods and commands
        self.hosts.linkify(self.timeperiods, self.commands,
                           self.contacts, self.realms,
                           self.resultmodulations, self.escalations,
                           self.hostgroups)

        # Do the simplify AFTER explode groups
        # link hostgroups with hosts
        self.hostgroups.linkify(self.hosts, self.realms)

        # link services with other objects
        self.services.linkify(self.hosts, self.commands,
                              self.timeperiods, self.contacts,
                              self.resultmodulations, self.escalations,
                              self.servicegroups)

        # link servicegroups members with services
        self.servicegroups.linkify(self.services)

        # link notificationways with timeperiods and commands
        self.notificationways.linkify(self.timeperiods, self.commands)

        # link contactgroups with contacts
        self.contactgroups.linkify(self.contacts)

        # link contacts with timeperiods, commands and notificationways
        self.contacts.linkify(self.timeperiods, self.commands,
                              self.notificationways)

        # link timeperiods with timeperiods (exclude part)
        self.timeperiods.linkify()

        self.servicedependencies.linkify(self.hosts, self.services,
                                         self.timeperiods)

        self.hostdependencies.linkify(self.hosts, self.timeperiods)

        self.resultmodulations.linkify(self.timeperiods)

        self.escalations.linkify(self.timeperiods, self.contacts,
                                 self.services, self.hosts)

        self.realms.linkify()

        # Link all satellite links with realms and modules
        # (arbiter links were already linkified in early_arbiter_linking)
        # self.arbiterlinks.linkify(self.modules)
        self.schedulerlinks.linkify(self.realms, self.modules)
        self.brokers.linkify(self.realms, self.modules)
        self.reactionners.linkify(self.realms, self.modules)
        self.pollers.linkify(self.realms, self.modules)
    # Some properties are dangerous to send as-is, like the realms
    # linked in hosts: realms are too big/too linked to serialize.
    def prepare_for_sending(self):
        # Only hosts need a pre-send cleanup for now.
        self.hosts.prepare_for_sending()
658 def dump(self):
659 #print 'Parameters:', self
660 #print 'Hostgroups:',self.hostgroups,'\n'
661 #print 'Services:', self.services
662 print "Slots", Service.__slots__
663 print 'Hosts:'
664 for h in self.hosts:
665 print '\t', h.get_name(), h.contacts
666 print 'Services:'
667 for s in self.services:
668 print '\t', s.get_name(), s.contacts
669 #print 'Templates:', self.hosts_tpl
670 #print 'Hosts:',self.hosts,'\n'
671 #print 'Contacts:', self.contacts
672 #print 'contactgroups',self.contactgroups
673 #print 'Servicegroups:', self.servicegroups
674 #print 'Timepriods:', self.timeperiods
675 #print 'Commands:', self.commands
676 #print "Number of services:", len(self.services.items)
677 #print "Service Dep", self.servicedependencies
678 #print "Schedulers", self.schedulerlinks
681 #It's used to change Nagios2 names to Nagios3 ones
682 #For hosts and services
683 def old_properties_names_to_new(self):
684 super(Config, self).old_properties_names_to_new()
685 self.hosts.old_properties_names_to_new()
686 self.services.old_properties_names_to_new()
689 #It's used to warn about useless parameter and print why it's not use.
690 def notice_about_useless_parameters(self):
691 if not self.disable_old_nagios_parameters_whining:
692 properties = self.__class__.properties
693 for prop, entry in properties.items():
694 if isinstance(entry, UnusedProp):
695 text = 'Notice : the parameter %s is useless and can be removed from the configuration (Reason: %s)' % (prop, entry.text)
696 logger.log(text)
699 # It's used to raise warning if the user got parameter
700 # that we do not manage from now
701 def warn_about_unmanaged_parameters(self):
702 properties = self.__class__.properties
703 unmanaged = []
704 for prop, entry in properties.items():
705 if not entry.managed and hasattr(self, prop):
706 if entry.help:
707 s = "%s : %s" % (prop, entry.help)
708 else:
709 s = prop
710 unmanaged.append(s)
711 if len(unmanaged) != 0:
712 print "\n"
713 mailing_list_uri = "https://lists.sourceforge.net/lists/listinfo/shinken-devel"
714 text = 'Warning : the folowing parameter(s) are not curently managed.'
715 logger.log(text)
716 for s in unmanaged:
717 logger.log(s)
718 text = 'Please look if you really need it. If so, please register at the devel mailing list (%s) and ask for it or propose us a patch :)' % mailing_list_uri
719 logger.log(text)
720 print "\n"
    # Used to fill group values on hosts and to create new services
    # (for hostgroup-defined ones).
    def explode(self):
        """Expand groups and cross-references into concrete elements.

        Order matters: plain elements first, then the groups that
        reference them.
        """
        self.contacts.explode(self.contactgroups, self.notificationways)
        self.contactgroups.explode()

        self.hosts.explode(self.hostgroups, self.contactgroups)
        self.hostgroups.explode()

        # Services defined on hostgroups are duplicated onto every member
        # host here, so the service count can grow.
        self.services.explode(self.hosts, self.hostgroups, self.contactgroups,
                              self.servicegroups, self.servicedependencies)
        self.servicegroups.explode()

        self.timeperiods.explode()

        self.hostdependencies.explode()

        self.servicedependencies.explode()

        # Serviceescalations and hostescalations will create new escalations
        self.serviceescalations.explode(self.escalations)
        self.hostescalations.explode(self.escalations)
        self.escalations.explode(self.hosts, self.hostgroups,
                                 self.contactgroups)

        # Now the architecture part
        self.realms.explode()
764 #Remove elements will the same name, so twins :)
765 #In fact only services should be acceptable with twins
766 def remove_twins(self):
767 #self.hosts.remove_twins()
768 self.services.remove_twins()
769 #self.contacts.remove_twins()
770 #self.timeperiods.remove_twins()
773 #Dependancies are importants for scheduling
774 #This function create dependencies linked between elements.
775 def apply_dependancies(self):
776 self.hosts.apply_dependancies()
777 self.services.apply_dependancies()
780 #Use to apply inheritance (template and implicit ones)
781 #So elements wil have their configured properties
782 def apply_inheritance(self):
783 #inheritance properties by template
784 #print "Hosts"
785 self.hosts.apply_inheritance()
786 #print "Contacts"
787 self.contacts.apply_inheritance()
788 #print "Services"
789 self.services.apply_inheritance(self.hosts)
790 #print "Servicedependencies"
791 self.servicedependencies.apply_inheritance(self.hosts)
792 #print "Hostdependencies"
793 self.hostdependencies.apply_inheritance()
794 #Also timeperiods
795 self.timeperiods.apply_inheritance()
798 #Use to apply implicit inheritance
799 def apply_implicit_inheritance(self):
800 #print "Services"
801 self.services.apply_implicit_inheritance(self.hosts)
804 #will fill properties for elements so they will have all theirs properties
805 def fill_default(self):
806 #Fill default for config (self)
807 super(Config, self).fill_default()
808 self.hosts.fill_default()
809 self.hostgroups.fill_default()
810 self.contacts.fill_default()
811 self.contactgroups.fill_default()
812 self.notificationways.fill_default()
813 self.services.fill_default()
814 self.servicegroups.fill_default()
815 self.resultmodulations.fill_default()
817 #Also fill default of host/servicedep objects
818 self.servicedependencies.fill_default()
819 self.hostdependencies.fill_default()
821 #first we create missing sat, so no other sat will
822 #be created after this point
823 self.fill_default_satellites()
824 #now we have all elements, we can create a default
825 #realm if need and it will be taged to sat that do
826 #not have an realm
827 self.fill_default_realm()
828 self.reactionners.fill_default()
829 self.pollers.fill_default()
830 self.brokers.fill_default()
831 self.schedulerlinks.fill_default()
832 # self.arbiterlinks.fill_default()
833 #Now fill some fields we can predict (like adress for hosts)
834 self.fill_predictive_missing_parameters()
836 #Here is a special functions to fill some special
837 #properties that are not filled and should be like
838 #adress for host (if not set, put host_name)
839 def fill_predictive_missing_parameters(self):
840 self.hosts.fill_predictive_missing_parameters()
843 #Will check if a realm is defined, if not
844 #Create a new one (default) and tag everyone that do not have
845 #a realm prop to be put in this realm
846 def fill_default_realm(self):
847 if len(self.realms) == 0:
848 #Create a default realm with default value =1
849 #so all hosts without realm wil be link with it
850 default = Realm({'realm_name' : 'Default', 'default' : '1'})
851 self.realms = Realms([default])
852 logger.log("Notice : the is no defined realms, so I add a new one %s" % default.get_name())
853 lists = [self.pollers, self.brokers, self.reactionners, self.schedulerlinks]
854 for l in lists:
855 for elt in l:
856 if not hasattr(elt, 'realm'):
857 elt.realm = 'Default'
858 logger.log("Notice : Tagging %s with realm %s" % (elt.get_name(), default.get_name()))
861 #If a satellite is missing, we add them in the localhost
862 #with defaults values
863 def fill_default_satellites(self):
864 if len(self.schedulerlinks) == 0:
865 logger.log("Warning : there is no scheduler, I add one in localhost:7768")
866 s = SchedulerLink({'scheduler_name' : 'Default-Scheduler',
867 'address' : 'localhost', 'port' : '7768'})
868 self.schedulerlinks = SchedulerLinks([s])
869 if len(self.pollers) == 0:
870 logger.log("Warning : there is no poller, I add one in localhost:7771")
871 p = PollerLink({'poller_name' : 'Default-Poller',
872 'address' : 'localhost', 'port' : '7771'})
873 self.pollers = PollerLinks([p])
874 if len(self.reactionners) == 0:
875 logger.log("Warning : there is no reactionner, I add one in localhost:7769")
876 r = ReactionnerLink({'reactionner_name' : 'Default-Reactionner',
877 'address' : 'localhost', 'port' : '7769'})
878 self.reactionners = ReactionnerLinks([r])
879 if len(self.brokers) == 0:
880 logger.log("Warning : there is no broker, I add one in localhost:7772")
881 b = BrokerLink({'broker_name' : 'Default-Broker',
882 'address' : 'localhost', 'port' : '7772',
883 'manage_arbiters' : '1'})
884 self.brokers = BrokerLinks([b])
887 #Return if one broker got a module of type : mod_type
888 def got_broker_module_type_defined(self, mod_type):
889 for b in self.brokers:
890 for m in b.modules:
891 if hasattr(m, 'module_type') and m.module_type == mod_type:
892 return True
893 return False
896 #return if one scheduler got a module of type : mod_type
897 def got_scheduler_module_type_defined(self, mod_type):
898 for b in self.schedulerlinks:
899 for m in b.modules:
900 if hasattr(m, 'module_type') and m.module_type == mod_type:
901 return True
902 return False
905 # Will ask for each host/service if the
906 # check_command is a bp rule. If so, it will create
907 # a tree structures with the rules
908 def create_business_rules(self):
909 self.hosts.create_business_rules(self.hosts, self.services)
910 self.services.create_business_rules(self.hosts, self.services)
913 # Will fill dep list for business rules
914 def create_business_rules_dependencies(self):
915 self.hosts.create_business_rules_dependencies()
916 self.services.create_business_rules_dependencies()
    #It's used to hack some old Nagios parameters like
    #log_file or status_file : if they are present in
    #the global configuration and there is no such module
    #in a Broker, we create it on the fly for all Brokers
    def hack_old_nagios_parameters(self):
        """ Create some 'modules' from all nagios parameters if they are set and
        the modules are not created.

        Broker modules that may be auto-generated : status_dat, simple_log,
        syslog, service_perfdata, host_perfdata.
        Scheduler module that may be auto-generated : nagios_retention_file.
        """
        #We list all modules we will add to brokers...
        mod_to_add = []
        #...and to schedulers
        mod_to_add_to_schedulers = []

        #For status_dat
        #NOTE(review): the module is auto-created only when object_cache_file
        #is ALSO set ; otherwise the broker modules configured by the user
        #(e.g. LiveStatus) are left as the default — confirm intended
        if hasattr(self, 'status_file') and self.status_file != '' and hasattr(self, 'object_cache_file'):
            #Ok, the user put such a value, we must look
            #if he forgot to put a module for Brokers
            got_status_dat_module = self.got_broker_module_type_defined('status_dat')

            #We need to create the module on the fly?
            if not got_status_dat_module:
                data = { 'object_cache_file': self.object_cache_file,
                        'status_file': self.status_file,
                        'module_name': 'Status-Dat-Autogenerated',
                        'module_type': 'status_dat'}
                mod = Module(data)
                #Fall back to 15 when status_update_interval is unset
                mod.status_update_interval = getattr(self, 'status_update_interval', 15)
                mod_to_add.append(mod)

        #Now the log_file
        if hasattr(self, 'log_file') and self.log_file != '':
            #Ok, the user put such a value, we must look
            #if he forgot to put a module for Brokers
            got_simple_log_module = self.got_broker_module_type_defined('simple_log')

            #We need to create the module on the fly?
            if not got_simple_log_module:
                data = {'module_type': 'simple_log', 'path': self.log_file,
                        'archive_path' : self.log_archive_path,
                        'module_name': 'Simple-log-Autogenerated'}
                mod = Module(data)
                mod_to_add.append(mod)

        #Now the syslog facility
        if self.use_syslog:
            #Ok, the user wants syslog logging, why not after all
            got_syslog_module = self.got_broker_module_type_defined('syslog')

            #We need to create the module on the fly?
            if not got_syslog_module:
                data = {'module_type': 'syslog',
                        'module_name': 'Syslog-Autogenerated'}
                mod = Module(data)
                mod_to_add.append(mod)

        #Now the service_perfdata module
        if self.service_perfdata_file != '':
            #Ok, we've got a path for a service perfdata file
            got_service_perfdata_module = self.got_broker_module_type_defined('service_perfdata')

            #We need to create the module on the fly?
            if not got_service_perfdata_module:
                data = {'module_type': 'service_perfdata',
                        'module_name': 'Service-Perfdata-Autogenerated',
                        'path' : self.service_perfdata_file,
                        'mode' : self.service_perfdata_file_mode,
                        'template' : self.service_perfdata_file_template}
                mod = Module(data)
                mod_to_add.append(mod)

        #Now the old retention file module (this one goes to schedulers)
        if self.state_retention_file != '' and self.retention_update_interval != 0:
            #Ok, we've got an old retention file
            got_retention_file_module = self.got_scheduler_module_type_defined('nagios_retention_file')

            #We need to create the module on the fly?
            if not got_retention_file_module:
                data = {'module_type': 'nagios_retention_file',
                        'module_name': 'Nagios-Retention-File-Autogenerated',
                        'path' : self.state_retention_file}
                mod = Module(data)
                mod_to_add_to_schedulers.append(mod)

        #Now the host_perfdata module
        if self.host_perfdata_file != '':
            #Ok, we've got a path for a host perfdata file
            got_host_perfdata_module = self.got_broker_module_type_defined('host_perfdata')

            #We need to create the module on the fly?
            if not got_host_perfdata_module:
                data = {'module_type': 'host_perfdata',
                        'module_name': 'Host-Perfdata-Autogenerated',
                        'path' : self.host_perfdata_file, 'mode' : self.host_perfdata_file_mode,
                        'template' : self.host_perfdata_file_template}
                mod = Module(data)
                mod_to_add.append(mod)

        #We add the generated modules to every broker
        if mod_to_add != []:
            print "Warning : I autogenerated some Broker modules, please look at your configuration"
            for m in mod_to_add:
                print "Warning : the module", m.module_name, "is autogenerated"
                for b in self.brokers:
                    b.modules.append(m)

        #Then the scheduler ones to every scheduler
        if mod_to_add_to_schedulers != []:
            print "Warning : I autogenerated some Scheduler modules, please look at your configuration"
            for m in mod_to_add_to_schedulers:
                print "Warning : the module", m.module_name, "is autogenerated"
                for b in self.schedulerlinks:
                    b.modules.append(m)
1033 # Set our timezone value and give it too to unset satellites
1034 def propagate_timezone_option(self):
1035 if self.use_timezone != '':
1036 #first apply myself
1037 os.environ['TZ'] = self.use_timezone
1038 time.tzset()
1040 tab = [self.schedulerlinks, self.pollers, self.brokers, self.reactionners]
1041 for t in tab:
1042 for s in t:
1043 if s.use_timezone == 'NOTSET':
1044 setattr(s, 'use_timezone', self.use_timezone)
1048 # Link templates with elements
1049 def linkify_templates(self):
1050 """ Like for normal object, we link templates with each others """
1051 self.hosts.linkify_templates()
1052 self.contacts.linkify_templates()
1053 self.services.linkify_templates()
1054 self.servicedependencies.linkify_templates()
1055 self.hostdependencies.linkify_templates()
1056 self.timeperiods.linkify_templates()
1060 # Reversed list is a dist with name for quick search by name
1061 def create_reversed_list(self):
1062 """ Create quick search lists for objects """
1063 self.hosts.create_reversed_list()
1064 self.hostgroups.create_reversed_list()
1065 self.contacts.create_reversed_list()
1066 self.contactgroups.create_reversed_list()
1067 self.notificationways.create_reversed_list()
1068 self.services.create_reversed_list()
1069 self.servicegroups.create_reversed_list()
1070 self.timeperiods.create_reversed_list()
1071 # self.modules.create_reversed_list()
1072 self.resultmodulations.create_reversed_list()
1073 self.escalations.create_reversed_list()
1074 #For services it's a special case
1075 #we search for hosts, then for services
1076 #it's quicker than search in all services
1077 self.services.optimize_service_search(self.hosts)
1080 #Some parameters are just not managed like O*HP commands
1081 #and regexp capabilities
1082 #True : OK
1083 #False : error in conf
1084 def check_error_on_hard_unmanaged_parameters(self):
1085 r = True
1086 if self.use_regexp_matching:
1087 logger.log("Error : the use_regexp_matching parameter is not managed.")
1088 r &= False
1089 #if self.ochp_command != '':
1090 # logger.log("Error : the ochp_command parameter is not managed.")
1091 # r &= False
1092 #if self.ocsp_command != '':
1093 # logger.log("Error : the ocsp_command parameter is not managed.")
1094 # r &= False
1095 return r
1098 # check if elements are correct or not (fill with defaults, etc)
1099 # Warning : this function call be called from a Arbiter AND
1100 # from and scheduler. The first one got everything, the second
1101 # does not have the satellites.
1102 def is_correct(self):
1103 """ Check if all elements got a good configuration """
1104 logger.log('Running pre-flight check on configuration data...')
1105 r = self.conf_is_correct
1107 # Globally unamanged parameters
1108 logger.log('Checking global parameters...')
1109 if not self.check_error_on_hard_unmanaged_parameters():
1110 r = False
1111 logger.log("check global parameters failed")
1113 for x in ('hosts', 'hostgroups', 'contacts', 'contactgroups', 'notificationways',
1114 'escalations', 'services', 'servicegroups', 'timeperiods'):
1115 logger.log('Checking %s...' % (x))
1116 cur = getattr(self, x)
1117 if not cur.is_correct():
1118 r = False
1119 logger.log("\t%s conf incorrect !!" % (x))
1120 logger.log('\tChecked %d %s' % (len(cur), x))
1122 # Hosts got a special check for loops
1123 if not self.hosts.no_loop_in_parents():
1124 r = False
1125 logger.log("hosts: detected loop in parents ; conf incorrect")
1127 for x in ( 'servicedependencies', 'hostdependencies', 'arbiterlinks', 'schedulerlinks',
1128 'reactionners', 'pollers', 'brokers', 'resultmodulations'):
1129 try: cur = getattr(self, x)
1130 except: continue
1131 logger.log('Checking %s' % (x))
1132 if not cur.is_correct():
1133 r = False
1134 logger.log("\t%s conf incorrect !!" % (x))
1135 logger.log('\tChecked %d %s' % (len(cur), x))
1138 self.conf_is_correct = r
1141 #We've got strings (like 1) but we want python elements, like True
1142 def pythonize(self):
1143 #call item pythonize for parameters
1144 super(Config, self).pythonize()
1145 self.hosts.pythonize()
1146 self.hostgroups.pythonize()
1147 self.hostdependencies.pythonize()
1148 self.contactgroups.pythonize()
1149 self.contacts.pythonize()
1150 self.notificationways.pythonize()
1151 self.servicegroups.pythonize()
1152 self.services.pythonize()
1153 self.servicedependencies.pythonize()
1154 self.resultmodulations.pythonize()
1155 self.escalations.pythonize()
1156 # self.arbiterlinks.pythonize()
1157 self.schedulerlinks.pythonize()
1158 self.realms.pythonize()
1159 self.reactionners.pythonize()
1160 self.pollers.pythonize()
1161 self.brokers.pythonize()
1164 #Explode parameters like cached_service_check_horizon in the
1165 #Service class in a cached_check_horizon manner, o*hp commands
1166 #, etc
1167 def explode_global_conf(self):
1168 clss = [Service, Host, Contact, SchedulerLink,
1169 PollerLink, ReactionnerLink, BrokerLink,
1170 ArbiterLink]
1171 for cls in clss:
1172 cls.load_global_conf(self)
1175 #Clean useless elements like templates because they are not needed anymore
1176 def clean_useless(self):
1177 self.hosts.clean_useless()
1178 self.contacts.clean_useless()
1179 self.services.clean_useless()
1180 self.servicedependencies.clean_useless()
1181 self.hostdependencies.clean_useless()
1182 self.timeperiods.clean_useless()
1185 #Create packs of hosts and services so in a pack,
1186 #all dependencies are resolved
1187 #It create a graph. All hosts are connected to their
1188 #parents, and hosts without parent are connected to host 'root'.
1189 #services are link to the host. Dependencies are managed
1190 #REF: doc/pack-creation.png
1191 def create_packs(self, nb_packs):
1192 #We create a graph with host in nodes
1193 g = Graph()
1194 g.add_nodes(self.hosts)
1196 #links will be used for relations between hosts
1197 links = set()
1199 #Now the relations
1200 for h in self.hosts:
1201 #Add parent relations
1202 for p in h.parents:
1203 if p is not None:
1204 links.add((p, h))
1205 #Add the others dependencies
1206 for (dep, tmp, tmp2, tmp3, tmp4) in h.act_depend_of:
1207 links.add((dep, h))
1208 for (dep, tmp, tmp2, tmp3, tmp4) in h.chk_depend_of:
1209 links.add((dep, h))
1211 #For services : they are link woth their own host but we need
1212 #To have the hosts of service dep in the same pack too
1213 for s in self.services:
1214 for (dep, tmp, tmp2, tmp3, tmp4) in s.act_depend_of:
1215 #I don't care about dep host: they are just the host
1216 #of the service...
1217 if hasattr(dep, 'host'):
1218 links.add((dep.host, s.host))
1219 #The othe type of dep
1220 for (dep, tmp, tmp2, tmp3, tmp4) in s.chk_depend_of:
1221 links.add((dep.host, s.host))
1223 # For host/service that are business based, we need to
1224 # link them too
1225 for s in [s for s in self.services if s.got_business_rule]:
1226 for e in s.business_rule.list_all_elements():
1227 if hasattr(e, 'host'): # if it's a service
1228 if e.host != s.host: # do not an host with itself
1229 links.add((e.host, s.host))
1230 else: # it's already a host
1231 if e != s.host:
1232 links.add((e, s.host))
1234 # Same for hosts of course
1235 for h in [ h for h in self.hosts if h.got_business_rule]:
1236 for e in h.business_rule.list_all_elements():
1237 if hasattr(e, 'host'): # if it's a service
1238 if e.host != h:
1239 links.add((e.host, h))
1240 else: # e is a host
1241 if e != h:
1242 links.add((e, h))
1245 #Now we create links in the graph. With links (set)
1246 #We are sure to call the less add_edge
1247 for (dep, h) in links:
1248 g.add_edge(dep, h)
1249 g.add_edge(h, dep)
1251 #Access_list from a node il all nodes that are connected
1252 #with it : it's a list of ours mini_packs
1253 tmp_packs = g.get_accessibility_packs()
1255 #Now We find the default realm (must be unique or
1256 #BAD THINGS MAY HAPPEN )
1257 default_realm = None
1258 for r in self.realms:
1259 if hasattr(r, 'default') and r.default:
1260 default_realm = r
1262 #Now we look if all elements of all packs have the
1263 #same realm. If not, not good!
1264 for pack in tmp_packs:
1265 tmp_realms = set()
1266 for elt in pack:
1267 if elt.realm is not None:
1268 tmp_realms.add(elt.realm)
1269 if len(tmp_realms) > 1:
1270 logger.log("Error : the realm configuration of yours hosts is not good because there a more than one realm in one pack (host relations) :")
1271 for h in pack:
1272 if h.realm is None:
1273 logger.log('Error : the host %s do not have a realm' % h.get_name())
1274 else:
1275 logger.log('Error : the host %s is in the realm %s' % (h.get_name(), h.realm.get_name()))
1276 if len(tmp_realms) == 1: # Ok, good
1277 r = tmp_realms.pop() #There is just one element
1278 r.packs.append(pack)
1279 elif len(tmp_realms) == 0: #Hum.. no realm value? So default Realm
1280 if default_realm is not None:
1281 default_realm.packs.append(pack)
1282 else:
1283 logger.log("Error : some hosts do not have a realm and you do not defined a default realm!")
1284 for h in pack:
1285 logger.log('Host in this pack : %s ' % h.get_name())
1287 #The load balancing is for a loop, so all
1288 #hosts of a realm (in a pack) will be dispatch
1289 #in the schedulers of this realm
1290 #REF: doc/pack-agregation.png
1291 for r in self.realms:
1292 #print "Load balancing realm", r.get_name()
1293 packs = {}
1294 #create roundrobin iterator for id of cfg
1295 #So dispatching is loadbalanced in a realm
1296 #but add a entry in the roundrobin tourniquet for
1297 #every weight point schedulers (so Weight round robin)
1298 weight_list = []
1299 no_spare_schedulers = [s for s in r.schedulers if not s.spare]
1300 nb_schedulers = len(no_spare_schedulers)
1302 #Maybe there is no scheduler in the realm, it's can be a
1303 #big problem if there are elements in packs
1304 nb_elements = len([elt for elt in [pack for pack in r.packs]])
1305 logger.log("Number of hosts in the realm %s : %d" %(r.get_name(), nb_elements))
1307 if nb_schedulers == 0 and nb_elements != 0:
1308 logger.log("ERROR : The realm %s have hosts but no scheduler!" %r.get_name())
1309 r.packs = [] #Dumb pack
1310 #The conf is incorrect
1311 self.conf_is_correct = False
1312 continue
1314 packindex = 0
1315 packindices = {}
1316 for s in no_spare_schedulers:
1317 packindices[s.id] = packindex
1318 packindex += 1
1319 for i in xrange(0, s.weight):
1320 weight_list.append(s.id)
1322 rr = itertools.cycle(weight_list)
1324 #we must have nb_schedulers packs)
1325 for i in xrange(0, nb_schedulers):
1326 packs[i] = []
1328 #Now we explode the numerous packs into nb_packs reals packs:
1329 #we 'load balance' them in a roundrobin way
1330 for pack in r.packs:
1331 i = rr.next()
1332 for elt in pack:
1333 packs[packindices[i]].append(elt)
1334 #Now in packs we have the number of packs [h1, h2, etc]
1335 #equal to the number of schedulers.
1336 r.packs = packs
1340 # Use the self.conf and make nb_parts new confs.
1341 # nbparts is equal to the number of schedulerlink
1342 # New confs are independant whith checks. The only communication
1343 # That can be need is macro in commands
1344 def cut_into_parts(self):
1345 #print "Scheduler configurated :", self.schedulerlinks
1346 #I do not care about alive or not. User must have set a spare if need it
1347 nb_parts = len([s for s in self.schedulerlinks if not s.spare])
1349 if nb_parts == 0:
1350 nb_parts = 1
1352 # We create dummy configurations for schedulers :
1353 # they are clone of the master
1354 # conf but without hosts and services (because they are dispatched between
1355 # theses configurations)
1356 self.confs = {}
1357 for i in xrange(0, nb_parts):
1358 #print "Create Conf:", i, '/', nb_parts -1
1359 self.confs[i] = Config()
1361 #Now we copy all properties of conf into the new ones
1362 for prop, entry in Config.properties.items():
1363 # if not 'usage' in entry \
1364 # or not (entry['usage'] == 'unused' \
1365 # or entry['usage'] == 'unmanaged'):
1366 if entry.managed and not isinstance(entry, UnusedProp):
1367 val = getattr(self, prop)
1368 setattr(self.confs[i], prop, val)
1369 #print "Copy", prop, val
1371 # we need a deepcopy because each conf
1372 # will have new hostgroups
1373 self.confs[i].id = i
1374 self.confs[i].commands = self.commands
1375 self.confs[i].timeperiods = self.timeperiods
1376 #Create hostgroups with just the name and same id, but no members
1377 new_hostgroups = []
1378 for hg in self.hostgroups:
1379 new_hostgroups.append(hg.copy_shell())
1380 self.confs[i].hostgroups = Hostgroups(new_hostgroups)
1381 self.confs[i].notificationways = self.notificationways
1382 self.confs[i].contactgroups = self.contactgroups
1383 self.confs[i].contacts = self.contacts
1384 self.confs[i].schedulerlinks = copy.copy(self.schedulerlinks)
1385 #Create hostgroups with just the name and same id, but no members
1386 new_servicegroups = []
1387 for sg in self.servicegroups:
1388 new_servicegroups.append(sg.copy_shell())
1389 self.confs[i].servicegroups = Servicegroups(new_servicegroups)
1390 self.confs[i].hosts = [] # will be fill after
1391 self.confs[i].services = [] # will be fill after
1392 # The elements of the others conf will be tag here
1393 self.confs[i].other_elements = {}
1394 # if a scheduler have accepted the conf
1395 self.confs[i].is_assigned = False
1397 logger.log("Creating packs for realms")
1399 #Just create packs. There can be numerous ones
1400 #In pack we've got hosts and service
1401 #packs are in the realms
1402 #REF: doc/pack-creation.png
1403 self.create_packs(nb_parts)
1405 #We've got all big packs and get elements into configurations
1406 #REF: doc/pack-agregation.png
1407 offset = 0
1408 for r in self.realms:
1409 for i in r.packs:
1410 pack = r.packs[i]
1411 for h in pack:
1412 self.confs[i+offset].hosts.append(h)
1413 for s in h.services:
1414 self.confs[i+offset].services.append(s)
1415 #Now the conf can be link in the realm
1416 r.confs[i+offset] = self.confs[i+offset]
1417 offset += len(r.packs)
1418 del r.packs
1420 #We've nearly have hosts and services. Now we want REALS hosts (Class)
1421 #And we want groups too
1422 #print "Finishing packs"
1423 for i in self.confs:
1424 #print "Finishing pack Nb:", i
1425 cfg = self.confs[i]
1427 #Create ours classes
1428 cfg.hosts = Hosts(cfg.hosts)
1429 cfg.hosts.create_reversed_list()
1430 cfg.services = Services(cfg.services)
1431 cfg.services.create_reversed_list()
1432 #Fill host groups
1433 for ori_hg in self.hostgroups:
1434 hg = cfg.hostgroups.find_by_name(ori_hg.get_name())
1435 mbrs = ori_hg.members
1436 mbrs_id = []
1437 for h in mbrs:
1438 if h is not None:
1439 mbrs_id.append(h.id)
1440 for h in cfg.hosts:
1441 if h.id in mbrs_id:
1442 hg.members.append(h)
1443 #Fill servicegroup
1444 for ori_sg in self.servicegroups:
1445 sg = cfg.servicegroups.find_by_name(ori_sg.get_name())
1446 mbrs = ori_sg.members
1447 mbrs_id = []
1448 for s in mbrs:
1449 if s is not None:
1450 mbrs_id.append(s.id)
1451 for s in cfg.services:
1452 if s.id in mbrs_id:
1453 sg.members.append(s)
1455 #Now we fill other_elements by host (service are with their host
1456 #so they are not tagged)
1457 for i in self.confs:
1458 for h in self.confs[i].hosts:
1459 for j in [j for j in self.confs if j != i]: #So other than i
1460 self.confs[i].other_elements[h.get_name()] = i
1462 #We tag conf with instance_id
1463 for i in self.confs:
1464 self.confs[i].instance_id = i
1465 random.seed(time.time())
1466 self.confs[i].magic_hash = random.randint(1, 100000)
# Register the $USER1$ .. $USER255$ macros as empty-by-default Config
# properties, plus the matching entries in the macro table.
def lazy():
    for idx in xrange(1, 256):
        idx = str(idx)
        Config.properties['$USER' + idx + '$'] = StringProp(default='')
        Config.macros['USER' + idx] = '$USER' + idx + '$'

lazy()
del lazy