Clean : (Grégory Starck) clean of some getattr code, bis.
[shinken.git] / shinken / config.py
blob5f2efef12314b14ed110ca1c2d471716084c75b8
1 #!/usr/bin/env python
2 #Copyright (C) 2009-2010 :
3 # Gabes Jean, naparuba@gmail.com
4 # Gerhard Lausser, Gerhard.Lausser@consol.de
6 #This file is part of Shinken.
8 #Shinken is free software: you can redistribute it and/or modify
9 #it under the terms of the GNU Affero General Public License as published by
10 #the Free Software Foundation, either version 3 of the License, or
11 #(at your option) any later version.
13 #Shinken is distributed in the hope that it will be useful,
14 #but WITHOUT ANY WARRANTY; without even the implied warranty of
15 #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 #GNU Affero General Public License for more details.
18 #You should have received a copy of the GNU Affero General Public License
19 #along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" Config is the class to read, load and manipulate the user
configuration. It reads a main cfg file (nagios.cfg) and gets all the
information from it. It creates objects, makes links between them,
cleans them, and cuts them into independent parts. The main user of
this is the Arbiter, but schedulers use it too (though far less)."""
28 import re, string, copy, os, socket
29 import itertools
30 import time
31 import random
33 from shinken.timeperiod import Timeperiod, Timeperiods
34 from shinken.service import Service, Services
35 from shinken.command import Command, Commands
36 from shinken.resultmodulation import Resultmodulation, Resultmodulations
37 from shinken.escalation import Escalation, Escalations
38 from shinken.serviceescalation import Serviceescalation, Serviceescalations
39 from shinken.hostescalation import Hostescalation, Hostescalations
40 from shinken.host import Host, Hosts
41 from shinken.hostgroup import Hostgroup, Hostgroups
42 from shinken.realm import Realm, Realms
43 from shinken.contact import Contact, Contacts
44 from shinken.contactgroup import Contactgroup, Contactgroups
45 from shinken.notificationway import NotificationWay, NotificationWays
46 from shinken.servicegroup import Servicegroup, Servicegroups
47 from shinken.item import Item
48 from shinken.servicedependency import Servicedependency, Servicedependencies
49 from shinken.hostdependency import Hostdependency, Hostdependencies
50 from shinken.arbiterlink import ArbiterLink, ArbiterLinks
51 from shinken.schedulerlink import SchedulerLink, SchedulerLinks
52 from shinken.reactionnerlink import ReactionnerLink, ReactionnerLinks
53 from shinken.brokerlink import BrokerLink, BrokerLinks
54 from shinken.pollerlink import PollerLink, PollerLinks
55 from shinken.module import Module, Modules
56 from shinken.graph import Graph
57 from shinken.log import logger
59 from shinken.util import to_int, to_char, to_bool
60 from shinken.property import UnusedProp, BoolProp, IntegerProp, FloatProp, CharProp, StringProp, ListProp
61 #import psyco
62 #psyco.full()
class Config(Item):
    """Main configuration holder: the global parameters (see the
    `properties` dict below) plus every configuration object loaded
    from the cfg files."""
    # Nagios-compatible object cache file name
    cache_path = "objects.cache"
    # Item type name for this class
    my_type = "config"

    # Properties:
    # *required : if True, there is no default, and the config must set them
    # *default : value taken if the parameter is not set
    # *pythonize : function called to convert the raw string value
    # *class_inherit : (Service, 'blabla') : this property must be set on the
    #  Service class under the name 'blabla';
    #  (Service, None) : set it on the Service class under the same name
    # *unused : just to warn the user that the option he uses is no longer
    #  used in Shinken
    # *usage_text : if present, will be printed to explain why it's no
    #  more useful
80 properties = {
81 'prefix': StringProp(
82 default='/usr/local/shinken/'),
83 'log_file': UnusedProp(
84 text='This parameter is not longer take from the main file, but must be defined in the log broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
85 'object_cache_file': UnusedProp(
86 text='This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
87 'precached_object_file': UnusedProp(
88 text='Shinken is faster enough to do not need precached object file.'),
89 'resource_file': StringProp(
90 default='/tmp/ressources.txt'),
91 'temp_file': UnusedProp(
92 text=' temporary files are not used in the shinken architecture.'),
93 'status_file': UnusedProp(
94 text='This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
95 'status_update_interval': UnusedProp(
96 text='This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
97 'shinken_user': StringProp(
98 default='shinken'),
99 'shinken_group': StringProp(
100 default='shinken'),
101 'enable_notifications': BoolProp(
102 default='1',
103 class_inherit=[(Host, None), (Service, None), (Contact, None)]),
104 'execute_service_checks': BoolProp(
105 default='1',
106 class_inherit=[(Service, 'execute_checks')]),
107 'accept_passive_service_checks': BoolProp(
108 default='1',
109 class_inherit=[(Service, 'accept_passive_checks')]),
110 'execute_host_checks': BoolProp(
111 default='1',
112 class_inherit=[(Host, 'execute_checks')]),
113 'accept_passive_host_checks': BoolProp(
114 default='1',
115 class_inherit=[(Host, 'accept_passive_checks')]),
116 'enable_event_handlers': BoolProp(
117 default='1',
118 class_inherit=[(Host, None), (Service, None)]),
119 'log_rotation_method': CharProp(
120 default='d'),
121 'log_archive_path': StringProp(
122 default='/usr/local/shinken/var/archives'),
123 'check_external_commands': BoolProp(
124 default='1'),
125 'command_check_interval': UnusedProp(
126 text='anoter value than look always the file is useless, so we fix it.'),
127 'command_file': StringProp(
128 default='/tmp/command.cmd'),
129 'external_command_buffer_slots': UnusedProp(
130 text='We do not limit the ewxternal command slot.'),
131 'check_for_updates': UnusedProp(
132 text='network administrators will never allow such communication between server and the external world. Use your distribution packet manager to know if updates are available or go to the http://www.shinken-monitoring.org website instead.'),
133 'bare_update_checks': UnusedProp(
134 text=None),
135 'lock_file': StringProp(
136 default='/usr/local/shinken/var/arbiterd.pid'),
137 'retain_state_information': UnusedProp(
138 text='sorry, retain state information will not be implemented because it is useless.'),
139 'state_retention_file': StringProp(
140 default=''),
141 'retention_update_interval': IntegerProp(
142 default='0'),
143 'use_retained_program_state': UnusedProp(
144 text='We do not think such an option is interesting to manage.'),
145 'use_retained_scheduling_info': UnusedProp(
146 text='We do not think such an option is interesting to manage.'),
147 'retained_host_attribute_mask': UnusedProp(
148 text='We do not think such an option is interesting to manage.'),
149 'retained_service_attribute_mask': UnusedProp(
150 text='We do not think such an option is interesting to manage.'),
151 'retained_process_host_attribute_mask': UnusedProp(
152 text='We do not think such an option is interesting to manage.'),
153 'retained_process_service_attribute_mask': UnusedProp(
154 text='We do not think such an option is interesting to manage.'),
155 'retained_contact_host_attribute_mask': UnusedProp(
156 text='We do not think such an option is interesting to manage.'),
157 'retained_contact_service_attribute_mask': UnusedProp(
158 text='We do not think such an option is interesting to manage.'),
159 'use_syslog': BoolProp(
160 default='0'),
161 'log_notifications': BoolProp(
162 default='1',
163 class_inherit=[(Host, None), (Service, None)]),
164 'log_service_retries': BoolProp(
165 default='1',
166 class_inherit=[(Service, 'log_retries')]),
167 'log_host_retries': BoolProp(
168 default='1',
169 class_inherit=[(Host, 'log_retries')]),
170 'log_event_handlers': BoolProp(
171 default='1',
172 class_inherit=[(Host, None), (Service, None)]),
173 'log_initial_states': BoolProp(
174 default='1'),
175 'log_external_commands': BoolProp(
176 default='1'),
177 'log_passive_checks': BoolProp(
178 default='1'),
179 'global_host_event_handler': StringProp(
180 default='',
181 class_inherit=[(Host, 'global_event_handler')]),
182 'global_service_event_handler': StringProp(
183 default='',
184 class_inherit=[(Service, 'global_event_handler')]),
185 'sleep_time': UnusedProp(
186 text='this deprecated option is useless in the shinken way of doing.'),
187 'service_inter_check_delay_method': UnusedProp(
188 text='This option is useless in the Shinken scheduling. The only way is the smart way.'),
189 'max_service_check_spread': IntegerProp(
190 default='30',
191 class_inherit=[(Service, 'max_check_spread')]),
192 'service_interleave_factor': UnusedProp(
193 text='This option is useless in the Shinken scheduling because it use a random distribution for initial checks.'),
194 'max_concurrent_checks': UnusedProp(
195 text='Limiting the max concurrent checks is not helful to got a good running monitoring server.'),
196 'check_result_reaper_frequency': UnusedProp(
197 text='Shinken do not use reaper process.'),
198 'max_check_result_reaper_time': UnusedProp(
199 text='Shinken do not use reaper process.'),
200 'check_result_path': UnusedProp(
201 text='Shinken use in memory returns, not check results on flat file.'),
202 'max_check_result_file_age': UnusedProp(
203 text='Shinken do not use flat file check resultfiles.'),
204 'host_inter_check_delay_method': UnusedProp(
205 text='This option is unused in the Shinken scheduling because distribution of the initial check is a random one.'),
206 'max_host_check_spread': IntegerProp(
207 default='30',
208 class_inherit=[(Host, 'max_check_spread')]),
209 'interval_length': IntegerProp(
210 default='60',
211 class_inherit=[(Host, None), (Service, None)]),
212 'auto_reschedule_checks': BoolProp(
213 managed=False,
214 default='1'),
215 'auto_rescheduling_interval': IntegerProp(
216 managed=False,
217 default='1'),
218 'auto_rescheduling_window': IntegerProp(
219 managed=False,
220 default='180'),
221 'use_aggressive_host_checking': UnusedProp(
222 text='Host agressive checking is an heritage from Nagios 1 and is really useless now.'),
223 'translate_passive_host_checks': BoolProp(
224 managed=False,
225 default='1'),
226 'passive_host_checks_are_soft': BoolProp(
227 managed=False,
228 default='1'),
229 'enable_predictive_host_dependency_checks': BoolProp(
230 managed=False,
231 default='1',
232 class_inherit=[(Host, 'enable_predictive_dependency_checks')]),
233 'enable_predictive_service_dependency_checks': StringProp(
234 managed=False,
235 default='1'),
236 'cached_host_check_horizon': IntegerProp(
237 default='0',
238 class_inherit=[(Host, 'cached_check_horizon')]),
239 'cached_service_check_horizon': IntegerProp(
240 default='0',
241 class_inherit=[(Service, 'cached_check_horizon')]),
242 'use_large_installation_tweaks': BoolProp(
243 default='0',
244 class_inherit=[(Host, None), (Service, None)]),
245 'free_child_process_memory': UnusedProp(
246 text='this option is automatic in Python processes'),
247 'child_processes_fork_twice': UnusedProp(
248 text='fork twice is not use.'),
249 'enable_environment_macros': BoolProp(
250 default='1',
251 class_inherit=[(Host, None), (Service, None)]),
252 'enable_flap_detection': BoolProp(
253 default='1',
254 class_inherit=[(Host, None), (Service, None)]),
255 'low_service_flap_threshold': IntegerProp(
256 default='25',
257 class_inherit=[(Service, 'low_flap_threshold')]),
258 'high_service_flap_threshold': IntegerProp(
259 default='50',
260 class_inherit=[(Service, 'high_flap_threshold')]),
261 'low_host_flap_threshold': IntegerProp(
262 default='25',
263 class_inherit=[(Host, 'low_flap_threshold')]),
264 'high_host_flap_threshold': IntegerProp(
265 default='50',
266 class_inherit=[(Host, 'high_flap_threshold')]),
267 'soft_state_dependencies': BoolProp(
268 managed=False,
269 default='0'),
270 'service_check_timeout': IntegerProp(
271 default='10',
272 class_inherit=[(Service, 'check_timeout')]),
273 'host_check_timeout': IntegerProp(
274 default='10',
275 class_inherit=[(Host, 'check_timeout')]),
276 'event_handler_timeout': IntegerProp(
277 default='10',
278 class_inherit=[(Host, None), (Service, None)]),
279 'notification_timeout': IntegerProp(
280 default='5',
281 class_inherit=[(Host, None), (Service, None)]),
282 'ocsp_timeout': IntegerProp(
283 default='5',
284 class_inherit=[(Service, None)]),
285 'ochp_timeout': IntegerProp(
286 default='5',
287 class_inherit=[(Host, None)]),
288 'perfdata_timeout': IntegerProp(
289 default='2',
290 class_inherit=[(Host, None), (Service, None)]),
291 'obsess_over_services': BoolProp(
292 default='0',
293 class_inherit=[(Service, 'obsess_over')]),
294 'ocsp_command': StringProp(
295 default='',
296 class_inherit=[(Service, None)]),
297 'obsess_over_hosts': BoolProp(
298 default='0',
299 class_inherit=[(Host, 'obsess_over')]),
300 'ochp_command': StringProp(
301 default='',
302 class_inherit=[(Host, None)]),
303 'process_performance_data': BoolProp(
304 default='1',
305 class_inherit=[(Host, None), (Service, None)]),
306 'host_perfdata_command': StringProp(
307 default='',
308 class_inherit=[(Host, 'perfdata_command')]),
309 'service_perfdata_command': StringProp(
310 default='',
311 class_inherit=[(Service, 'perfdata_command')]),
312 'host_perfdata_file': StringProp(
313 default='',
314 class_inherit=[(Host, 'perfdata_file')]),
315 'service_perfdata_file': StringProp(
316 default='',
317 class_inherit=[(Service, 'perfdata_file')]),
318 'host_perfdata_file_template': StringProp(
319 default='/tmp/host.perf',
320 class_inherit=[(Host, 'perfdata_file_template')]),
321 'service_perfdata_file_template': StringProp(
322 default='/tmp/host.perf',
323 class_inherit=[(Service, 'perfdata_file_template')]),
324 'host_perfdata_file_mode': CharProp(
325 default='a',
326 class_inherit=[(Host, 'perfdata_file_mode')]),
327 'service_perfdata_file_mode': CharProp(
328 default='a',
329 class_inherit=[(Service, 'perfdata_file_mode')]),
330 'host_perfdata_file_processing_interval': IntegerProp(
331 managed=False,
332 default='15'),
333 'service_perfdata_file_processing_interval': IntegerProp(
334 managed=False,
335 default='15'),
336 'host_perfdata_file_processing_command': StringProp(
337 managed=False,
338 default='',
339 class_inherit=[(Host, 'perfdata_file_processing_command')]),
340 'service_perfdata_file_processing_command': StringProp(
341 managed=False,
342 default=None),
343 'check_for_orphaned_services': BoolProp(
344 default='1',
345 class_inherit=[(Service, 'check_for_orphaned')]),
346 'check_for_orphaned_hosts': BoolProp(
347 default='1',
348 class_inherit=[(Host, 'check_for_orphaned')]),
349 'check_service_freshness': BoolProp(
350 default='1',
351 class_inherit=[(Service, 'check_freshness')]),
352 'service_freshness_check_interval': IntegerProp(
353 default='60'),
354 'check_host_freshness': BoolProp(
355 default='1',
356 class_inherit=[(Host, 'check_freshness')]),
357 'host_freshness_check_interval': IntegerProp(
358 default='60'),
359 'additional_freshness_latency': IntegerProp(
360 default='15',
361 class_inherit=[(Host, None), (Service, None)]),
362 'enable_embedded_perl': BoolProp(
363 help='It will surely never be managed, but it should not be useful with poller performances.',
364 managed=False,
365 default='1'),
366 'use_embedded_perl_implicitly': BoolProp(
367 managed=False,
368 default='0'),
369 'date_format': StringProp(
370 managed=False,
371 default=None),
372 'use_timezone': StringProp(
373 default='',
374 class_inherit=[(Host, None), (Service, None), (Contact, None)]),
375 'illegal_object_name_chars': StringProp(
376 default="""`~!$%^&*"|'<>?,()=""",
377 class_inherit=[(Host, None), (Service, None), (Contact, None)]),
378 'illegal_macro_output_chars': StringProp(
379 default='',
380 class_inherit=[(Host, None), (Service, None), (Contact, None)]),
381 'use_regexp_matching': BoolProp(
382 help=' if you go some host or service definition like prod*, it will surely failed from now, sorry.',
383 managed=False,
384 default='0'),
385 'use_true_regexp_matching': BoolProp(
386 managed=False,
387 default=None),
388 'admin_email': UnusedProp(
389 text='sorry, not yet implemented.'),
390 'admin_pager': UnusedProp(
391 text='sorry, not yet implemented.'),
392 'event_broker_options': UnusedProp(
393 text='event broker are replaced by modules with a real configuration template.'),
394 'broker_module': StringProp(
395 default=''),
396 'debug_file': UnusedProp(
397 text=None),
398 'debug_level': UnusedProp(
399 text=None),
400 'debug_verbosity': UnusedProp(
401 text=None),
402 'max_debug_file_size': UnusedProp(
403 text=None),
404 #'$USERn$ : {'required':False, 'default':''} # Add at run in __init__
406 # SHINKEN SPECIFIC
407 'idontcareaboutsecurity': BoolProp(
408 default='0'),
409 'flap_history': IntegerProp(
410 default='20',
411 class_inherit=[(Host, None), (Service, None)]),
412 'max_plugins_output_length': IntegerProp(
413 default='8192',
414 class_inherit=[(Host, None), (Service, None)]),
416 # Enable or not the notice about old Nagios parameters
417 'disable_old_nagios_parameters_whining': BoolProp(
418 default='0'),
420 # Now for problem/impact states changes
421 'enable_problem_impacts_states_change': BoolProp(
422 default='0',
423 class_inherit=[(Host, None), (Service, None)]),
425 # More a running value in fact
426 'resource_macros_names': StringProp(
427 default=[])
    # Standard Nagios macro name -> name of the Config property holding
    # its value. An empty string means the macro exists only for
    # compatibility and resolves to nothing.
    macros = {
        'PREFIX' : 'prefix',
        'MAINCONFIGFILE' : '',
        'STATUSDATAFILE' : '',
        'COMMENTDATAFILE' : '',
        'DOWNTIMEDATAFILE' : '',
        'RETENTIONDATAFILE' : '',
        'OBJECTCACHEFILE' : '',
        'TEMPFILE' : '',
        'TEMPPATH' : '',
        'LOGFILE' : '',
        'RESOURCEFILE' : '',
        'COMMANDFILE' : '',
        'HOSTPERFDATAFILE' : '',
        'SERVICEPERFDATAFILE' : '',
        'ADMINEMAIL' : '',
        'ADMINPAGER' : ''
        #'USERn' : '$USERn$' # Add at run in __init__
    }
    #We create dict of objects
    #Type: 'name in objects' : (Class of one object, Class of the objects
    #container, attribute name on self (the config) for that container)
    types_creations = {
        'timeperiod' : (Timeperiod, Timeperiods, 'timeperiods'),
        'service' : (Service, Services, 'services'),
        'servicegroup' : (Servicegroup, Servicegroups, 'servicegroups'),
        'command' : (Command, Commands, 'commands'),
        'host' : (Host, Hosts, 'hosts'),
        'hostgroup' : (Hostgroup, Hostgroups, 'hostgroups'),
        'contact' : (Contact, Contacts, 'contacts'),
        'contactgroup' : (Contactgroup, Contactgroups, 'contactgroups'),
        'notificationway' : (NotificationWay, NotificationWays, 'notificationways'),
        'servicedependency' : (Servicedependency, Servicedependencies, 'servicedependencies'),
        'hostdependency' : (Hostdependency, Hostdependencies, 'hostdependencies'),
        'arbiter' : (ArbiterLink, ArbiterLinks, 'arbiterlinks'),
        'scheduler' : (SchedulerLink, SchedulerLinks, 'schedulerlinks'),
        'reactionner' : (ReactionnerLink, ReactionnerLinks, 'reactionners'),
        'broker' : (BrokerLink, BrokerLinks, 'brokers'),
        'poller' : (PollerLink, PollerLinks, 'pollers'),
        'realm' : (Realm, Realms, 'realms'),
        'module' : (Module, Modules, 'modules'),
        'resultmodulation' : (Resultmodulation, Resultmodulations, 'resultmodulations'),
        'escalation' : (Escalation, Escalations, 'escalations'),
        'serviceescalation' : (Serviceescalation, Serviceescalations, 'serviceescalations'),
        'hostescalation' : (Hostescalation, Hostescalations, 'hostescalations'),
    }
    #This tab is used to transform old parameters name into new ones
    #so from Nagios2 format, to Nagios3 ones
    old_properties = {
        'nagios_user' : 'shinken_user',
        'nagios_group' : 'shinken_group'
    }
488 def __init__(self):
489 self.params = {}
490 self.resource_macros_names = []
491 #By default the conf is correct
492 self.conf_is_correct = True
493 #We tag the conf with a magic_hash, a random value to
494 #idify this conf
495 random.seed(time.time())
496 self.magic_hash = random.randint(1, 100000)
500 def fill_usern_macros(cls):
501 """ Fill all USERN macros with value of properties"""
502 #Now the ressource file part
503 properties = cls.properties
504 macros = cls.macros
505 for n in xrange(1, 256):
506 n = str(n)
507 properties['$USER'+n+'$'] = StringProp(default='')
508 macros['USER'+n] = '$USER'+n+'$'
509 #Set this a Class method
510 fill_usern_macros = classmethod(fill_usern_macros)
513 # We've got macro in the resource file and we want
514 # to update our MACRO dict with it
515 def fill_resource_macros_names_macros(self):
516 """ fill the macro dict will all value
517 from self.resource_macros_names"""
518 macros = self.__class__.macros
519 for macro_name in self.resource_macros_names:
520 macros[macro_name] = '$'+macro_name+'$'
523 def load_params(self, params):
524 for elt in params:
525 elts = elt.split('=')
526 if len(elts) == 1: #error, there is no = !
527 self.conf_is_correct = False
528 print "Error : the parameter %s is malformed! (no = sign)" % elts[0]
529 else:
530 self.params[elts[0]] = elts[1]
531 setattr(self, elts[0], elts[1])
532 #Maybe it's a variable as $USER$ or $ANOTHERVATRIABLE$
533 #so look at the first character. If it's a $, it's a variable
534 #and if it's end like it too
535 if elts[0][0] == '$' and elts[0][-1] == '$':
536 macro_name = elts[0][1:-1]
537 self.resource_macros_names.append(macro_name)
541 def _cut_line(self, line):
542 #punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
543 tmp = re.split("[" + string.whitespace + "]+" , line)
544 r = [elt for elt in tmp if elt != '']
545 return r
    def read_config(self, files):
        """Read the given main configuration files and return one big
        string buffer with their content plus the content of every file
        pulled in via cfg_file=, resource_file= or cfg_dir= directives.

        An unreadable file or directory flags the configuration as
        incorrect (self.conf_is_correct = False) instead of aborting.
        """
        #just a first pass to get the cfg_file and all files in a buf
        res = ''

        for file in files:
            #We add a \n (or \r\n) to be sure config files are separated
            #if the previous does not finish with a line return
            res += os.linesep
            print "Opening configuration file", file
            try:
                # Open in Universal way for Windows, Mac, Linux
                fd = open(file, 'rU')
                buf = fd.readlines()
                fd.close()
                # Relative cfg_file/cfg_dir paths are resolved against
                # the directory of the file that references them
                config_base_dir = os.path.dirname(file)
            except IOError, exp:
                logger.log("Error: Cannot open config file '%s' for reading: %s" % (file, exp))
                #The configuration is invalid because we have a bad file!
                self.conf_is_correct = False
                continue

            for line in buf:
                # Should not be useful anymore with the Universal open
                # if os.name != 'nt':
                #    line = line.replace("\r\n", "\n")
                res += line
                # Drop the trailing newline before matching directives
                line = line[:-1]
                if re.search("^cfg_file", line) or re.search("^resource_file", line):
                    elts = line.split('=')
                    if os.path.isabs(elts[1]):
                        cfg_file_name = elts[1]
                    else:
                        cfg_file_name = os.path.join(config_base_dir, elts[1])
                    try:
                        fd = open(cfg_file_name, 'rU')
                        logger.log("Processing object config file '%s'" % cfg_file_name)
                        res += fd.read()
                        #Be sure to add a line return so we won't mix files
                        res += '\n'
                        fd.close()
                    except IOError, exp:
                        logger.log("Error: Cannot open config file '%s' for reading: %s" % (cfg_file_name, exp))
                        #The configuration is invalid because we have a bad file!
                        self.conf_is_correct = False
                elif re.search("^cfg_dir", line):
                    elts = line.split('=')
                    if os.path.isabs(elts[1]):
                        cfg_dir_name = elts[1]
                    else:
                        cfg_dir_name = os.path.join(config_base_dir, elts[1])
                    #Ok, look if it's really a directory
                    if not os.path.isdir(cfg_dir_name):
                        logger.log("Error: Cannot open config dir '%s' for reading" % cfg_dir_name)
                        self.conf_is_correct = False
                    #Now walk it, reading every *.cfg file found
                    for root, dirs, files in os.walk(cfg_dir_name):
                        for file in files:
                            if re.search("\.cfg$", file):
                                logger.log("Processing object config file '%s'" % os.path.join(root, file))
                                try:
                                    fd = open(os.path.join(root, file), 'rU')
                                    res += fd.read()
                                    fd.close()
                                except IOError, exp:
                                    logger.log("Error: Cannot open config file '%s' for reading: %s" % (os.path.join(root, file), exp))
                                    # The configuration is invalid
                                    # because we have a bad file!
                                    self.conf_is_correct = False
        return res
        # self.read_config_buf(res)
    def read_config_buf(self, buf):
        """Parse the whole configuration buffer into raw objects.

        Returns a dict mapping object type name -> list of dicts of
        property name -> raw string value. Global name=value parameters
        found outside define blocks are loaded on self via load_params,
        and resource macro names are then registered.
        """
        params = []
        # One bucket per known object type; 'void' collects anything
        # seen before the first define
        objectscfg = {'void': [],
                      'timeperiod' : [],
                      'command' : [],
                      'contactgroup' : [],
                      'hostgroup' : [],
                      'contact' : [],
                      'notificationway' : [],
                      'host' : [],
                      'service' : [],
                      'servicegroup' : [],
                      'servicedependency' : [],
                      'hostdependency' : [],
                      'arbiter' : [],
                      'scheduler' : [],
                      'reactionner' : [],
                      'broker' : [],
                      'poller' : [],
                      'realm' : [],
                      'module' : [],
                      'resultmodulation' : [],
                      'escalation' : [],
                      'serviceescalation' : [],
                      'hostescalation' : [],
                      }
        tmp = []
        tmp_type = 'void'
        in_define = False
        continuation_line = False
        tmp_line = ''
        lines = buf.split('\n')
        for line in lines:
            # Everything after ';' is a comment
            line = line.split(';')[0]
            #A backslash means, there is more to come
            if re.search("\\\s*$", line):
                continuation_line = True
                line = re.sub("\\\s*$", "", line)
                line = re.sub("^\s+", " ", line)
                tmp_line += line
                continue
            elif continuation_line:
                #Now the continuation line is complete
                line = re.sub("^\s+", "", line)
                line = tmp_line + line
                tmp_line = ''
                continuation_line = False
            # A closing brace ends the current define block
            if re.search("}", line):
                in_define = False
            # Skip comments, blank lines and lone closing braces
            if re.search("^\s*\t*#|^\s*$|^\s*}", line):
                pass
            #A define must be caught and the type saved
            #The old entry must be saved before
            elif re.search("^define", line):
                in_define = True
                if tmp_type not in objectscfg:
                    objectscfg[tmp_type] = []
                objectscfg[tmp_type].append(tmp)
                tmp = []
                #Get new type
                elts = re.split('\s', line)
                tmp_type = elts[1]
                tmp_type = tmp_type.split('{')[0]
            else:
                if in_define:
                    tmp.append(line)
                else:
                    params.append(line)
        # Flush the last pending definition
        objectscfg[tmp_type].append(tmp)
        objects = {}

        #print "Params", params
        self.load_params(params)
        #And then update our MACRO dict
        self.fill_resource_macros_names_macros()

        # Turn each raw line list into a {property: value} dict
        for type in objectscfg:
            objects[type] = []
            for items in objectscfg[type]:
                tmp = {}
                for line in items:
                    elts = self._cut_line(line)
                    if elts != []:
                        prop = elts[0]
                        value = ' '.join(elts[1:])
                        tmp[prop] = value
                if tmp != {}:
                    objects[type].append(tmp)

        return objects
715 # We need to have some ghost objects like
716 # the check_command bp_rule for business
717 # correlator rules
718 def add_ghost_objects(self, raw_objects):
719 bp_rule = {'command_name' : 'bp_rule', 'command_line' : 'bp_rule'}
720 raw_objects['command'].append(bp_rule)
721 host_up = {'command_name' : '_internal_host_up', 'command_line' : '_internal_host_up'}
722 raw_objects['command'].append(host_up)
725 #We've got raw objects in string, now create real Instances
726 def create_objects(self, raw_objects):
727 """ Create real 'object' from dicts of prop/value """
728 types_creations = self.__class__.types_creations
730 #some types are already created in this time
731 early_created_types = ['arbiter', 'module']
733 # Before really create the objects, we add
734 # ghost ones like the bp_rule for correlation
735 self.add_ghost_objects(raw_objects)
737 for t in types_creations:
738 if t not in early_created_types:
739 self.create_objects_for_type(raw_objects, t)
742 def create_objects_for_type(self, raw_objects, type):
743 types_creations = self.__class__.types_creations
744 t = type
745 #Ex: the above code do for timeperiods:
746 #timeperiods = []
747 #for timeperiodcfg in objects['timeperiod']:
748 # t = Timeperiod(timeperiodcfg)
749 # t.clean()
750 # timeperiods.append(t)
751 #self.timeperiods = Timeperiods(timeperiods)
753 (cls, clss, prop) = types_creations[t]
754 #List where we put objects
755 lst = []
756 for obj_cfg in raw_objects[t]:
757 #We create teh object
758 o = cls(obj_cfg)
759 o.clean()
760 lst.append(o)
761 #we create the objects Class and we set it in prop
762 setattr(self, prop, clss(lst))
766 #Here arbiter and modules objects should be prepare and link
767 #before all others types
768 def early_arbiter_linking(self):
769 """ Prepare the arbiter for early operations """
770 self.modules.create_reversed_list()
772 if len(self.arbiterlinks) == 0:
773 logger.log("Warning : there is no arbiter, I add one in localhost:7770")
774 a = ArbiterLink({'arbiter_name' : 'Default-Arbiter',
775 'host_name' : socket.gethostname(),
776 'address' : 'localhost', 'port' : '7770',
777 'spare' : '0'})
778 self.arbiterlinks = ArbiterLinks([a])
780 #First fill default
781 self.arbiterlinks.fill_default()
784 #print "****************** Pythonize ******************"
785 self.arbiterlinks.pythonize()
787 #print "****************** Linkify ******************"
788 self.arbiterlinks.linkify(self.modules)
    # We use linkify to make the config more efficient : elements will be
    # linked, like pointers. For example, a host will have its services
    # and contacts directly in its properties
    # REMEMBER: linkify AFTER explode...
    def linkify(self):
        """ Make 'links' between elements, like a host got a services list
        with all its services in it """

        # First linkify myself like for some global commands
        self.linkify_one_command_with_commands(self.commands, 'ocsp_command')
        self.linkify_one_command_with_commands(self.commands, 'ochp_command')
        self.linkify_one_command_with_commands(self.commands, 'host_perfdata_command')
        self.linkify_one_command_with_commands(self.commands, 'service_perfdata_command')

        #print "Hosts"
        # link hosts with timeperiods and commands
        self.hosts.linkify(self.timeperiods, self.commands, \
                           self.contacts, self.realms, \
                           self.resultmodulations, self.escalations,\
                           self.hostgroups)

        # Do the simplify AFTER explode groups
        #print "Hostgroups"
        # link hostgroups with hosts
        self.hostgroups.linkify(self.hosts, self.realms)

        #print "Services"
        # link services with other objects
        self.services.linkify(self.hosts, self.commands, \
                              self.timeperiods, self.contacts,\
                              self.resultmodulations, self.escalations,\
                              self.servicegroups)

        #print "Service groups"
        # link servicegroups members with services
        self.servicegroups.linkify(self.services)

        # link notificationways with timeperiods and commands
        self.notificationways.linkify(self.timeperiods, self.commands)

        #print "Contactgroups"
        #link contactgroups with contacts
        self.contactgroups.linkify(self.contacts)

        #print "Contacts"
        #link contacts with timeperiods and commands
        self.contacts.linkify(self.timeperiods, self.commands,
                              self.notificationways)

        #print "Timeperiods"
        #link timeperiods with timeperiods (exclude part)
        self.timeperiods.linkify()

        #print "Servicedependency"
        self.servicedependencies.linkify(self.hosts, self.services,
                                         self.timeperiods)

        #print "Hostdependency"
        self.hostdependencies.linkify(self.hosts, self.timeperiods)

        #print "Resultmodulations"
        self.resultmodulations.linkify(self.timeperiods)

        #print "Escalations"
        self.escalations.linkify(self.timeperiods, self.contacts, \
                                 self.services, self.hosts)

        #print "Realms"
        self.realms.linkify()

        #print "Schedulers and satellites"
        #Link all satellite links with realms and modules
        # self.arbiterlinks.linkify(self.modules)
        self.schedulerlinks.linkify(self.realms, self.modules)
        self.brokers.linkify(self.realms, self.modules)
        self.reactionners.linkify(self.realms, self.modules)
        self.pollers.linkify(self.realms, self.modules)
872 #Some properties are dangerous to be send like that
873 #like realms linked in hosts. Realms are too big to send (too linked)
    def prepare_for_sending(self):
        """Strip properties that are dangerous/too heavy to serialize before
        the conf is sent to satellites. Per the note above, realm objects
        linked into hosts are the main offenders (too inter-linked), so we
        delegate the cleanup to the hosts collection."""
        self.hosts.prepare_for_sending()
    def dump(self):
        """Debug helper : print hosts and services (with their contacts)
        to stdout. Not used in the normal processing path."""
        #print 'Parameters:', self
        #print 'Hostgroups:',self.hostgroups,'\n'
        #print 'Services:', self.services
        print "Slots", Service.__slots__
        print 'Hosts:'
        for h in self.hosts:
            print '\t', h.get_name(), h.contacts
        print 'Services:'
        for s in self.services:
            print '\t', s.get_name(), s.contacts
        #print 'Templates:', self.hosts_tpl
        #print 'Hosts:',self.hosts,'\n'
        #print 'Contacts:', self.contacts
        #print 'contactgroups',self.contactgroups
        #print 'Servicegroups:', self.servicegroups
        #print 'Timepriods:', self.timeperiods
        #print 'Commands:', self.commands
        #print "Number of services:", len(self.services.items)
        #print "Service Dep", self.servicedependencies
        #print "Schedulers", self.schedulerlinks
901 #It's used to change Nagios2 names to Nagios3 ones
902 #For hosts and services
    def old_properties_names_to_new(self):
        """Rename Nagios2 parameter names to their Nagios3 equivalents :
        first on the global conf itself (super), then on hosts and services."""
        super(Config, self).old_properties_names_to_new()
        self.hosts.old_properties_names_to_new()
        self.services.old_properties_names_to_new()
909 #It's used to warn about useless parameter and print why it's not use.
910 def notice_about_useless_parameters(self):
911 if not self.disable_old_nagios_parameters_whining:
912 properties = self.__class__.properties
913 for prop in properties:
914 entry = properties[prop]
915 if isinstance(entry, UnusedProp):
916 text = 'Notice : the parameter %s is useless and can be removed from the configuration (Reason: %s)' % (prop, entry.text)
917 logger.log(text)
920 # It's used to raise warning if the user got parameter
921 # that we do not manage from now
922 def warn_about_unmanaged_parameters(self):
923 properties = self.__class__.properties
924 unmanaged = []
925 for prop in properties:
926 entry = properties[prop]
927 if not entry.managed and hasattr(self, prop):
928 if entry.help:
929 s = "%s : %s" % (prop, entry.help)
930 else:
931 s = prop
932 unmanaged.append(s)
933 if len(unmanaged) != 0:
934 print "\n"
935 mailing_list_uri = "https://lists.sourceforge.net/lists/listinfo/shinken-devel"
936 text = 'Warning : the folowing parameter(s) are not curently managed.'
937 logger.log(text)
938 for s in unmanaged:
939 logger.log(s)
940 text = 'Please look if you really need it. If so, please register at the devel mailing list (%s) and ask for it or propose us a patch :)' % mailing_list_uri
941 logger.log(text)
942 print "\n"
945 #Use to fill groups values on hosts and create new services
946 #(for host group ones)
    def explode(self):
        """Expand group memberships and create implicit objects
        (e.g. services defined on hostgroups). The call order is
        load-bearing : simple elements first, their groups after."""
        #first elements, after groups
        #print "Contacts"
        self.contacts.explode(self.contactgroups, self.notificationways)
        #print "Contactgroups"
        self.contactgroups.explode()

        #print "Hosts"
        self.hosts.explode(self.hostgroups, self.contactgroups)
        #print "Hostgroups"
        self.hostgroups.explode()

        #print "Services"
        #print "Initialy got nb of services : %d" % len(self.services.items)
        self.services.explode(self.hosts, self.hostgroups, self.contactgroups,
                              self.servicegroups, self.servicedependencies)
        #print "finally got nb of services : %d" % len(self.services.items)
        #print "Servicegroups"
        self.servicegroups.explode()

        #print "Timeperiods"
        self.timeperiods.explode()

        self.hostdependencies.explode()

        #print "Servicedependancy"
        self.servicedependencies.explode()

        #Serviceescalations and hostescalations will create new escalations
        self.serviceescalations.explode(self.escalations)
        self.hostescalations.explode(self.escalations)
        self.escalations.explode(self.hosts, self.hostgroups,
                                 self.contactgroups)

        #Now the architecture part
        #print "Realms"
        self.realms.explode()
986 #Remove elements will the same name, so twins :)
987 #In fact only services should be acceptable with twins
    def remove_twins(self):
        """Remove elements sharing the same name (twins). Only services
        can legitimately have twins, so only they are cleaned; the other
        calls are deliberately left disabled."""
        #self.hosts.remove_twins()
        self.services.remove_twins()
        #self.contacts.remove_twins()
        #self.timeperiods.remove_twins()
995 #Dependancies are importants for scheduling
996 #This function create dependencies linked between elements.
    def apply_dependancies(self):
        """Create the scheduling dependency links between elements,
        first for hosts, then for services."""
        self.hosts.apply_dependancies()
        self.services.apply_dependancies()
1002 #Use to apply inheritance (template and implicit ones)
1003 #So elements wil have their configured properties
    def apply_inheritance(self):
        """Apply template inheritance so elements get their configured
        properties. Services and servicedependencies also need the hosts
        to resolve their inheritance."""
        #inheritance properties by template
        #print "Hosts"
        self.hosts.apply_inheritance()
        #print "Contacts"
        self.contacts.apply_inheritance()
        #print "Services"
        self.services.apply_inheritance(self.hosts)
        #print "Servicedependencies"
        self.servicedependencies.apply_inheritance(self.hosts)
        #print "Hostdependencies"
        self.hostdependencies.apply_inheritance()
        #Also timeperiods
        self.timeperiods.apply_inheritance()
1020 #Use to apply implicit inheritance
    def apply_implicit_inheritance(self):
        """Apply implicit inheritance : services inherit some properties
        from their host (delegated to the services collection)."""
        #print "Services"
        self.services.apply_implicit_inheritance(self.hosts)
1026 #will fill properties for elements so they will have all theirs properties
    def fill_default(self):
        """Fill default values for every property that was not set, on the
        global conf (super) and on all object collections. The ordering of
        the satellite/realm part is load-bearing (see inline comments)."""
        #Fill default for config (self)
        super(Config, self).fill_default()
        self.hosts.fill_default()
        self.hostgroups.fill_default()
        self.contacts.fill_default()
        self.contactgroups.fill_default()
        self.notificationways.fill_default()
        self.services.fill_default()
        self.servicegroups.fill_default()
        self.resultmodulations.fill_default()

        #Also fill default of host/servicedep objects
        self.servicedependencies.fill_default()
        self.hostdependencies.fill_default()

        #first we create missing sat, so no other sat will
        #be created after this point
        self.fill_default_satellites()
        #now we have all elements, we can create a default
        #realm if need and it will be taged to sat that do
        #not have an realm
        self.fill_default_realm()
        self.reactionners.fill_default()
        self.pollers.fill_default()
        self.brokers.fill_default()
        self.schedulerlinks.fill_default()
#        self.arbiterlinks.fill_default()
        #Now fill some fields we can predict (like adress for hosts)
        self.fill_predictive_missing_parameters()
1058 #Here is a special functions to fill some special
1059 #properties that are not filled and should be like
1060 #adress for host (if not set, put host_name)
    def fill_predictive_missing_parameters(self):
        """Fill properties whose value can be predicted when unset,
        like the address of a host (defaults to its host_name) --
        delegated to the hosts collection."""
        self.hosts.fill_predictive_missing_parameters()
1065 #Will check if a realm is defined, if not
1066 #Create a new one (default) and tag everyone that do not have
1067 #a realm prop to be put in this realm
    def fill_default_realm(self):
        """If the user defined no realm at all, create a 'Default' one
        (flagged default=1) and tag every satellite that has no realm
        property with it. When realms already exist, nothing is done."""
        if len(self.realms) == 0:
            #Create a default realm with default value =1
            #so all hosts without realm wil be link with it
            default = Realm({'realm_name' : 'Default', 'default' : '1'})
            self.realms = Realms([default])
            logger.log("Notice : the is no defined realms, so I add a new one %s" % default.get_name())
            lists = [self.pollers, self.brokers, self.reactionners, self.schedulerlinks]
            for l in lists:
                for elt in l:
                    if not hasattr(elt, 'realm'):
                        elt.realm = 'Default'
                        logger.log("Notice : Tagging %s with realm %s" % (elt.get_name(), default.get_name()))
1083 #If a satellite is missing, we add them in the localhost
1084 #with defaults values
1085 def fill_default_satellites(self):
1086 if len(self.schedulerlinks) == 0:
1087 logger.log("Warning : there is no scheduler, I add one in localhost:7768")
1088 s = SchedulerLink({'scheduler_name' : 'Default-Scheduler',
1089 'address' : 'localhost', 'port' : '7768'})
1090 self.schedulerlinks = SchedulerLinks([s])
1091 if len(self.pollers) == 0:
1092 logger.log("Warning : there is no poller, I add one in localhost:7771")
1093 p = PollerLink({'poller_name' : 'Default-Poller',
1094 'address' : 'localhost', 'port' : '7771'})
1095 self.pollers = PollerLinks([p])
1096 if len(self.reactionners) == 0:
1097 logger.log("Warning : there is no reactionner, I add one in localhost:7769")
1098 r = ReactionnerLink({'reactionner_name' : 'Default-Reactionner',
1099 'address' : 'localhost', 'port' : '7769'})
1100 self.reactionners = ReactionnerLinks([r])
1101 if len(self.brokers) == 0:
1102 logger.log("Warning : there is no broker, I add one in localhost:7772")
1103 b = BrokerLink({'broker_name' : 'Default-Broker',
1104 'address' : 'localhost', 'port' : '7772',
1105 'manage_arbiters' : '1'})
1106 self.brokers = BrokerLinks([b])
1109 #Return if one broker got a module of type : mod_type
1110 def got_broker_module_type_defined(self, mod_type):
1111 for b in self.brokers:
1112 for m in b.modules:
1113 if hasattr(m, 'module_type') and m.module_type == mod_type:
1114 return True
1115 return False
1118 #return if one scheduler got a module of type : mod_type
1119 def got_scheduler_module_type_defined(self, mod_type):
1120 for b in self.schedulerlinks:
1121 for m in b.modules:
1122 if hasattr(m, 'module_type') and m.module_type == mod_type:
1123 return True
1124 return False
1127 # Will ask for each host/service if the
1128 # check_command is a bp rule. If so, it will create
1129 # a tree structures with the rules
    def create_business_rules(self):
        """Ask every host and service whether its check_command is a
        business-process rule; if so a rule tree is built (delegated to
        the collections, which need both hosts and services to resolve
        the rule elements)."""
        self.hosts.create_business_rules(self.hosts, self.services)
        self.services.create_business_rules(self.hosts, self.services)
1135 # Will fill dep list for business rules
    def create_business_rules_dependencies(self):
        """Fill the dependency lists implied by business rules, for hosts
        then services (delegated to the collections)."""
        self.hosts.create_business_rules_dependencies()
        self.services.create_business_rules_dependencies()
1141 #It's used to hack some old Nagios parameters like
1142 #log_file or status_file : if they are present in
1143 #the global configuration and there is no such modules
1144 #in a Broker, we create it on the fly for all Brokers
    def hack_old_nagios_parameters(self):
        """ Create some 'modules' from all nagios parameters if they are set and
        the modules are not created.

        Old Nagios parameters (status_file, log_file, use_syslog, perfdata
        files, state_retention_file) map to broker/scheduler modules in
        Shinken; when such a parameter is set but the matching module is
        absent, an autogenerated module is appended to every broker (or
        scheduler for the retention file)."""
        #We list all modules we will add to brokers
        mod_to_add = []
        mod_to_add_to_schedulers = []

        #For status_dat
        if hasattr(self, 'status_file') and self.status_file != '' and hasattr(self, 'object_cache_file'):
            #Ok, the user put such a value, we must look
            #if he forget to put a module for Brokers
            got_status_dat_module = self.got_broker_module_type_defined('status_dat')

            #We need to create the module on the fly?
            if not got_status_dat_module:
                data = { 'object_cache_file': self.object_cache_file,
                         'status_file': self.status_file,
                         'module_name': 'Status-Dat-Autogenerated',
                         'module_type': 'status_dat'}
                mod = Module(data)
                # status_update_interval is optional : default to 15 (seconds)
                if hasattr(self, 'status_update_interval'):
                    mod.status_update_interval = self.status_update_interval
                else:
                    mod.status_update_interval = 15
                mod_to_add.append(mod)

        #Now the log_file
        if hasattr(self, 'log_file') and self.log_file != '':
            #Ok, the user put such a value, we must look
            #if he forget to put a module for Brokers
            got_simple_log_module = self.got_broker_module_type_defined('simple_log')

            #We need to create the module on the fly?
            if not got_simple_log_module:
                data = {'module_type': 'simple_log', 'path': self.log_file,
                        'archive_path' : self.log_archive_path,
                        'module_name': 'Simple-log-Autogenerated'}
                mod = Module(data)
                mod_to_add.append(mod)

        #Now the syslog facility
        if self.use_syslog:
            #Ok, the user want a syslog logging, why not after all
            got_syslog_module = self.got_broker_module_type_defined('syslog')

            #We need to create the module on the fly?
            if not got_syslog_module:
                data = {'module_type': 'syslog',
                        'module_name': 'Syslog-Autogenerated'}
                mod = Module(data)
                mod_to_add.append(mod)

        #Now the service_perfdata module
        if self.service_perfdata_file != '':
            #Ok, we've got a path for a service perfdata file
            got_service_perfdata_module = self.got_broker_module_type_defined('service_perfdata')

            #We need to create the module on the fly?
            if not got_service_perfdata_module:
                data = {'module_type': 'service_perfdata',
                        'module_name': 'Service-Perfdata-Autogenerated',
                        'path' : self.service_perfdata_file,
                        'mode' : self.service_perfdata_file_mode,
                        'template' : self.service_perfdata_file_template}
                mod = Module(data)
                mod_to_add.append(mod)

        #Now the old retention file module (scheduler side, not broker)
        if self.state_retention_file != '' and self.retention_update_interval != 0:
            #Ok, we've got a old retention file
            got_retention_file_module = self.got_scheduler_module_type_defined('nagios_retention_file')

            #We need to create the module on the fly?
            if not got_retention_file_module:
                data = {'module_type': 'nagios_retention_file',
                        'module_name': 'Nagios-Retention-File-Autogenerated',
                        'path' : self.state_retention_file}
                mod = Module(data)
                mod_to_add_to_schedulers.append(mod)

        #Now the host_perfdata module
        if self.host_perfdata_file != '':
            #Ok, we've got a path for a host perfdata file
            got_host_perfdata_module = self.got_broker_module_type_defined('host_perfdata')

            #We need to create the module on the fly?
            if not got_host_perfdata_module:
                data = {'module_type': 'host_perfdata',
                        'module_name': 'Host-Perfdata-Autogenerated',
                        'path' : self.host_perfdata_file, 'mode' : self.host_perfdata_file_mode,
                        'template' : self.host_perfdata_file_template}
                mod = Module(data)
                mod_to_add.append(mod)

        #We add them to the brokers if we need it
        # NOTE: the same Module instance is shared by all brokers here
        if mod_to_add != []:
            print "Warning : I autogenerated some Broker modules, please look at your configuration"
            for m in mod_to_add:
                print "Warning : the module", m.module_name, "is autogenerated"
                for b in self.brokers:
                    b.modules.append(m)

        #Then for schedulers
        if mod_to_add_to_schedulers != []:
            print "Warning : I autogenerated some Scheduler modules, please look at your configuration"
            for m in mod_to_add_to_schedulers:
                print "Warning : the module", m.module_name, "is autogenerated"
                for b in self.schedulerlinks:
                    b.modules.append(m)
1258 # Set our timezone value and give it too to unset satellites
1259 def propagate_timezone_option(self):
1260 if self.use_timezone != '':
1261 #first apply myself
1262 os.environ['TZ'] = self.use_timezone
1263 time.tzset()
1265 tab = [self.schedulerlinks, self.pollers, self.brokers, self.reactionners]
1266 for t in tab:
1267 for s in t:
1268 if s.use_timezone == 'NOTSET':
1269 setattr(s, 'use_timezone', self.use_timezone)
1273 # Link templates with elements
1274 def linkify_templates(self):
1275 """ Like for normal object, we link templates with each others """
1276 self.hosts.linkify_templates()
1277 self.contacts.linkify_templates()
1278 self.services.linkify_templates()
1279 self.servicedependencies.linkify_templates()
1280 self.hostdependencies.linkify_templates()
1281 self.timeperiods.linkify_templates()
1285 # Reversed list is a dist with name for quick search by name
1286 def create_reversed_list(self):
1287 """ Create quick search lists for objects """
1288 self.hosts.create_reversed_list()
1289 self.hostgroups.create_reversed_list()
1290 self.contacts.create_reversed_list()
1291 self.contactgroups.create_reversed_list()
1292 self.notificationways.create_reversed_list()
1293 self.services.create_reversed_list()
1294 self.servicegroups.create_reversed_list()
1295 self.timeperiods.create_reversed_list()
1296 # self.modules.create_reversed_list()
1297 self.resultmodulations.create_reversed_list()
1298 self.escalations.create_reversed_list()
1299 #For services it's a special case
1300 #we search for hosts, then for services
1301 #it's quicker than search in all services
1302 self.services.optimize_service_search(self.hosts)
1305 #Some parameters are just not managed like O*HP commands
1306 #and regexp capabilities
1307 #True : OK
1308 #False : error in conf
1309 def check_error_on_hard_unmanaged_parameters(self):
1310 r = True
1311 if self.use_regexp_matching:
1312 logger.log("Error : the use_regexp_matching parameter is not managed.")
1313 r &= False
1314 #if self.ochp_command != '':
1315 # logger.log("Error : the ochp_command parameter is not managed.")
1316 # r &= False
1317 #if self.ocsp_command != '':
1318 # logger.log("Error : the ocsp_command parameter is not managed.")
1319 # r &= False
1320 return r
1323 # check if elements are correct or not (fill with defaults, etc)
1324 # Warning : this function call be called from a Arbiter AND
1325 # from and scheduler. The first one got everything, the second
1326 # does not have the satellites.
    def is_correct(self):
        """ Check if all elements got a good configuration.

        Runs is_correct() on every collection, folding the results into
        self.conf_is_correct. Satellite collections are checked only when
        present (hasattr), because a scheduler-side Config has no
        satellites -- see the note above this method."""
        logger.log('Running pre-flight check on configuration data...')
        r = self.conf_is_correct

        #Globally unamanged parameters
        logger.log('Checking global parameters...')
        r &= self.check_error_on_hard_unmanaged_parameters()

        #Hosts
        logger.log('Checking hosts...')
        r &= self.hosts.is_correct()
        #Hosts got a special cehcks for loops
        r &= self.hosts.no_loop_in_parents()
        logger.log('\tChecked %d hosts' % len(self.hosts))

        #Hostgroups
        logger.log('Checking hostgroups...')
        r &= self.hostgroups.is_correct()
        logger.log('\tChecked %d hostgroups' % len(self.hostgroups))

        #Contacts
        logger.log('Checking contacts...')
        r &= self.contacts.is_correct()
        logger.log('\tChecked %d contacts' % len(self.contacts))

        #Contactgroups
        logger.log('Checking contactgroups')
        r &= self.contactgroups.is_correct()
        logger.log('\tChecked %d contactgroups' % len(self.contactgroups))

        #Notificationways
        logger.log('Checking notificationways...')
        r &= self.notificationways.is_correct()
        logger.log('\tChecked %d notificationways' % len(self.notificationways))

        #Escalations
        logger.log('Checking escalations...')
        r &= self.escalations.is_correct()
        logger.log('\tChecked %d escalations' % len(self.escalations))

        #Services
        logger.log('Checking services')
        r &= self.services.is_correct()
        logger.log('\tChecked %d services' % len(self.services))

        #Servicegroups
        logger.log('Checking servicegroups')
        r &= self.servicegroups.is_correct()
        logger.log('\tChecked %d servicegroups' % len(self.servicegroups))

        #Servicedependencies
        if hasattr(self, 'servicedependencies'):
            logger.log('Checking servicedependencies')
            r &= self.servicedependencies.is_correct()
            logger.log('\tChecked %d servicedependencies' % len(self.servicedependencies))

        #Hostdependencies
        if hasattr(self, 'hostdependencies'):
            logger.log('Checking hostdependencies')
            r &= self.hostdependencies.is_correct()
            logger.log('\tChecked %d hostdependencies' % len(self.hostdependencies))

        #Arbiters
        if hasattr(self, 'arbiterlinks'):
            logger.log('Checking arbiters')
            r &= self.arbiterlinks.is_correct()
            logger.log('\tChecked %d arbiters' % len(self.arbiterlinks))

        #Schedulers
        if hasattr(self, 'schedulerlinks'):
            logger.log('Checking schedulers')
            r &= self.schedulerlinks.is_correct()
            logger.log('\tChecked %d schedulers' % len(self.schedulerlinks))

        #Reactionners
        if hasattr(self, 'reactionners'):
            logger.log('Checking reactionners')
            r &= self.reactionners.is_correct()
            logger.log('\tChecked %d reactionners' % len(self.reactionners))

        #Pollers
        if hasattr(self, 'pollers'):
            logger.log('Checking pollers')
            r &= self.pollers.is_correct()
            logger.log('\tChecked %d pollers' % len(self.pollers))

        #Brokers
        if hasattr(self, 'brokers'):
            logger.log('Checking brokers')
            r &= self.brokers.is_correct()
            logger.log('\tChecked %d brokers' % len(self.brokers))

        #Timeperiods
        logger.log('Checking timeperiods')
        r &= self.timeperiods.is_correct()
        logger.log('\tChecked %d timeperiods' % len(self.timeperiods))

        #Resultmodulations
        if hasattr(self, 'resultmodulations'):
            logger.log('Checking resultmodulations')
            r &= self.resultmodulations.is_correct()
            logger.log('\tChecked %d resultmodulations' % len(self.resultmodulations))

        #The verdict is stored on the conf, not returned
        self.conf_is_correct = r
1435 #We've got strings (like 1) but we want python elements, like True
    def pythonize(self):
        """We've got strings (like '1') but we want python objects (like
        True) : pythonize the global parameters (super) then every object
        collection."""
        #call item pythonize for parameters
        super(Config, self).pythonize()
        self.hosts.pythonize()
        self.hostgroups.pythonize()
        self.hostdependencies.pythonize()
        self.contactgroups.pythonize()
        self.contacts.pythonize()
        self.notificationways.pythonize()
        self.servicegroups.pythonize()
        self.services.pythonize()
        self.servicedependencies.pythonize()
        self.resultmodulations.pythonize()
        self.escalations.pythonize()
#        self.arbiterlinks.pythonize()
        self.schedulerlinks.pythonize()
        self.realms.pythonize()
        self.reactionners.pythonize()
        self.pollers.pythonize()
        self.brokers.pythonize()
1458 #Explode parameters like cached_service_check_horizon in the
1459 #Service class in a cached_check_horizon manner, o*hp commands
1460 #, etc
1461 def explode_global_conf(self):
1462 Service.load_global_conf(self)
1463 Host.load_global_conf(self)
1464 Contact.load_global_conf(self)
1467 #Clean useless elements like templates because they are not needed anymore
1468 def clean_useless(self):
1469 self.hosts.clean_useless()
1470 self.contacts.clean_useless()
1471 self.services.clean_useless()
1472 self.servicedependencies.clean_useless()
1473 self.hostdependencies.clean_useless()
1474 self.timeperiods.clean_useless()
1477 #Create packs of hosts and services so in a pack,
1478 #all dependencies are resolved
1479 #It create a graph. All hosts are connected to their
1480 #parents, and hosts without parent are connected to host 'root'.
1481 #services are link to the host. Dependencies are managed
1482 #REF: doc/pack-creation.png
    def create_packs(self, nb_packs):
        """Create packs of hosts+services whose dependencies are all
        resolved inside the pack, then load-balance the packs onto the
        (non-spare) schedulers of each realm with a weighted round-robin.
        REF: doc/pack-creation.png and doc/pack-agregation.png.

        NOTE(review): nb_packs is not used in the body -- the number of
        packs per realm comes from the realm's scheduler count."""
        #We create a graph with host in nodes
        g = Graph()
        g.add_nodes(self.hosts)

        #links will be used for relations between hosts
        links = set()

        #Now the relations
        for h in self.hosts:
            #Add parent relations
            for p in h.parents:
                if p is not None:
                    links.add((p, h))
            #Add the others dependencies
            for (dep, tmp, tmp2, tmp3, tmp4) in h.act_depend_of:
                links.add((dep, h))
            for (dep, tmp, tmp2, tmp3, tmp4) in h.chk_depend_of:
                links.add((dep, h))

        #For services : they are link woth their own host but we need
        #To have the hosts of service dep in the same pack too
        for s in self.services:
            for (dep, tmp, tmp2, tmp3, tmp4) in s.act_depend_of:
                #I don't care about dep host: they are just the host
                #of the service...
                if hasattr(dep, 'host'):
                    links.add((dep.host, s.host))
            #The othe type of dep
            for (dep, tmp, tmp2, tmp3, tmp4) in s.chk_depend_of:
                links.add((dep.host, s.host))

        # For host/service that are business based, we need to
        # link them too
        for s in [s for s in self.services if s.got_business_rule]:
            for e in s.business_rule.list_all_elements():
                if hasattr(e, 'host'): # if it's a service
                    if e.host != s.host: # do not an host with itself
                        links.add((e.host, s.host))
                else: # it's already a host
                    if e != s.host:
                        links.add((e, s.host))

        # Same for hosts of course
        for h in [ h for h in self.hosts if h.got_business_rule]:
            for e in h.business_rule.list_all_elements():
                if hasattr(e, 'host'): # if it's a service
                    if e.host != h:
                        links.add((e.host, h))
                else: # e is a host
                    if e != h:
                        links.add((e, h))

        #Now we create links in the graph. With links (set)
        #We are sure to call the less add_edge
        for (dep, h) in links:
            g.add_edge(dep, h)
            g.add_edge(h, dep)

        #Access_list from a node il all nodes that are connected
        #with it : it's a list of ours mini_packs
        tmp_packs = g.get_accessibility_packs()

        #Now We find the default realm (must be unique or
        #BAD THINGS MAY HAPPEN )
        default_realm = None
        for r in self.realms:
            if hasattr(r, 'default') and r.default:
                default_realm = r

        #Now we look if all elements of all packs have the
        #same realm. If not, not good!
        for pack in tmp_packs:
            tmp_realms = set()
            for elt in pack:
                if elt.realm != None:
                    tmp_realms.add(elt.realm)
            if len(tmp_realms) > 1:
                logger.log("Error : the realm configuration of yours hosts is not good because there a more than one realm in one pack (host relations) :")
                for h in pack:
                    if h.realm == None:
                        logger.log('Error : the host %s do not have a realm' % h.get_name())
                    else:
                        logger.log('Error : the host %s is in the realm %s' % (h.get_name(), h.realm.get_name()))
            if len(tmp_realms) == 1: # Ok, good
                r = tmp_realms.pop() #There is just one element
                r.packs.append(pack)
            elif len(tmp_realms) == 0: #Hum.. no realm value? So default Realm
                if default_realm != None:
                    default_realm.packs.append(pack)
                else:
                    logger.log("Error : some hosts do not have a realm and you do not defined a default realm!")
                    for h in pack:
                        logger.log('Host in this pack : %s ' % h.get_name())

        #The load balancing is for a loop, so all
        #hosts of a realm (in a pack) will be dispatch
        #in the schedulers of this realm
        #REF: doc/pack-agregation.png
        for r in self.realms:
            #print "Load balancing realm", r.get_name()
            packs = {}
            #create roundrobin iterator for id of cfg
            #So dispatching is loadbalanced in a realm
            #but add a entry in the roundrobin tourniquet for
            #every weight point schedulers (so Weight round robin)
            weight_list = []
            no_spare_schedulers = [s for s in r.schedulers if not s.spare]
            nb_schedulers = len(no_spare_schedulers)

            #Maybe there is no scheduler in the realm, it's can be a
            #big problem if there are elements in packs
            nb_elements = len([elt for elt in [pack for pack in r.packs]])
            logger.log("Number of hosts in the realm %s : %d" %(r.get_name(), nb_elements))

            if nb_schedulers == 0 and nb_elements != 0:
                logger.log("ERROR : The realm %s have hosts but no scheduler!" %r.get_name())
                r.packs = [] #Dumb pack
                #The conf is incorrect
                self.conf_is_correct = False
                continue

            packindex = 0
            packindices = {}
            for s in no_spare_schedulers:
                packindices[s.id] = packindex
                packindex += 1
                #one tourniquet entry per weight point (weighted round robin)
                for i in xrange(0, s.weight):
                    weight_list.append(s.id)

            rr = itertools.cycle(weight_list)

            #we must have nb_schedulers packs)
            for i in xrange(0, nb_schedulers):
                packs[i] = []

            #Now we explode the numerous packs into nb_packs reals packs:
            #we 'load balance' them in a roundrobin way
            for pack in r.packs:
                i = rr.next()
                for elt in pack:
                    packs[packindices[i]].append(elt)
            #Now in packs we have the number of packs [h1, h2, etc]
            #equal to the number of schedulers.
            r.packs = packs
1632 #Use the self.conf and make nb_parts new confs.
1633 #nbparts is equal to the number of schedulerlink
1634 #New confs are independant whith checks. The only communication
1635 #That can be need is macro in commands
    def cut_into_parts(self):
        """Cut the conf into nb_parts sub-confs, one per non-spare
        scheduler. Each sub-conf is a clone of the master conf without
        hosts/services; those are then dispatched pack by pack (see
        create_packs). The sub-confs are independent for checks."""
        #print "Scheduler configurated :", self.schedulerlinks
        #I do not care about alive or not. User must have set a spare if need it
        nb_parts = len([s for s in self.schedulerlinks if not s.spare])

        if nb_parts == 0:
            nb_parts = 1

        # We create dummy configurations for schedulers :
        # they are clone of the master
        # conf but without hosts and services (because they are dispatched between
        # theses configurations)
        self.confs = {}
        for i in xrange(0, nb_parts):
            #print "Create Conf:", i, '/', nb_parts -1
            self.confs[i] = Config()

            #Now we copy all properties of conf into the new ones
            for prop in Config.properties:
#                if not 'usage' in Config.properties[prop] \
#                        or not (Config.properties[prop]['usage'] == 'unused' \
#                        or Config.properties[prop]['usage'] == 'unmanaged'):
                if Config.properties[prop].managed \
                        and not isinstance(Config.properties[prop], UnusedProp):
                    val = getattr(self, prop)
                    setattr(self.confs[i], prop, val)

            #we need a deepcopy because each conf
            #will have new hostgroups
            self.confs[i].id = i
            self.confs[i].commands = self.commands
            self.confs[i].timeperiods = self.timeperiods
            #Create hostgroups with just the name and same id, but no members
            new_hostgroups = []
            for hg in self.hostgroups:
                new_hostgroups.append(hg.copy_shell())
            self.confs[i].hostgroups = Hostgroups(new_hostgroups)
            self.confs[i].notificationways = self.notificationways
            self.confs[i].contactgroups = self.contactgroups
            self.confs[i].contacts = self.contacts
            self.confs[i].schedulerlinks = copy.copy(self.schedulerlinks)
            #Create servicegroups with just the name and same id, but no members
            new_servicegroups = []
            for sg in self.servicegroups:
                new_servicegroups.append(sg.copy_shell())
            self.confs[i].servicegroups = Servicegroups(new_servicegroups)
            self.confs[i].hosts = [] #will be fill after
            self.confs[i].services = [] #will be fill after
            self.confs[i].other_elements = {} # The elements of the others
                                              #conf will be tag here
            self.confs[i].is_assigned = False #if a scheduler have
                                              #accepted the conf

        logger.log("Creating packs for realms")

        #Just create packs. There can be numerous ones
        #In pack we've got hosts and service
        #packs are in the realms
        #REF: doc/pack-creation.png
        self.create_packs(nb_parts)

        #We've got all big packs and get elements into configurations
        #REF: doc/pack-agregation.png
        offset = 0
        for r in self.realms:
            for i in r.packs:
                pack = r.packs[i]
                for h in pack:
                    self.confs[i+offset].hosts.append(h)
                    for s in h.services:
                        self.confs[i+offset].services.append(s)
                #Now the conf can be link in the realm
                r.confs[i+offset] = self.confs[i+offset]
            offset += len(r.packs)
            del r.packs

        #We've nearly have hosts and services. Now we want REALS hosts (Class)
        #And we want groups too
        #print "Finishing packs"
        for i in self.confs:
            #print "Finishing pack Nb:", i
            cfg = self.confs[i]

            #Create ours classes
            cfg.hosts = Hosts(cfg.hosts)
            cfg.hosts.create_reversed_list()
            cfg.services = Services(cfg.services)
            cfg.services.create_reversed_list()
            #Fill host groups
            for ori_hg in self.hostgroups:
                hg = cfg.hostgroups.find_by_name(ori_hg.get_name())
                mbrs = ori_hg.members
                mbrs_id = []
                for h in mbrs:
                    if h is not None:
                        mbrs_id.append(h.id)
                for h in cfg.hosts:
                    if h.id in mbrs_id:
                        hg.members.append(h)
            #Fill servicegroup
            for ori_sg in self.servicegroups:
                sg = cfg.servicegroups.find_by_name(ori_sg.get_name())
                mbrs = ori_sg.members
                mbrs_id = []
                for s in mbrs:
                    if s is not None:
                        mbrs_id.append(s.id)
                for s in cfg.services:
                    if s.id in mbrs_id:
                        sg.members.append(s)

        #Now we fill other_elements by host (service are with their host
        #so they are not tagged)
        for i in self.confs:
            for h in self.confs[i].hosts:
                for j in [j for j in self.confs if j != i]: #So other than i
                    # NOTE(review): this writes into confs[i] but the loop
                    # variable j is never used -- given the comment above
                    # ('the elements of the others conf will be tag here'),
                    # confs[j].other_elements[...] = i looks intended. Verify.
                    self.confs[i].other_elements[h.get_name()] = i

        #We tag conf with instance_id
        for i in self.confs:
            self.confs[i].instance_id = i
            random.seed(time.time())
            self.confs[i].magic_hash = random.randint(1, 100000)