2 #Copyright (C) 2009-2010 :
3 # Gabes Jean, naparuba@gmail.com
4 # Gerhard Lausser, Gerhard.Lausser@consol.de
5 # Gregory Starck, g.starck@gmail.com
7 #This file is part of Shinken.
9 #Shinken is free software: you can redistribute it and/or modify
10 #it under the terms of the GNU Affero General Public License as published by
11 #the Free Software Foundation, either version 3 of the License, or
12 #(at your option) any later version.
14 #Shinken is distributed in the hope that it will be useful,
15 #but WITHOUT ANY WARRANTY; without even the implied warranty of
16 #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 #GNU Affero General Public License for more details.
19 #You should have received a copy of the GNU Affero General Public License
20 #along with Shinken. If not, see <http://www.gnu.org/licenses/>.
23 """ Config is the class to read, load and manipulate the user
24 configuration. It read a main cfg (nagios.cfg) and get all informations
25 from it. It create objects, make link between them, clean them, and cut
26 them into independant parts. The main user of this is Arbiter, but schedulers
27 use it too (but far less)"""
29 import re
, string
, copy
, os
, socket
34 from shinken
.objects
import *
36 from shinken
.arbiterlink
import ArbiterLink
, ArbiterLinks
37 from shinken
.schedulerlink
import SchedulerLink
, SchedulerLinks
38 from shinken
.reactionnerlink
import ReactionnerLink
, ReactionnerLinks
39 from shinken
.brokerlink
import BrokerLink
, BrokerLinks
40 from shinken
.pollerlink
import PollerLink
, PollerLinks
41 from shinken
.graph
import Graph
42 from shinken
.log
import logger
44 from shinken
.util
import to_int
, to_char
, to_bool
45 from shinken
.property import UnusedProp
, BoolProp
, IntegerProp
, FloatProp
, CharProp
, StringProp
, ListProp
51 cache_path
= "objects.cache"
# *required : if True, there is no default, and the config must provide a value
# *default : if not set, take this value
# *pythonize : function called to convert the raw value
# *class_inherit : (Service, 'blabla') : must set this property on the
#    Service class with name blabla
#    if (Service, None) : must set this property on the Service class with
# *unused : just to warn the user that the option he uses is no longer used
# *usage_text : if present, will print it to explain why it's no longer useful
67 default
='/usr/local/shinken/'),
68 'log_file': UnusedProp(
69 text
='This parameter is not longer take from the main file, but must be defined in the log broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
70 'object_cache_file': UnusedProp(
71 text
='This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
72 'precached_object_file': UnusedProp(
73 text
='Shinken is faster enough to do not need precached object file.'),
74 'resource_file': StringProp(
75 default
='/tmp/ressources.txt'),
76 'temp_file': UnusedProp(
77 text
=' temporary files are not used in the shinken architecture.'),
78 'status_file': UnusedProp(
79 text
='This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
80 'status_update_interval': UnusedProp(
81 text
='This parameter is not longer take from the main file, but must be defined in the status_dat broker module instead. But Shinken will create you one if there are no present and use this parameter in it, so no worry.'),
82 'shinken_user': StringProp(
84 'shinken_group': StringProp(
86 'enable_notifications': BoolProp(
88 class_inherit
=[(Host
, None), (Service
, None), (Contact
, None)]),
89 'execute_service_checks': BoolProp(
91 class_inherit
=[(Service
, 'execute_checks')]),
92 'accept_passive_service_checks': BoolProp(
94 class_inherit
=[(Service
, 'accept_passive_checks')]),
95 'execute_host_checks': BoolProp(
97 class_inherit
=[(Host
, 'execute_checks')]),
98 'accept_passive_host_checks': BoolProp(
100 class_inherit
=[(Host
, 'accept_passive_checks')]),
101 'enable_event_handlers': BoolProp(
103 class_inherit
=[(Host
, None), (Service
, None)]),
104 'log_rotation_method': CharProp(
106 'log_archive_path': StringProp(
107 default
='/usr/local/shinken/var/archives'),
108 'check_external_commands': BoolProp(
110 'command_check_interval': UnusedProp(
111 text
='anoter value than look always the file is useless, so we fix it.'),
112 'command_file': StringProp(
113 default
='/tmp/command.cmd'),
114 'external_command_buffer_slots': UnusedProp(
115 text
='We do not limit the ewxternal command slot.'),
116 'check_for_updates': UnusedProp(
117 text
='network administrators will never allow such communication between server and the external world. Use your distribution packet manager to know if updates are available or go to the http://www.shinken-monitoring.org website instead.'),
118 'bare_update_checks': UnusedProp(
120 'lock_file': StringProp(
121 default
='/usr/local/shinken/var/arbiterd.pid'),
122 'retain_state_information': UnusedProp(
123 text
='sorry, retain state information will not be implemented because it is useless.'),
124 'state_retention_file': StringProp(
126 'retention_update_interval': IntegerProp(
128 'use_retained_program_state': UnusedProp(
129 text
='We do not think such an option is interesting to manage.'),
130 'use_retained_scheduling_info': UnusedProp(
131 text
='We do not think such an option is interesting to manage.'),
132 'retained_host_attribute_mask': UnusedProp(
133 text
='We do not think such an option is interesting to manage.'),
134 'retained_service_attribute_mask': UnusedProp(
135 text
='We do not think such an option is interesting to manage.'),
136 'retained_process_host_attribute_mask': UnusedProp(
137 text
='We do not think such an option is interesting to manage.'),
138 'retained_process_service_attribute_mask': UnusedProp(
139 text
='We do not think such an option is interesting to manage.'),
140 'retained_contact_host_attribute_mask': UnusedProp(
141 text
='We do not think such an option is interesting to manage.'),
142 'retained_contact_service_attribute_mask': UnusedProp(
143 text
='We do not think such an option is interesting to manage.'),
144 'use_syslog': BoolProp(
146 'log_notifications': BoolProp(
148 class_inherit
=[(Host
, None), (Service
, None)]),
149 'log_service_retries': BoolProp(
151 class_inherit
=[(Service
, 'log_retries')]),
152 'log_host_retries': BoolProp(
154 class_inherit
=[(Host
, 'log_retries')]),
155 'log_event_handlers': BoolProp(
157 class_inherit
=[(Host
, None), (Service
, None)]),
158 'log_initial_states': BoolProp(
160 'log_external_commands': BoolProp(
162 'log_passive_checks': BoolProp(
164 'global_host_event_handler': StringProp(
166 class_inherit
=[(Host
, 'global_event_handler')]),
167 'global_service_event_handler': StringProp(
169 class_inherit
=[(Service
, 'global_event_handler')]),
170 'sleep_time': UnusedProp(
171 text
='this deprecated option is useless in the shinken way of doing.'),
172 'service_inter_check_delay_method': UnusedProp(
173 text
='This option is useless in the Shinken scheduling. The only way is the smart way.'),
174 'max_service_check_spread': IntegerProp(
176 class_inherit
=[(Service
, 'max_check_spread')]),
177 'service_interleave_factor': UnusedProp(
178 text
='This option is useless in the Shinken scheduling because it use a random distribution for initial checks.'),
179 'max_concurrent_checks': UnusedProp(
180 text
='Limiting the max concurrent checks is not helful to got a good running monitoring server.'),
181 'check_result_reaper_frequency': UnusedProp(
182 text
='Shinken do not use reaper process.'),
183 'max_check_result_reaper_time': UnusedProp(
184 text
='Shinken do not use reaper process.'),
185 'check_result_path': UnusedProp(
186 text
='Shinken use in memory returns, not check results on flat file.'),
187 'max_check_result_file_age': UnusedProp(
188 text
='Shinken do not use flat file check resultfiles.'),
189 'host_inter_check_delay_method': UnusedProp(
190 text
='This option is unused in the Shinken scheduling because distribution of the initial check is a random one.'),
191 'max_host_check_spread': IntegerProp(
193 class_inherit
=[(Host
, 'max_check_spread')]),
194 'interval_length': IntegerProp(
196 class_inherit
=[(Host
, None), (Service
, None)]),
197 'auto_reschedule_checks': BoolProp(
200 'auto_rescheduling_interval': IntegerProp(
203 'auto_rescheduling_window': IntegerProp(
206 'use_aggressive_host_checking': UnusedProp(
207 text
='Host agressive checking is an heritage from Nagios 1 and is really useless now.'),
208 'translate_passive_host_checks': BoolProp(
211 'passive_host_checks_are_soft': BoolProp(
214 'enable_predictive_host_dependency_checks': BoolProp(
217 class_inherit
=[(Host
, 'enable_predictive_dependency_checks')]),
218 'enable_predictive_service_dependency_checks': StringProp(
221 'cached_host_check_horizon': IntegerProp(
223 class_inherit
=[(Host
, 'cached_check_horizon')]),
224 'cached_service_check_horizon': IntegerProp(
226 class_inherit
=[(Service
, 'cached_check_horizon')]),
227 'use_large_installation_tweaks': BoolProp(
229 class_inherit
=[(Host
, None), (Service
, None)]),
230 'free_child_process_memory': UnusedProp(
231 text
='this option is automatic in Python processes'),
232 'child_processes_fork_twice': UnusedProp(
233 text
='fork twice is not use.'),
234 'enable_environment_macros': BoolProp(
236 class_inherit
=[(Host
, None), (Service
, None)]),
237 'enable_flap_detection': BoolProp(
239 class_inherit
=[(Host
, None), (Service
, None)]),
240 'low_service_flap_threshold': IntegerProp(
242 class_inherit
=[(Service
, 'low_flap_threshold')]),
243 'high_service_flap_threshold': IntegerProp(
245 class_inherit
=[(Service
, 'high_flap_threshold')]),
246 'low_host_flap_threshold': IntegerProp(
248 class_inherit
=[(Host
, 'low_flap_threshold')]),
249 'high_host_flap_threshold': IntegerProp(
251 class_inherit
=[(Host
, 'high_flap_threshold')]),
252 'soft_state_dependencies': BoolProp(
255 'service_check_timeout': IntegerProp(
257 class_inherit
=[(Service
, 'check_timeout')]),
258 'host_check_timeout': IntegerProp(
260 class_inherit
=[(Host
, 'check_timeout')]),
261 'event_handler_timeout': IntegerProp(
263 class_inherit
=[(Host
, None), (Service
, None)]),
264 'notification_timeout': IntegerProp(
266 class_inherit
=[(Host
, None), (Service
, None)]),
267 'ocsp_timeout': IntegerProp(
269 class_inherit
=[(Service
, None)]),
270 'ochp_timeout': IntegerProp(
272 class_inherit
=[(Host
, None)]),
273 'perfdata_timeout': IntegerProp(
275 class_inherit
=[(Host
, None), (Service
, None)]),
276 'obsess_over_services': BoolProp(
278 class_inherit
=[(Service
, 'obsess_over')]),
279 'ocsp_command': StringProp(
281 class_inherit
=[(Service
, None)]),
282 'obsess_over_hosts': BoolProp(
284 class_inherit
=[(Host
, 'obsess_over')]),
285 'ochp_command': StringProp(
287 class_inherit
=[(Host
, None)]),
288 'process_performance_data': BoolProp(
290 class_inherit
=[(Host
, None), (Service
, None)]),
291 'host_perfdata_command': StringProp(
293 class_inherit
=[(Host
, 'perfdata_command')]),
294 'service_perfdata_command': StringProp(
296 class_inherit
=[(Service
, 'perfdata_command')]),
297 'host_perfdata_file': StringProp(
299 class_inherit
=[(Host
, 'perfdata_file')]),
300 'service_perfdata_file': StringProp(
302 class_inherit
=[(Service
, 'perfdata_file')]),
303 'host_perfdata_file_template': StringProp(
304 default
='/tmp/host.perf',
305 class_inherit
=[(Host
, 'perfdata_file_template')]),
306 'service_perfdata_file_template': StringProp(
307 default
='/tmp/host.perf',
308 class_inherit
=[(Service
, 'perfdata_file_template')]),
309 'host_perfdata_file_mode': CharProp(
311 class_inherit
=[(Host
, 'perfdata_file_mode')]),
312 'service_perfdata_file_mode': CharProp(
314 class_inherit
=[(Service
, 'perfdata_file_mode')]),
315 'host_perfdata_file_processing_interval': IntegerProp(
318 'service_perfdata_file_processing_interval': IntegerProp(
321 'host_perfdata_file_processing_command': StringProp(
324 class_inherit
=[(Host
, 'perfdata_file_processing_command')]),
325 'service_perfdata_file_processing_command': StringProp(
328 'check_for_orphaned_services': BoolProp(
330 class_inherit
=[(Service
, 'check_for_orphaned')]),
331 'check_for_orphaned_hosts': BoolProp(
333 class_inherit
=[(Host
, 'check_for_orphaned')]),
334 'check_service_freshness': BoolProp(
336 class_inherit
=[(Service
, 'check_freshness')]),
337 'service_freshness_check_interval': IntegerProp(
339 'check_host_freshness': BoolProp(
341 class_inherit
=[(Host
, 'check_freshness')]),
342 'host_freshness_check_interval': IntegerProp(
344 'additional_freshness_latency': IntegerProp(
346 class_inherit
=[(Host
, None), (Service
, None)]),
347 'enable_embedded_perl': BoolProp(
348 help='It will surely never be managed, but it should not be useful with poller performances.',
351 'use_embedded_perl_implicitly': BoolProp(
354 'date_format': StringProp(
357 'use_timezone': StringProp(
359 class_inherit
=[(Host
, None), (Service
, None), (Contact
, None)]),
360 'illegal_object_name_chars': StringProp(
361 default
="""`~!$%^&*"|'<>?,()=""",
362 class_inherit
=[(Host
, None), (Service
, None), (Contact
, None)]),
363 'illegal_macro_output_chars': StringProp(
365 class_inherit
=[(Host
, None), (Service
, None), (Contact
, None)]),
366 'use_regexp_matching': BoolProp(
367 help=' if you go some host or service definition like prod*, it will surely failed from now, sorry.',
370 'use_true_regexp_matching': BoolProp(
373 'admin_email': UnusedProp(
374 text
='sorry, not yet implemented.'),
375 'admin_pager': UnusedProp(
376 text
='sorry, not yet implemented.'),
377 'event_broker_options': UnusedProp(
378 text
='event broker are replaced by modules with a real configuration template.'),
379 'broker_module': StringProp(
381 'debug_file': UnusedProp(
383 'debug_level': UnusedProp(
385 'debug_verbosity': UnusedProp(
387 'max_debug_file_size': UnusedProp(
389 #'$USERn$ : {'required':False, 'default':''} # Add at run in __init__
392 'idontcareaboutsecurity': BoolProp(
394 'flap_history': IntegerProp(
396 class_inherit
=[(Host
, None), (Service
, None)]),
397 'max_plugins_output_length': IntegerProp(
399 class_inherit
=[(Host
, None), (Service
, None)]),
401 # Enable or not the notice about old Nagios parameters
402 'disable_old_nagios_parameters_whining': BoolProp(
405 # Now for problem/impact states changes
406 'enable_problem_impacts_states_change': BoolProp(
408 class_inherit
=[(Host
, None), (Service
, None)]),
410 # More a running value in fact
411 'resource_macros_names': StringProp(
415 # global boolean for know if we use ssl or not
416 'use_ssl' : BoolProp(default
='0',
417 class_inherit
=[(SchedulerLink
, None), (ReactionnerLink
, None),
418 (BrokerLink
, None), (PollerLink
, None), (ArbiterLink
, None)],
420 'certs_dir' : StringProp(default
='etc/certs'),
421 'ca_cert' : StringProp(default
='etc/certs/ca.pem'),
422 'server_cert' : StringProp(default
='etc/certs/server.pem'),
423 'hard_ssl_name_check' : BoolProp(default
='0'),
429 'MAINCONFIGFILE' : '',
430 'STATUSDATAFILE' : '',
431 'COMMENTDATAFILE' : '',
432 'DOWNTIMEDATAFILE' : '',
433 'RETENTIONDATAFILE' : '',
434 'OBJECTCACHEFILE' : '',
440 'HOSTPERFDATAFILE' : '',
441 'SERVICEPERFDATAFILE' : '',
444 #'USERn' : '$USERn$' # Add at run in __init__
448 #We create dict of objects
449 #Type: 'name in objects' : {Class of object, Class of objects,
450 #'property for self for the objects(config)'
452 'timeperiod' : (Timeperiod
, Timeperiods
, 'timeperiods'),
453 'service' : (Service
, Services
, 'services'),
454 'servicegroup' : (Servicegroup
, Servicegroups
, 'servicegroups'),
455 'command' : (Command
, Commands
, 'commands'),
456 'host' : (Host
, Hosts
, 'hosts'),
457 'hostgroup' : (Hostgroup
, Hostgroups
, 'hostgroups'),
458 'contact' : (Contact
, Contacts
, 'contacts'),
459 'contactgroup' : (Contactgroup
, Contactgroups
, 'contactgroups'),
460 'notificationway' : (NotificationWay
, NotificationWays
, 'notificationways'),
461 'servicedependency' : (Servicedependency
, Servicedependencies
, 'servicedependencies'),
462 'hostdependency' : (Hostdependency
, Hostdependencies
, 'hostdependencies'),
463 'arbiter' : (ArbiterLink
, ArbiterLinks
, 'arbiterlinks'),
464 'scheduler' : (SchedulerLink
, SchedulerLinks
, 'schedulerlinks'),
465 'reactionner' : (ReactionnerLink
, ReactionnerLinks
, 'reactionners'),
466 'broker' : (BrokerLink
, BrokerLinks
, 'brokers'),
467 'poller' : (PollerLink
, PollerLinks
, 'pollers'),
468 'realm' : (Realm
, Realms
, 'realms'),
469 'module' : (Module
, Modules
, 'modules'),
470 'resultmodulation' : (Resultmodulation
, Resultmodulations
, 'resultmodulations'),
471 'escalation' : (Escalation
, Escalations
, 'escalations'),
472 'serviceescalation' : (Serviceescalation
, Serviceescalations
, 'serviceescalations'),
473 'hostescalation' : (Hostescalation
, Hostescalations
, 'hostescalations'),
476 #This tab is used to transform old parameters name into new ones
477 #so from Nagios2 format, to Nagios3 ones
479 'nagios_user' : 'shinken_user',
480 'nagios_group' : 'shinken_group'
487 self
.resource_macros_names
= []
488 #By default the conf is correct
489 self
.conf_is_correct
= True
490 #We tag the conf with a magic_hash, a random value to
492 random
.seed(time
.time())
493 self
.magic_hash
= random
.randint(1, 100000)
497 def fill_usern_macros(cls
):
498 """ Fill all USERN macros with value of properties"""
499 #Now the ressource file part
500 properties
= cls
.properties
502 for n
in xrange(1, 256):
504 properties
['$USER'+n
+'$'] = StringProp(default
='')
505 macros
['USER'+n
] = '$USER'+n
+'$'
506 #Set this a Class method
507 fill_usern_macros
= classmethod(fill_usern_macros
)
510 # We've got macro in the resource file and we want
511 # to update our MACRO dict with it
512 def fill_resource_macros_names_macros(self
):
513 """ fill the macro dict will all value
514 from self.resource_macros_names"""
515 macros
= self
.__class
__.macros
516 for macro_name
in self
.resource_macros_names
:
517 macros
[macro_name
] = '$'+macro_name
+'$'
520 def load_params(self
, params
):
522 elts
= elt
.split('=')
523 if len(elts
) == 1: #error, there is no = !
524 self
.conf_is_correct
= False
525 print "Error : the parameter %s is malformed! (no = sign)" % elts
[0]
527 self
.params
[elts
[0]] = elts
[1]
528 setattr(self
, elts
[0], elts
[1])
529 #Maybe it's a variable as $USER$ or $ANOTHERVATRIABLE$
530 #so look at the first character. If it's a $, it's a variable
531 #and if it's end like it too
532 if elts
[0][0] == '$' and elts
[0][-1] == '$':
533 macro_name
= elts
[0][1:-1]
534 self
.resource_macros_names
.append(macro_name
)
538 def _cut_line(self
, line
):
539 #punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
540 tmp
= re
.split("[" + string
.whitespace
+ "]+" , line
)
541 r
= [elt
for elt
in tmp
if elt
!= '']
545 def read_config(self
, files
):
546 #just a first pass to get the cfg_file and all files in a buf
550 #We add a \n (or \r\n) to be sure config files are separated
551 #if the previous does not finish with a line return
553 print "Opening configuration file", file
555 # Open in Universal way for Windows, Mac, Linux
556 fd
= open(file, 'rU')
559 config_base_dir
= os
.path
.dirname(file)
561 logger
.log("Error: Cannot open config file '%s' for reading: %s" % (file, exp
))
562 #The configuration is invalid because we have a bad file!
563 self
.conf_is_correct
= False
567 # Should not be useful anymore with the Universal open
568 # if os.name != 'nt':
569 # line = line.replace("\r\n", "\n")
572 if re
.search("^cfg_file", line
) or re
.search("^resource_file", line
):
573 elts
= line
.split('=')
574 if os
.path
.isabs(elts
[1]):
575 cfg_file_name
= elts
[1]
577 cfg_file_name
= os
.path
.join(config_base_dir
, elts
[1])
579 fd
= open(cfg_file_name
, 'rU')
580 logger
.log("Processing object config file '%s'" % cfg_file_name
)
582 #Be sure to add a line return so we won't mix files
586 logger
.log("Error: Cannot open config file '%s' for reading: %s" % (cfg_file_name
, exp
))
587 #The configuration is invalid because we have a bad file!
588 self
.conf_is_correct
= False
589 elif re
.search("^cfg_dir", line
):
590 elts
= line
.split('=')
591 if os
.path
.isabs(elts
[1]):
592 cfg_dir_name
= elts
[1]
594 cfg_dir_name
= os
.path
.join(config_base_dir
, elts
[1])
595 #Ok, look if it's really a directory
596 if not os
.path
.isdir(cfg_dir_name
):
597 logger
.log("Error: Cannot open config dir '%s' for reading" % cfg_dir_name
)
598 self
.conf_is_correct
= False
600 for root
, dirs
, files
in os
.walk(cfg_dir_name
):
602 if re
.search("\.cfg$", file):
603 logger
.log("Processing object config file '%s'" % os
.path
.join(root
, file))
606 fd
= open(os
.path
.join(root
, file), 'rU')
610 logger
.log("Error: Cannot open config file '%s' for reading: %s" % (os
.path
.join(root
, file), exp
))
611 # The configuration is invalid
612 # because we have a bad file!
613 self
.conf_is_correct
= False
615 # self.read_config_buf(res)
618 def read_config_buf(self
, buf
):
620 objectscfg
= {'void': [],
626 'notificationway' : [],
630 'servicedependency' : [],
631 'hostdependency' : [],
639 'resultmodulation' : [],
641 'serviceescalation' : [],
642 'hostescalation' : [],
647 continuation_line
= False
649 lines
= buf
.split('\n')
651 line
= line
.split(';')[0]
652 #A backslash means, there is more to come
653 if re
.search("\\\s*$", line
):
654 continuation_line
= True
655 line
= re
.sub("\\\s*$", "", line
)
656 line
= re
.sub("^\s+", " ", line
)
659 elif continuation_line
:
660 #Now the continuation line is complete
661 line
= re
.sub("^\s+", "", line
)
662 line
= tmp_line
+ line
664 continuation_line
= False
665 if re
.search("}", line
):
667 if re
.search("^\s*\t*#|^\s*$|^\s*}", line
):
670 #A define must be catch and the type save
671 #The old entry must be save before
672 elif re
.search("^define", line
):
674 if tmp_type
not in objectscfg
:
675 objectscfg
[tmp_type
] = []
676 objectscfg
[tmp_type
].append(tmp
)
679 elts
= re
.split('\s', line
)
681 tmp_type
= tmp_type
.split('{')[0]
688 objectscfg
[tmp_type
].append(tmp
)
691 #print "Params", params
692 self
.load_params(params
)
693 #And then update our MACRO dict
694 self
.fill_resource_macros_names_macros()
696 for type in objectscfg
:
698 for items
in objectscfg
[type]:
701 elts
= self
._cut
_line
(line
)
704 value
= ' '.join(elts
[1:])
707 objects
[type].append(tmp
)
712 # We need to have some ghost objects like
713 # the check_command bp_rule for business
715 def add_ghost_objects(self
, raw_objects
):
716 bp_rule
= {'command_name' : 'bp_rule', 'command_line' : 'bp_rule'}
717 raw_objects
['command'].append(bp_rule
)
718 host_up
= {'command_name' : '_internal_host_up', 'command_line' : '_internal_host_up'}
719 raw_objects
['command'].append(host_up
)
722 #We've got raw objects in string, now create real Instances
723 def create_objects(self
, raw_objects
):
724 """ Create real 'object' from dicts of prop/value """
725 types_creations
= self
.__class
__.types_creations
727 #some types are already created in this time
728 early_created_types
= ['arbiter', 'module']
730 # Before really create the objects, we add
731 # ghost ones like the bp_rule for correlation
732 self
.add_ghost_objects(raw_objects
)
734 for t
in types_creations
:
735 if t
not in early_created_types
:
736 self
.create_objects_for_type(raw_objects
, t
)
739 def create_objects_for_type(self
, raw_objects
, type):
740 types_creations
= self
.__class
__.types_creations
742 #Ex: the above code do for timeperiods:
744 #for timeperiodcfg in objects['timeperiod']:
745 # t = Timeperiod(timeperiodcfg)
747 # timeperiods.append(t)
748 #self.timeperiods = Timeperiods(timeperiods)
750 (cls
, clss
, prop
) = types_creations
[t
]
751 #List where we put objects
753 for obj_cfg
in raw_objects
[t
]:
#We create the object
758 #we create the objects Class and we set it in prop
759 setattr(self
, prop
, clss(lst
))
763 #Here arbiter and modules objects should be prepare and link
764 #before all others types
765 def early_arbiter_linking(self
):
766 """ Prepare the arbiter for early operations """
767 self
.modules
.create_reversed_list()
769 if len(self
.arbiterlinks
) == 0:
770 logger
.log("Warning : there is no arbiter, I add one in localhost:7770")
771 a
= ArbiterLink({'arbiter_name' : 'Default-Arbiter',
772 'host_name' : socket
.gethostname(),
773 'address' : 'localhost', 'port' : '7770',
775 self
.arbiterlinks
= ArbiterLinks([a
])
778 self
.arbiterlinks
.fill_default()
781 #print "****************** Pythonize ******************"
782 self
.arbiterlinks
.pythonize()
784 #print "****************** Linkify ******************"
785 self
.arbiterlinks
.linkify(self
.modules
)
789 # We use linkify to make the config more efficient : elements will be
790 # linked, like pointers. For example, a host will have it's service,
791 # and contacts directly in it's properties
792 # REMEMBER: linkify AFTER explode...
794 """ Make 'links' between elements, like a host got a services list
795 with all it's services in it """
797 # First linkify myself like for some global commands
798 self
.linkify_one_command_with_commands(self
.commands
, 'ocsp_command')
799 self
.linkify_one_command_with_commands(self
.commands
, 'ochp_command')
800 self
.linkify_one_command_with_commands(self
.commands
, 'host_perfdata_command')
801 self
.linkify_one_command_with_commands(self
.commands
, 'service_perfdata_command')
804 # link hosts with timeperiods and commands
805 self
.hosts
.linkify(self
.timeperiods
, self
.commands
, \
806 self
.contacts
, self
.realms
, \
807 self
.resultmodulations
, self
.escalations
,\
810 # Do the simplify AFTER explode groups
812 # link hostgroups with hosts
813 self
.hostgroups
.linkify(self
.hosts
, self
.realms
)
816 # link services with other objects
817 self
.services
.linkify(self
.hosts
, self
.commands
, \
818 self
.timeperiods
, self
.contacts
,\
819 self
.resultmodulations
, self
.escalations
,\
822 #print "Service groups"
823 # link servicegroups members with services
824 self
.servicegroups
.linkify(self
.services
)
826 # link notificationways with timeperiods and commands
827 self
.notificationways
.linkify(self
.timeperiods
, self
.commands
)
829 #print "Contactgroups"
830 #link contacgroups with contacts
831 self
.contactgroups
.linkify(self
.contacts
)
834 #link contacts with timeperiods and commands
835 self
.contacts
.linkify(self
.timeperiods
, self
.commands
,
836 self
.notificationways
)
839 #link timeperiods with timeperiods (exclude part)
840 self
.timeperiods
.linkify()
842 #print "Servicedependancy"
843 self
.servicedependencies
.linkify(self
.hosts
, self
.services
,
846 #print "Hostdependancy"
847 self
.hostdependencies
.linkify(self
.hosts
, self
.timeperiods
)
849 #print "Resultmodulations"
850 self
.resultmodulations
.linkify(self
.timeperiods
)
853 self
.escalations
.linkify(self
.timeperiods
, self
.contacts
, \
854 self
.services
, self
.hosts
)
857 self
.realms
.linkify()
859 #print "Schedulers and satellites"
860 #Link all links with realms
861 # self.arbiterlinks.linkify(self.modules)
862 self
.schedulerlinks
.linkify(self
.realms
, self
.modules
)
863 self
.brokers
.linkify(self
.realms
, self
.modules
)
864 self
.reactionners
.linkify(self
.realms
, self
.modules
)
865 self
.pollers
.linkify(self
.realms
, self
.modules
)
869 #Some properties are dangerous to be send like that
870 #like realms linked in hosts. Realms are too big to send (too linked)
871 def prepare_for_sending(self
):
872 self
.hosts
.prepare_for_sending()
876 #print 'Parameters:', self
877 #print 'Hostgroups:',self.hostgroups,'\n'
878 #print 'Services:', self.services
879 print "Slots", Service
.__slots
__
882 print '\t', h
.get_name(), h
.contacts
884 for s
in self
.services
:
885 print '\t', s
.get_name(), s
.contacts
886 #print 'Templates:', self.hosts_tpl
887 #print 'Hosts:',self.hosts,'\n'
888 #print 'Contacts:', self.contacts
889 #print 'contactgroups',self.contactgroups
890 #print 'Servicegroups:', self.servicegroups
891 #print 'Timepriods:', self.timeperiods
892 #print 'Commands:', self.commands
893 #print "Number of services:", len(self.services.items)
894 #print "Service Dep", self.servicedependencies
895 #print "Schedulers", self.schedulerlinks
898 #It's used to change Nagios2 names to Nagios3 ones
899 #For hosts and services
900 def old_properties_names_to_new(self
):
901 super(Config
, self
).old_properties_names_to_new()
902 self
.hosts
.old_properties_names_to_new()
903 self
.services
.old_properties_names_to_new()
#It's used to warn about useless parameters and print why they are not used.
907 def notice_about_useless_parameters(self
):
908 if not self
.disable_old_nagios_parameters_whining
:
909 properties
= self
.__class
__.properties
910 for prop
in properties
:
911 entry
= properties
[prop
]
912 if isinstance(entry
, UnusedProp
):
913 text
= 'Notice : the parameter %s is useless and can be removed from the configuration (Reason: %s)' % (prop
, entry
.text
)
917 # It's used to raise warning if the user got parameter
918 # that we do not manage from now
919 def warn_about_unmanaged_parameters(self
):
920 properties
= self
.__class
__.properties
922 for prop
in properties
:
923 entry
= properties
[prop
]
924 if not entry
.managed
and hasattr(self
, prop
):
926 s
= "%s : %s" % (prop
, entry
.help)
930 if len(unmanaged
) != 0:
932 mailing_list_uri
= "https://lists.sourceforge.net/lists/listinfo/shinken-devel"
933 text
= 'Warning : the folowing parameter(s) are not curently managed.'
937 text
= 'Please look if you really need it. If so, please register at the devel mailing list (%s) and ask for it or propose us a patch :)' % mailing_list_uri
942 #Use to fill groups values on hosts and create new services
943 #(for host group ones)
945 #first elements, after groups
947 self
.contacts
.explode(self
.contactgroups
, self
.notificationways
)
948 #print "Contactgroups"
949 self
.contactgroups
.explode()
952 self
.hosts
.explode(self
.hostgroups
, self
.contactgroups
)
954 self
.hostgroups
.explode()
957 #print "Initialy got nb of services : %d" % len(self.services.items)
958 self
.services
.explode(self
.hosts
, self
.hostgroups
, self
.contactgroups
,
959 self
.servicegroups
, self
.servicedependencies
)
960 #print "finally got nb of services : %d" % len(self.services.items)
961 #print "Servicegroups"
962 self
.servicegroups
.explode()
965 self
.timeperiods
.explode()
967 self
.hostdependencies
.explode()
969 #print "Servicedependancy"
970 self
.servicedependencies
.explode()
972 #Serviceescalations hostescalations will create new escalations
973 self
.serviceescalations
.explode(self
.escalations
)
974 self
.hostescalations
.explode(self
.escalations
)
975 self
.escalations
.explode(self
.hosts
, self
.hostgroups
,
978 #Now the architecture part
980 self
.realms
.explode()
983 #Remove elements will the same name, so twins :)
984 #In fact only services should be acceptable with twins
985 def remove_twins(self
):
986 #self.hosts.remove_twins()
987 self
.services
.remove_twins()
988 #self.contacts.remove_twins()
989 #self.timeperiods.remove_twins()
#Dependencies are important for scheduling.
#This function creates the dependency links between elements.
994 def apply_dependancies(self
):
995 self
.hosts
.apply_dependancies()
996 self
.services
.apply_dependancies()
#Use to apply inheritance (template and implicit ones)
#So elements wil have their configured properties
def apply_inheritance(self):
    """Resolve template inheritance so elements get their configured properties."""
    # Hosts and contacts resolve inheritance on their own.
    for collection in (self.hosts, self.contacts):
        collection.apply_inheritance()
    # Services and service dependencies need the host list to resolve
    # host-driven inheritance.
    for collection in (self.services, self.servicedependencies):
        collection.apply_inheritance(self.hosts)
    # Remaining object types inherit independently.
    for collection in (self.hostdependencies, self.timeperiods):
        collection.apply_inheritance()
#Use to apply implicit inheritance
def apply_implicit_inheritance(self):
    """Propagate implicit (host -> service) inheritance to the services."""
    # Only the services need this pass; they pull values from their hosts.
    self.services.apply_implicit_inheritance(self.hosts)
#will fill properties for elements so they will have all theirs properties
def fill_default(self):
    """Fill every element (and the global conf itself) with default property values."""
    #Fill default for config (self)
    super(Config, self).fill_default()
    # Defaults for every monitored-object collection, including the
    # host/servicedep objects.
    for section in ('hosts', 'hostgroups', 'contacts', 'contactgroups',
                    'notificationways', 'services', 'servicegroups',
                    'resultmodulations', 'servicedependencies',
                    'hostdependencies'):
        getattr(self, section).fill_default()
    # Create missing satellites first, so no other satellite will be
    # created after this point...
    self.fill_default_satellites()
    # ...then, with all elements present, create a default realm if
    # needed; unbound satellites get tagged with it.
    self.fill_default_realm()
    # Defaults for the satellite link collections.
    for section in ('reactionners', 'pollers', 'brokers', 'schedulerlinks'):
        getattr(self, section).fill_default()
    # self.arbiterlinks.fill_default()
    #Now fill some fields we can predict (like adress for hosts)
    self.fill_predictive_missing_parameters()
#Here is a special functions to fill some special
#properties that are not filled and should be like
#adress for host (if not set, put host_name)
def fill_predictive_missing_parameters(self):
    """Guess values for unset special properties (e.g. host address from host_name)."""
    # Only hosts have predictable parameters to fill.
    self.hosts.fill_predictive_missing_parameters()
#Will check if a realm is defined, if not
#Create a new one (default) and tag everyone that do not have
#a realm prop to be put in this realm
def fill_default_realm(self):
    """Create a 'Default' realm when none is defined and tag unbound satellites with it."""
    # Only act when the user defined no realm at all.
    if len(self.realms) == 0:
        #Create a default realm with default value =1
        #so all hosts without realm wil be link with it
        default = Realm({'realm_name' : 'Default', 'default' : '1'})
        self.realms = Realms([default])
        logger.log("Notice : the is no defined realms, so I add a new one %s" % default.get_name())
        # Satellite link lists whose members may lack a 'realm' property.
        lists = [self.pollers, self.brokers, self.reactionners, self.schedulerlinks]
        # NOTE(review): the loop header(s) iterating 'lists' to bind 'elt'
        # (presumably nested 'for' loops over each list) are not visible in
        # this chunk — confirm against the full file.
        if not hasattr(elt, 'realm'):
            elt.realm = 'Default'
            logger.log("Notice : Tagging %s with realm %s" % (elt.get_name(), default.get_name()))
#If a satellite is missing, we add them in the localhost
#with defaults values
def fill_default_satellites(self):
    """Create one default localhost satellite of each kind when the user defined none."""
    # Default scheduler on localhost:7768.
    if len(self.schedulerlinks) == 0:
        logger.log("Warning : there is no scheduler, I add one in localhost:7768")
        scheduler = SchedulerLink({'scheduler_name' : 'Default-Scheduler',
                                   'address' : 'localhost', 'port' : '7768'})
        self.schedulerlinks = SchedulerLinks([scheduler])
    # Default poller on localhost:7771.
    if len(self.pollers) == 0:
        logger.log("Warning : there is no poller, I add one in localhost:7771")
        poller = PollerLink({'poller_name' : 'Default-Poller',
                             'address' : 'localhost', 'port' : '7771'})
        self.pollers = PollerLinks([poller])
    # Default reactionner on localhost:7769.
    if len(self.reactionners) == 0:
        logger.log("Warning : there is no reactionner, I add one in localhost:7769")
        reactionner = ReactionnerLink({'reactionner_name' : 'Default-Reactionner',
                                       'address' : 'localhost', 'port' : '7769'})
        self.reactionners = ReactionnerLinks([reactionner])
    # Default broker on localhost:7772; it also manages arbiters.
    if len(self.brokers) == 0:
        logger.log("Warning : there is no broker, I add one in localhost:7772")
        broker = BrokerLink({'broker_name' : 'Default-Broker',
                             'address' : 'localhost', 'port' : '7772',
                             'manage_arbiters' : '1'})
        self.brokers = BrokerLinks([broker])
#Return if one broker got a module of type : mod_type
def got_broker_module_type_defined(self, mod_type):
    """Return whether any broker carries a module whose module_type equals mod_type."""
    # NOTE(review): the inner loop binding 'm' (presumably iterating the
    # broker's modules) and the return statements are not visible in this
    # chunk — the code below is incomplete; confirm against the full file.
    for b in self.brokers:
        if hasattr(m, 'module_type') and m.module_type == mod_type:
#return if one scheduler got a module of type : mod_type
def got_scheduler_module_type_defined(self, mod_type):
    """Return whether any scheduler link carries a module whose module_type equals mod_type."""
    # NOTE(review): the inner loop binding 'm' (presumably iterating the
    # scheduler's modules) and the return statements are not visible in
    # this chunk — the code below is incomplete; confirm against the full file.
    for b in self.schedulerlinks:
        if hasattr(m, 'module_type') and m.module_type == mod_type:
# Will ask for each host/service if the
# check_command is a bp rule. If so, it will create
# a tree structures with the rules
def create_business_rules(self):
    """Build business-rule trees for hosts/services whose check_command is a bp rule."""
    # Both collections need the full host and service lists so the rule
    # expressions can resolve the elements they reference.
    for collection in (self.hosts, self.services):
        collection.create_business_rules(self.hosts, self.services)
# Will fill dep list for business rules
def create_business_rules_dependencies(self):
    """Register the dependency lists implied by business rules."""
    # Hosts and services each register their own rule dependencies.
    for collection in (self.hosts, self.services):
        collection.create_business_rules_dependencies()
#It's used to hack some old Nagios parameters like
#log_file or status_file : if they are present in
#the global configuration and there is no such modules
#in a Broker, we create it on the fly for all Brokers
def hack_old_nagios_parameters(self):
    """ Create some 'modules' from all nagios parameters if they are set and
    the modules are not created """
    # NOTE(review): several lines of this method are missing from this view
    # — notably the initialisation of 'mod_to_add' and the statements
    # creating 'mod' from each 'data' dict (presumably 'mod = Module(data)')
    # and the loops attaching the modules to each broker/scheduler — so the
    # code below is incomplete; confirm against the full file.
    #We list all modules we will add to brokers
    mod_to_add_to_schedulers = []
    # --- status.dat export (old Nagios status_file / object_cache_file) ---
    if hasattr(self, 'status_file') and self.status_file != '' and hasattr(self, 'object_cache_file'):
        #Ok, the user put such a value, we must look
        #if he forget to put a module for Brokers
        got_status_dat_module = self.got_broker_module_type_defined('status_dat')
        #We need to create the modue on the fly?
        if not got_status_dat_module:
            data = { 'object_cache_file': self.object_cache_file,
                     'status_file': self.status_file,
                     'module_name': 'Status-Dat-Autogenerated',
                     'module_type': 'status_dat'}
            mod.status_update_interval = getattr(self, 'status_update_interval', 15)
            mod_to_add.append(mod)
    # --- plain log file -> simple_log broker module ---
    if hasattr(self, 'log_file') and self.log_file != '':
        #Ok, the user put such a value, we must look
        #if he forget to put a module for Brokers
        got_simple_log_module = self.got_broker_module_type_defined('simple_log')
        #We need to create the module on the fly?
        if not got_simple_log_module:
            data = {'module_type': 'simple_log', 'path': self.log_file,
                    'archive_path' : self.log_archive_path,
                    'module_name': 'Simple-log-Autogenerated'}
            mod_to_add.append(mod)
    #Now the syslog facility
    # NOTE(review): the guard condition for this branch (presumably
    # 'if self.use_syslog:') is not visible in this chunk.
    #Ok, the user want a syslog logging, why not after all
    got_syslog_module = self.got_broker_module_type_defined('syslog')
    #We need to create the module on the fly?
    if not got_syslog_module:
        data = {'module_type': 'syslog',
                'module_name': 'Syslog-Autogenerated'}
        mod_to_add.append(mod)
    #Now the service_perfdata module
    if self.service_perfdata_file != '':
        #Ok, we've got a path for a service perfdata file
        got_service_perfdata_module = self.got_broker_module_type_defined('service_perfdata')
        #We need to create the module on the fly?
        if not got_service_perfdata_module:
            data = {'module_type': 'service_perfdata',
                    'module_name': 'Service-Perfdata-Autogenerated',
                    'path' : self.service_perfdata_file,
                    'mode' : self.service_perfdata_file_mode,
                    'template' : self.service_perfdata_file_template}
            mod_to_add.append(mod)
    #Now the old retention file module
    if self.state_retention_file != '' and self.retention_update_interval != 0:
        #Ok, we've got a old retention file
        got_retention_file_module = self.got_scheduler_module_type_defined('nagios_retention_file')
        #We need to create the module on the fly?
        if not got_retention_file_module:
            data = {'module_type': 'nagios_retention_file',
                    'module_name': 'Nagios-Retention-File-Autogenerated',
                    'path' : self.state_retention_file}
            mod_to_add_to_schedulers.append(mod)
    #Now the host_perfdata module
    if self.host_perfdata_file != '':
        #Ok, we've got a path for a host perfdata file
        got_host_perfdata_module = self.got_broker_module_type_defined('host_perfdata')
        #We need to create the module on the fly?
        if not got_host_perfdata_module:
            data = {'module_type': 'host_perfdata',
                    'module_name': 'Host-Perfdata-Autogenerated',
                    'path' : self.host_perfdata_file, 'mode' : self.host_perfdata_file_mode,
                    'template' : self.host_perfdata_file_template}
            mod_to_add.append(mod)
    #We add them to the brokers if we need it
    if mod_to_add != []:
        print "Warning : I autogenerated some Broker modules, please look at your configuration"
        for m in mod_to_add:
            print "Warning : the module", m.module_name, "is autogenerated"
            for b in self.brokers:
    #Then for schedulers
    if mod_to_add_to_schedulers != []:
        print "Warning : I autogenerated some Scheduler modules, please look at your configuration"
        for m in mod_to_add_to_schedulers:
            print "Warning : the module", m.module_name, "is autogenerated"
            for b in self.schedulerlinks:
# Set our timezone value and give it too to unset satellites
def propagate_timezone_option(self):
    """Export use_timezone to our own environment and to satellites that did not set one."""
    if self.use_timezone != '':
        # Apply the configured timezone to this process via TZ.
        os.environ['TZ'] = self.use_timezone
        # Satellite link lists that should inherit our timezone.
        tab = [self.schedulerlinks, self.pollers, self.brokers, self.reactionners]
        # NOTE(review): the loop header(s) binding 's' (presumably nested
        # 'for' loops over 'tab') are not visible in this chunk — the code
        # below is incomplete; confirm against the full file.
        if s.use_timezone == 'NOTSET':
            setattr(s, 'use_timezone', self.use_timezone)
# Link templates with elements
def linkify_templates(self):
    """ Like for normal object, we link templates with each others """
    # Each collection resolves its own template ('use') chains.
    for section in ('hosts', 'contacts', 'services',
                    'servicedependencies', 'hostdependencies',
                    'timeperiods'):
        getattr(self, section).linkify_templates()
# Reversed list is a dist with name for quick search by name
def create_reversed_list(self):
    """ Create quick search lists for objects """
    # Every collection builds its own by-name lookup table.
    for section in ('hosts', 'hostgroups', 'contacts', 'contactgroups',
                    'notificationways', 'services', 'servicegroups',
                    'timeperiods', 'resultmodulations', 'escalations'):
        getattr(self, section).create_reversed_list()
    # self.modules.create_reversed_list()
    #For services it's a special case
    #we search for hosts, then for services
    #it's quicker than search in all services
    self.services.optimize_service_search(self.hosts)
#Some parameters are just not managed like O*HP commands
#and regexp capabilities
#False : error in conf
def check_error_on_hard_unmanaged_parameters(self):
    """Log an error for each configured-but-unmanaged parameter; False means bad conf."""
    # NOTE(review): the initialisation of the result flag and the final
    # 'return' statement are not visible in this chunk — the visible header
    # comment suggests it returns False on error; confirm against the
    # full file.
    if self.use_regexp_matching:
        logger.log("Error : the use_regexp_matching parameter is not managed.")
    #if self.ochp_command != '':
    #    logger.log("Error : the ochp_command parameter is not managed.")
    #if self.ocsp_command != '':
    #    logger.log("Error : the ocsp_command parameter is not managed.")
# check if elements are correct or not (fill with defaults, etc)
# Warning : this function call be called from a Arbiter AND
# from and scheduler. The first one got everything, the second
# does not have the satellites.
def is_correct(self):
    """ Check if all elements got a good configuration """
    # NOTE(review): several lines are missing from this view (the
    # statements that clear the validity flag on failure and the 'except'
    # clause of the try below) — the code as shown is incomplete; confirm
    # against the full file.
    logger.log('Running pre-flight check on configuration data...')
    # Start from whatever verdict earlier passes already produced.
    r = self.conf_is_correct
    # Globally unamanged parameters
    logger.log('Checking global parameters...')
    if not self.check_error_on_hard_unmanaged_parameters():
        logger.log("check global parameters failed")
    # Collections that both Arbiter and scheduler own.
    for x in ('hosts', 'hostgroups', 'contacts', 'contactgroups', 'notificationways',
              'escalations', 'services', 'servicegroups', 'timeperiods'):
        logger.log('Checking %s...' % (x))
        cur = getattr(self, x)
        if not cur.is_correct():
            logger.log("\t%s conf incorrect !!" % (x))
        logger.log('\tChecked %d %s' % (len(cur), x))
    # Hosts got a special check for loops
    if not self.hosts.no_loop_in_parents():
        logger.log("hosts: detected loop in parents ; conf incorrect")
    # Satellite-side collections may be absent on a scheduler, hence the try.
    for x in ( 'servicedependencies', 'hostdependencies', 'arbiterlinks', 'schedulerlinks',
               'reactionners', 'pollers', 'brokers', 'resultmodulations'):
        try: cur = getattr(self, x)
        logger.log('Checking %s' % (x))
        if not cur.is_correct():
            logger.log("\t%s conf incorrect !!" % (x))
        logger.log('\tChecked %d %s' % (len(cur), x))
    # Publish the final verdict.
    self.conf_is_correct = r
#We've got strings (like 1) but we want python elements, like True
def pythonize(self):
    """Convert raw string properties into real Python values (e.g. '1' -> True)."""
    #call item pythonize for parameters
    super(Config, self).pythonize()
    # Then let every object collection convert its own members.
    for section in ('hosts', 'hostgroups', 'hostdependencies',
                    'contactgroups', 'contacts', 'notificationways',
                    'servicegroups', 'services', 'servicedependencies',
                    'resultmodulations', 'escalations',
                    'schedulerlinks', 'realms', 'reactionners',
                    'pollers', 'brokers'):
        getattr(self, section).pythonize()
    # self.arbiterlinks.pythonize()
#Explode parameters like cached_service_check_horizon in the
#Service class in a cached_check_horizon manner, o*hp commands
def explode_global_conf(self):
    """Push global configuration values down into the classes that consume them."""
    # Classes that pull their class-level settings from the global conf.
    # NOTE(review): the tail of this list and the 'for cls in clss:' loop
    # header are not visible in this chunk — the code below is incomplete;
    # confirm against the full file.
    clss = [Service, Host, Contact, SchedulerLink,
            PollerLink, ReactionnerLink, BrokerLink,
    cls.load_global_conf(self)
#Clean useless elements like templates because they are not needed anymore
def clean_useless(self):
    """Drop template objects that are no longer needed after linking."""
    # Each of these collections purges its own leftover templates.
    for collection in (self.hosts, self.contacts, self.services,
                       self.servicedependencies, self.hostdependencies,
                       self.timeperiods):
        collection.clean_useless()
#Create packs of hosts and services so in a pack,
#all dependencies are resolved
#It create a graph. All hosts are connected to their
#parents, and hosts without parent are connected to host 'root'.
#services are link to the host. Dependencies are managed
#REF: doc/pack-creation.png
def create_packs(self, nb_packs):
    """Group hosts/services into dependency-closed packs and assign them to realms."""
    # NOTE(review): numerous lines of this method are missing from this
    # view (graph and 'links' set initialisation, several loop headers,
    # else branches and bookkeeping statements); the indentation below is
    # a best-effort reconstruction and the code as shown is NOT complete —
    # confirm against the full file.
    #We create a graph with host in nodes
    g.add_nodes(self.hosts)
    #links will be used for relations between hosts
    # Collect (dependency, host) pairs from every host.
    for h in self.hosts:
        #Add parent relations
        #Add the others dependencies
        for (dep, tmp, tmp2, tmp3, tmp4) in h.act_depend_of:
        for (dep, tmp, tmp2, tmp3, tmp4) in h.chk_depend_of:
    #For services : they are link woth their own host but we need
    #To have the hosts of service dep in the same pack too
    for s in self.services:
        for (dep, tmp, tmp2, tmp3, tmp4) in s.act_depend_of:
            #I don't care about dep host: they are just the host
            if hasattr(dep, 'host'):
                links.add((dep.host, s.host))
        #The othe type of dep
        for (dep, tmp, tmp2, tmp3, tmp4) in s.chk_depend_of:
            links.add((dep.host, s.host))
    # For host/service that are business based, we need to
    # link the hosts referenced by the rule into the same pack.
    for s in [s for s in self.services if s.got_business_rule]:
        for e in s.business_rule.list_all_elements():
            if hasattr(e, 'host'): # if it's a service
                if e.host != s.host: # do not an host with itself
                    links.add((e.host, s.host))
            else: # it's already a host
                links.add((e, s.host))
    # Same for hosts of course
    for h in [ h for h in self.hosts if h.got_business_rule]:
        for e in h.business_rule.list_all_elements():
            if hasattr(e, 'host'): # if it's a service
                links.add((e.host, h))
    #Now we create links in the graph. With links (set)
    #We are sure to call the less add_edge
    for (dep, h) in links:
    #Access_list from a node il all nodes that are connected
    #with it : it's a list of ours mini_packs
    tmp_packs = g.get_accessibility_packs()
    #Now We find the default realm (must be unique or
    #BAD THINGS MAY HAPPEN )
    default_realm = None
    for r in self.realms:
        if hasattr(r, 'default') and r.default:
    #Now we look if all elements of all packs have the
    #same realm. If not, not good!
    for pack in tmp_packs:
            if elt.realm != None:
                tmp_realms.add(elt.realm)
        # More than one realm in a single pack is a configuration error.
        if len(tmp_realms) > 1:
            logger.log("Error : the realm configuration of yours hosts is not good because there a more than one realm in one pack (host relations) :")
                logger.log('Error : the host %s do not have a realm' % h.get_name())
                logger.log('Error : the host %s is in the realm %s' % (h.get_name(), h.realm.get_name()))
        if len(tmp_realms) == 1: # Ok, good
            r = tmp_realms.pop() #There is just one element
            r.packs.append(pack)
        elif len(tmp_realms) == 0: #Hum.. no realm value? So default Realm
            if default_realm != None:
                default_realm.packs.append(pack)
                logger.log("Error : some hosts do not have a realm and you do not defined a default realm!")
                logger.log('Host in this pack : %s ' % h.get_name())
    #The load balancing is for a loop, so all
    #hosts of a realm (in a pack) will be dispatch
    #in the schedulers of this realm
    #REF: doc/pack-agregation.png
    for r in self.realms:
        #print "Load balancing realm", r.get_name()
        #create roundrobin iterator for id of cfg
        #So dispatching is loadbalanced in a realm
        #but add a entry in the roundrobin tourniquet for
        #every weight point schedulers (so Weight round robin)
        no_spare_schedulers = [s for s in r.schedulers if not s.spare]
        nb_schedulers = len(no_spare_schedulers)
        #Maybe there is no scheduler in the realm, it's can be a
        #big problem if there are elements in packs
        nb_elements = len([elt for elt in [pack for pack in r.packs]])
        logger.log("Number of hosts in the realm %s : %d" %(r.get_name(), nb_elements))
        if nb_schedulers == 0 and nb_elements != 0:
            logger.log("ERROR : The realm %s have hosts but no scheduler!" %r.get_name())
            r.packs = [] #Dumb pack
            #The conf is incorrect
            self.conf_is_correct = False
        # Weighted round-robin: one tourniquet slot per weight point.
        for s in no_spare_schedulers:
            packindices[s.id] = packindex
            for i in xrange(0, s.weight):
                weight_list.append(s.id)
        rr = itertools.cycle(weight_list)
        #we must have nb_schedulers packs)
        for i in xrange(0, nb_schedulers):
        #Now we explode the numerous packs into nb_packs reals packs:
        #we 'load balance' them in a roundrobin way
        for pack in r.packs:
                packs[packindices[i]].append(elt)
        #Now in packs we have the number of packs [h1, h2, etc]
        #equal to the number of schedulers.
#Use the self.conf and make nb_parts new confs.
#nbparts is equal to the number of schedulerlink
#New confs are independant whith checks. The only communication
#That can be need is macro in commands
def cut_into_parts(self):
    """Split the master configuration into one independent sub-conf per scheduler."""
    # NOTE(review): several lines of this method are missing from this view
    # (e.g. the initialisation of self.confs, 'offset', 'cfg',
    # 'new_hostgroups', 'mbrs_id' and some loop headers); the code below is
    # incomplete — confirm against the full file.
    #print "Scheduler configurated :", self.schedulerlinks
    #I do not care about alive or not. User must have set a spare if need it
    # One part per non-spare scheduler.
    nb_parts = len([s for s in self.schedulerlinks if not s.spare])
    # We create dummy configurations for schedulers :
    # they are clone of the master
    # conf but without hosts and services (because they are dispatched between
    # theses configurations)
    for i in xrange(0, nb_parts):
        #print "Create Conf:", i, '/', nb_parts -1
        self.confs[i] = Config()
        #Now we copy all properties of conf into the new ones
        for prop in Config.properties:
            # if not 'usage' in Config.properties[prop] \
            # or not (Config.properties[prop]['usage'] == 'unused' \
            # or Config.properties[prop]['usage'] == 'unmanaged'):
            if Config.properties[prop].managed \
                   and not isinstance(Config.properties[prop], UnusedProp):
                val = getattr(self, prop)
                setattr(self.confs[i], prop, val)
        #we need a deepcopy because each conf
        #will have new hostgroups
        self.confs[i].id = i
        self.confs[i].commands = self.commands
        self.confs[i].timeperiods = self.timeperiods
        #Create hostgroups with just the name and same id, but no members
        for hg in self.hostgroups:
            new_hostgroups.append(hg.copy_shell())
        self.confs[i].hostgroups = Hostgroups(new_hostgroups)
        self.confs[i].notificationways = self.notificationways
        self.confs[i].contactgroups = self.contactgroups
        self.confs[i].contacts = self.contacts
        self.confs[i].schedulerlinks = copy.copy(self.schedulerlinks)
        #Create hostgroups with just the name and same id, but no members
        new_servicegroups = []
        for sg in self.servicegroups:
            new_servicegroups.append(sg.copy_shell())
        self.confs[i].servicegroups = Servicegroups(new_servicegroups)
        self.confs[i].hosts = [] # will be fill after
        self.confs[i].services = [] # will be fill after
        # The elements of the others conf will be tag here
        self.confs[i].other_elements = {}
        # if a scheduler have accepted the conf
        self.confs[i].is_assigned = False
    logger.log("Creating packs for realms")
    #Just create packs. There can be numerous ones
    #In pack we've got hosts and service
    #packs are in the realms
    #REF: doc/pack-creation.png
    self.create_packs(nb_parts)
    #We've got all big packs and get elements into configurations
    #REF: doc/pack-agregation.png
    # Distribute each realm's packs into the sub-confs, offset by realm.
    for r in self.realms:
            self.confs[i+offset].hosts.append(h)
            for s in h.services:
                self.confs[i+offset].services.append(s)
            #Now the conf can be link in the realm
            r.confs[i+offset] = self.confs[i+offset]
        offset += len(r.packs)
    #We've nearly have hosts and services. Now we want REALS hosts (Class)
    #And we want groups too
    #print "Finishing packs"
    for i in self.confs:
        #print "Finishing pack Nb:", i
        #Create ours classes
        cfg.hosts = Hosts(cfg.hosts)
        cfg.hosts.create_reversed_list()
        cfg.services = Services(cfg.services)
        cfg.services.create_reversed_list()
        # Rebuild each shell hostgroup's member list from this conf's hosts.
        for ori_hg in self.hostgroups:
            hg = cfg.hostgroups.find_by_name(ori_hg.get_name())
            mbrs = ori_hg.members
                    mbrs_id.append(h.id)
                    hg.members.append(h)
        # Same for servicegroups, matched by service id.
        for ori_sg in self.servicegroups:
            sg = cfg.servicegroups.find_by_name(ori_sg.get_name())
            mbrs = ori_sg.members
                    mbrs_id.append(s.id)
            for s in cfg.services:
                    sg.members.append(s)
    #Now we fill other_elements by host (service are with their host
    #so they are not tagged)
    for i in self.confs:
        for h in self.confs[i].hosts:
            for j in [j for j in self.confs if j != i]: #So other than i
                # NOTE(review): this writes into self.confs[i] although the
                # loop variable is j — looks suspicious (one would expect
                # confs[j] to learn where h lives); confirm upstream intent.
                self.confs[i].other_elements[h.get_name()] = i
    #We tag conf with instance_id
    for i in self.confs:
        self.confs[i].instance_id = i
        random.seed(time.time())
        self.confs[i].magic_hash = random.randint(1, 100000)