Fix: handle the 'null' value in inheritance.
[shinken.git] / test / shinken_test.py
blob 9d643ead693060d1cee80d0e81e9e7c7a7189179
#!/usr/bin/env python2.6

# This file provides the base test class and helpers shared by the Shinken test suite.
import sys
import time
import datetime
import os
import string
import re
import random
import unittest

sys.path.append("..")
sys.path.append("../shinken")

from shinken.config import Config
from shinken.dispatcher import Dispatcher
from shinken.log import logger
from shinken.scheduler import Scheduler
from shinken.macroresolver import MacroResolver
from shinken.external_command import ExternalCommandManager, ExternalCommand
from shinken.check import Check
from shinken.module import Module
from shinken.schedulerlink import SchedulerLink
from shinken.pollerlink import PollerLink
from shinken.reactionnerlink import ReactionnerLink
from shinken.brokerlink import BrokerLink
from shinken.notification import Notification
from shinken.command import Command


class ShinkenTest(unittest.TestCase):
    def setUp(self):
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        # I am arbiter-like
        Config.fill_usern_macros()
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Compile the configuration: templates, inheritance, explosion,
        # linking and correctness checks, as the arbiter would do.
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.create_reversed_list()
        self.conf.remove_twins()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.clean_useless()
        self.conf.pythonize()
        self.conf.linkify()
        self.conf.apply_dependancies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        self.confs = self.conf.cut_into_parts()
        self.dispatcher = Dispatcher(self.conf, self.me)
        # Load the whole configuration into a single scheduler and plug in
        # an external command manager in 'applyer' mode.
        self.sched = Scheduler(None)
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        self.sched.schedule()
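
    # Subclasses typically point setUp() at a different configuration file,
    # for example (hypothetical file name):
    #
    #     class MyFeatureTest(ShinkenTest):
    #         def setUp(self):
    #             self.setup_with_file('etc/nagios_some_feature.cfg')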

    def add(self, b):
        # The logger was loaded with load_obj(self), so log broks land here.
        self.broks[b.id] = b

    def fake_check(self, ref, exit_status, output="OK"):
        #print "fake", ref
        now = time.time()
        ref.schedule(force=True)
        # Now the checks are scheduled and we get them from the action queue
        check = ref.actions.pop()
        self.sched.add(check)  # check is now in sched.checks[]

        # Fake the execution
        check.check_time = now

        elts_line1 = output.split('|')
        # The part before '|' is the plugin output
        check.output = elts_line1[0]
        # The part after '|' is the perfdata
        if len(elts_line1) > 1:
            check.perf_data = elts_line1[1]
        else:
            check.perf_data = ''
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)
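
    # Example (hypothetical host object): inject a CRITICAL result that carries
    # perfdata; the check then waits in sched.waiting_results until the next
    # consume_results() call picks it up:
    #
    #     self.fake_check(host, 2, "DOWN|rta=500ms")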

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61):
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.worker_loop()
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            #time.sleep(ref.retry_interval * 60 + 1)
            if do_sleep:
                time.sleep(sleep_time)
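
    # Typical call (hypothetical host/service objects): run three check rounds
    # in which the host is DOWN and the service is CRITICAL:
    #
    #     self.scheduler_loop(3, [[host, 2, 'DOWN'], [svc, 2, 'BAD']])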

    def worker_loop(self):
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False)
        actions = self.sched.get_to_run_checks(False, True)
        #print "------------ worker loop checks ----------------"
        #print checks
        #print "------------ worker loop actions ----------------"
        self.show_actions()
        #print "------------ worker loop new ----------------"
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        self.show_actions()
        #print "------------ worker loop end ----------------"

    def show_logs(self):
        print "--- logs <<<----------------------------------"
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                print "LOG:", brok.data['log']
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        print "--- actions <<<----------------------------------"
        for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        return len([b for b in self.sched.broks.values() if b.type == 'log'])

    def count_actions(self):
        return len(self.sched.actions.values())

    def clear_logs(self):
        id_to_del = []
        for b in self.sched.broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del self.sched.broks[id]

    def clear_actions(self):
        self.sched.actions = {}

    def log_match(self, index, pattern):
        # Log messages are counted 1...n, so index=1 is the first message
        if index > self.count_logs():
            return False
        else:
            regex = re.compile(pattern)
            lognum = 1
            for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
                if brok.type == 'log':
                    if index == lognum:
                        if re.search(regex, brok.data['log']):
                            return True
                    lognum += 1
        return False
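
    # Example (hypothetical pattern): assert that the second log message is a
    # host alert:
    #
    #     self.assert_(self.log_match(2, 'HOST ALERT'))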

    def any_log_match(self, pattern):
        regex = re.compile(pattern)
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    return True
        return False

    def get_log_match(self, pattern):
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        self.print_header()
        self.assert_(self.conf.conf_is_correct)
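

# A minimal usage sketch, not part of the original suite. It assumes the sample
# configuration defines a host named 'test_host_0' (an assumption; adjust to the
# config actually loaded). The 'xtest_' prefix keeps it disabled, following the
# same convention as xtest_conf_is_correct above.
class ExampleShinkenTest(ShinkenTest):
    def xtest_host_goes_down(self):
        self.print_header()
        host = self.sched.hosts.find_by_name('test_host_0')
        # Three failed checks in a row should leave the host in a DOWN state
        self.scheduler_loop(3, [[host, 2, 'DOWN']])
        self.show_and_clear_logs()
        self.assert_(host.state == 'DOWN')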


if __name__ == '__main__':
    unittest.main()