2 #Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
4 #This file is part of Shinken.
6 #Shinken is free software: you can redistribute it and/or modify
7 #it under the terms of the GNU Affero General Public License as published by
8 #the Free Software Foundation, either version 3 of the License, or
9 #(at your option) any later version.
11 #Shinken is distributed in the hope that it will be useful,
12 #but WITHOUT ANY WARRANTY; without even the implied warranty of
13 #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 #GNU Affero General Public License for more details.
16 #You should have received a copy of the GNU Affero General Public License
17 #along with Shinken. If not, see <http://www.gnu.org/licenses/>.
20 #This Class is a plugin for the Shinken Broker. It is in charge
21 #to brok information into the merlin database. for the moment
22 #only Mysql is supported. This code is __imported__ from Broker.
23 #The managed_brok function is called by Broker for manage the broks. It calls
24 #the manage_*_brok functions that create queries, and then run queries.
# copy and time are used below (copy.deepcopy in the program_status handlers,
# time.strftime/localtime in de_unixify).
import copy
import time

try:
    from shinken.db_mysql import DBMysql
except ImportError:  # TODO: fix this, python2.4 is not happy here?
    from db_mysql import DBMysql

from shinken.basemodule import BaseModule
def de_unixify(t):
    """Convert a unix timestamp *t* into a 'YYYY-MM-DD HH:MM:SS' string
    expressed in the local timezone (the format MySQL DATETIME expects)."""
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))
#Class for the Merlindb Broker
#Get broks and puts them in merlin database
class Ndodb_broker(BaseModule):
    """Broker module that exports Shinken broks into a NDO-style MySQL
    database (tables prefixed with 'nagios_')."""

    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function:
        # brok property -> NDO column name + optional value transform.
        self.mapping = {
            'program_status': {
                'program_start': {'name': 'program_start_time', 'transform': de_unixify},
                'pid': {'name': 'process_id', 'transform': None},
                'last_alive': {'name': 'status_update_time', 'transform': de_unixify},
                'is_running': {'name': 'is_currently_running', 'transform': None},
            },
        }
        # Database connection parameters, taken from the module configuration.
        # NOTE(review): host/user were not visible in the mangled source but are
        # required by init() which calls DBMysql(self.host, self.user, ...).
        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
63 #Called by Broker so we can do init stuff
64 #TODO : add conf param to get pass with init
67 print "I connect to NDO database"
68 self
.db
= DBMysql(self
.host
, self
.user
, self
.password
, self
.database
, self
.character_set
, table_prefix
='nagios_')
69 self
.connect_database()
71 #Cache for hosts and services
72 #will be flushed when we got a net instance id
73 #or something like that
74 self
.services_cache
= {}
78 #Get a brok, parse it, and put in in database
79 #We call functions like manage_ TYPEOFBROK _brok that return us queries
80 def manage_brok(self
, b
):
81 #We've got problem with instance_id == 0 so we add 1 every where
82 if 'instance_id' in b
.data
:
83 b
.data
['instance_id'] = b
.data
['instance_id'] + 1
84 #print "(Ndo) I search manager:", manager
85 queries
= BaseModule
.manage_brok(self
, b
)
86 if queries
is not None:
88 self
.db
.execute_query(q
)
90 #print "(ndodb)I don't manage this brok type", b
93 #Create the database connexion
94 #TODO : finish (begin :) ) error catch and conf parameters...
95 def connect_database(self
):
96 self
.db
.connect_database()
99 def get_host_object_id_by_name(self
, host_name
):
100 #First look in cache.
101 if host_name
in self
.hosts_cache
:
102 return self
.hosts_cache
[host_name
]
104 #Not in cache, not good
105 query
= u
"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='1'" % host_name
106 self
.db
.execute_query(query
)
107 row
= self
.db
.fetchone ()
108 if row
== None or len(row
) < 1:
111 self
.hosts_cache
[host_name
] = row
[0]
115 def get_hostgroup_object_id_by_name(self
, hostgroup_name
):
116 query
= u
"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='3'" % hostgroup_name
117 self
.db
.execute_query(query
)
118 row
= self
.db
.fetchone ()
119 if row
== None or len(row
) < 1:
125 def get_service_object_id_by_name(self
, host_name
, service_description
):
127 if (host_name
, service_description
) in self
.services_cache
:
128 return self
.services_cache
[(host_name
, service_description
)]
130 #else; not in cache :(
131 query
= u
"SELECT object_id from nagios_objects where name1='%s' and name2='%s' and objecttype_id='2'" % (host_name
, service_description
)
132 self
.db
.execute_query(query
)
133 row
= self
.db
.fetchone ()
134 if row
== None or len(row
) < 1:
137 self
.services_cache
[(host_name
, service_description
)] = row
[0]
141 def get_servicegroup_object_id_by_name(self
, servicegroup_name
):
142 query
= u
"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='4'" % servicegroup_name
143 self
.db
.execute_query(query
)
144 row
= self
.db
.fetchone ()
145 if row
== None or len(row
) < 1:
151 #Ok, we are at launch and a scheduler want him only, OK...
152 #So ca create several queries with all tables we need to delete with
154 #This brob must be send at the begining of a scheduler session,
155 #if not, BAD THINGS MAY HAPPENED :)
156 def manage_clean_all_my_instance_id_brok(self
, b
):
157 instance_id
= b
.data
['instance_id']
158 tables
= ['commands', 'contacts', 'contactgroups', 'hosts',
159 'hostescalations', 'hostgroups', 'notifications',
160 'services', 'serviceescalations', 'programstatus',
161 'servicegroups', 'timeperiods', 'hostgroup_members',
162 'contactgroup_members', 'objects', 'hoststatus',
163 'servicestatus', 'instances', 'servicegroup_members']
166 q
= "DELETE FROM %s WHERE instance_id = '%s' " % ('nagios_'+table
, instance_id
)
169 #We also clean cache, because we are not sure about this data now
170 print "[MySQL/NDO] Flushing caches"
171 self
.services_cache
= {}
172 self
.hosts_cache
= {}
177 #Program status is .. status of program? :)
178 #Like pid, daemon mode, last activity, etc
179 #We aleady clean database, so insert
181 #TODO : fill nagios_instances
182 def manage_program_status_brok(self
, b
):
183 new_b
= copy
.deepcopy(b
)
185 #Must delete me first
186 query_delete_instance
= u
"DELETE FROM %s WHERE instance_name = '%s' " % ('nagios_instances', b
.data
['instance_name'])
188 query_instance
= self
.db
.create_insert_query('instances', {'instance_name' : new_b
.data
['instance_name'],\
189 'instance_description' : new_b
.data
['instance_name'], \
190 'instance_id' : new_b
.data
['instance_id']
193 to_del
= ['instance_name', 'command_file']
195 mapping
= self
.mapping
['program_status']
196 for prop
in new_b
.data
:
197 #ex : 'name' : 'program_start_time', 'transform'
199 #print "Got a prop to change", prop
200 val
= new_b
.data
[prop
]
201 if mapping
[prop
]['transform'] != None:
202 f
= mapping
[prop
]['transform']
204 new_name
= mapping
[prop
]['name']
205 to_add
.append((new_name
, val
))
209 for (name
, val
) in to_add
:
210 new_b
.data
[name
] = val
211 query
= self
.db
.create_insert_query('programstatus', new_b
.data
)
212 return [query_delete_instance
, query_instance
, query
]
215 #TODO : fill nagios_instances
216 def manage_update_program_status_brok(self
, b
):
217 new_b
= copy
.deepcopy(b
)
218 to_del
= ['instance_name', 'command_file']
220 mapping
= self
.mapping
['program_status']
221 for prop
in new_b
.data
:
222 #ex : 'name' : 'program_start_time', 'transform'
224 #print "Got a prop to change", prop
225 val
= new_b
.data
[prop
]
226 if mapping
[prop
]['transform'] != None:
227 f
= mapping
[prop
]['transform']
229 new_name
= mapping
[prop
]['name']
230 to_add
.append((new_name
, val
))
234 for (name
, val
) in to_add
:
235 new_b
.data
[name
] = val
236 where_clause
= {'instance_id' : new_b
.data
['instance_id']}
237 query
= self
.db
.create_update_query('programstatus', new_b
.data
, where_clause
)
241 #A host have just be create, database is clean, we INSERT it
242 def manage_initial_host_status_brok(self
, b
):
243 #new_b = copy.deepcopy(b)
247 #First add to nagios_objects
248 objects_data
= {'instance_id' : data
['instance_id'], 'objecttype_id' : 1,
249 'name1' : data
['host_name'], 'is_active' : data
['active_checks_enabled']
251 object_query
= self
.db
.create_insert_query('objects', objects_data
)
252 self
.db
.execute_query(object_query
)
254 host_id
= self
.get_host_object_id_by_name(data
['host_name'])
257 hosts_data
= {'host_id' : data
['id'], 'instance_id' : data
['instance_id'],
258 'host_object_id' : host_id
, 'alias' : data
['alias'],
259 'display_name' : data
['display_name'], 'address' : data
['address'],
260 'failure_prediction_options' : '0', 'check_interval' : data
['check_interval'],
261 'retry_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
262 'first_notification_delay' : data
['first_notification_delay'], 'notification_interval' : data
['notification_interval'],
263 'flap_detection_enabled' : data
['flap_detection_enabled'], 'low_flap_threshold' : data
['low_flap_threshold'],
264 'high_flap_threshold' : data
['high_flap_threshold'], 'process_performance_data' : data
['process_perf_data'],
265 'freshness_checks_enabled' : data
['check_freshness'], 'freshness_threshold' : data
['freshness_threshold'],
266 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
267 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
268 'obsess_over_host' : data
['obsess_over_host'], 'notes' : data
['notes'], 'notes_url' : data
['notes_url']
271 #print "HOST DATA", hosts_data
272 query
= self
.db
.create_insert_query('hosts', hosts_data
)
274 #Now create an hoststatus entry
275 hoststatus_data
= {'instance_id' : data
['instance_id'],
276 'host_object_id' : host_id
,
277 'normal_check_interval' : data
['check_interval'],
278 'retry_check_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
279 'current_state' : data
['state_id'], 'state_type' : data
['state_type_id'],
280 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
281 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
282 'obsess_over_host' : data
['obsess_over_host'],'process_performance_data' : data
['process_perf_data'],
283 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
284 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
285 'output' : data
['output'], 'perfdata' : data
['perf_data'],'last_check' : de_unixify(data
['last_chk']),
286 'last_hard_state_change' : de_unixify(data
['last_hard_state_change']),
287 'problem_has_been_acknowledged' : data
['problem_has_been_acknowledged'], 'acknowledgement_type' : data
['acknowledgement_type'],
288 #set check to 1 so nagvis is happy
289 'has_been_checked' : 1,
291 hoststatus_query
= self
.db
.create_insert_query('hoststatus' , hoststatus_data
)
293 return [query
, hoststatus_query
]
296 #A host have just be create, database is clean, we INSERT it
297 def manage_initial_service_status_brok(self
, b
):
298 #new_b = copy.deepcopy(b)
301 #First add to nagios_objects
302 objects_data
= {'instance_id' : data
['instance_id'], 'objecttype_id' : 2,
303 'name1' : data
['host_name'], 'name2' : data
['service_description'], 'is_active' : data
['active_checks_enabled']
305 object_query
= self
.db
.create_insert_query('objects', objects_data
)
306 self
.db
.execute_query(object_query
)
308 host_id
= self
.get_host_object_id_by_name(data
['host_name'])
309 service_id
= self
.get_service_object_id_by_name(data
['host_name'], data
['service_description'])
312 #print "HOST ID:", host_id
313 #print "SERVICE ID:", service_id
314 services_data
= {'service_id' : data
['id'], 'instance_id' : data
['instance_id'],
315 'service_object_id' : service_id
, 'host_object_id' : host_id
,
316 'display_name' : data
['display_name'],
317 'failure_prediction_options' : '0', 'check_interval' : data
['check_interval'],
318 'retry_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
319 'first_notification_delay' : data
['first_notification_delay'], 'notification_interval' : data
['notification_interval'],
320 'flap_detection_enabled' : data
['flap_detection_enabled'], 'low_flap_threshold' : data
['low_flap_threshold'],
321 'high_flap_threshold' : data
['high_flap_threshold'], 'process_performance_data' : data
['process_perf_data'],
322 'freshness_checks_enabled' : data
['check_freshness'], 'freshness_threshold' : data
['freshness_threshold'],
323 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
324 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
325 'obsess_over_service' : data
['obsess_over_service'], 'notes' : data
['notes'], 'notes_url' : data
['notes_url']
328 #print "HOST DATA", hosts_data
329 query
= self
.db
.create_insert_query('services', services_data
)
331 #Now create an hoststatus entry
332 servicestatus_data
= {'instance_id' : data
['instance_id'],
333 'service_object_id' : service_id
,
334 'normal_check_interval' : data
['check_interval'],
335 'retry_check_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
336 'current_state' : data
['state_id'], 'state_type' : data
['state_type_id'],
337 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
338 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
339 'obsess_over_service' : data
['obsess_over_service'],'process_performance_data' : data
['process_perf_data'],
341 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
342 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
343 'output' : data
['output'], 'perfdata' : data
['perf_data'], 'last_check' : de_unixify(data
['last_chk']),
344 'last_hard_state_change' : de_unixify(data
['last_hard_state_change']),
345 'problem_has_been_acknowledged' : data
['problem_has_been_acknowledged'], 'acknowledgement_type' : data
['acknowledgement_type'],
346 #set check to 1 so nagvis is happy
347 'has_been_checked' : 1,
349 servicestatus_query
= self
.db
.create_insert_query('servicestatus' , servicestatus_data
)
351 return [query
, servicestatus_query
]
355 #A new host group? Insert it
356 #We need to do something for the members prop (host.id, host_name)
357 #They are for host_hostgroup table, with just host.id hostgroup.id
358 def manage_initial_hostgroup_status_brok(self
, b
):
361 #First add to nagios_objects
362 objects_data
= {'instance_id' : data
['instance_id'], 'objecttype_id' : 3,
363 'name1' : data
['hostgroup_name'], 'is_active' : 1
365 object_query
= self
.db
.create_insert_query('objects', objects_data
)
366 self
.db
.execute_query(object_query
)
368 hostgroup_id
= self
.get_hostgroup_object_id_by_name(data
['hostgroup_name'])
370 hostgroups_data
= {'hostgroup_id' : data
['id'], 'instance_id' : data
['instance_id'],
371 'config_type' : 0, 'hostgroup_object_id' : hostgroup_id
,
372 'alias' : data
['alias']
375 query
= self
.db
.create_insert_query('hostgroups', hostgroups_data
)
378 #Ok, the hostgroups table is uptodate, now we add relations
379 #between hosts and hostgroups
380 for (h_id
, h_name
) in b
.data
['members']:
381 host_id
= self
.get_host_object_id_by_name(h_name
)
382 hostgroup_members_data
= {'instance_id' : data
['instance_id'], 'hostgroup_id' : data
['id'],
383 'host_object_id' : host_id
}
384 q
= self
.db
.create_insert_query('hostgroup_members', hostgroup_members_data
)
390 #A new host group? Insert it
391 #We need to do something for the members prop (host.id, host_name)
392 #They are for host_hostgroup table, with just host.id hostgroup.id
393 def manage_initial_servicegroup_status_brok(self
, b
):
396 #First add to nagios_objects
397 objects_data
= {'instance_id' : data
['instance_id'], 'objecttype_id' : 4,
398 'name1' : data
['servicegroup_name'], 'is_active' : 1
400 object_query
= self
.db
.create_insert_query('objects', objects_data
)
401 self
.db
.execute_query(object_query
)
403 servicegroup_id
= self
.get_servicegroup_object_id_by_name(data
['servicegroup_name'])
406 servicegroups_data
= {'servicegroup_id' : data
['id'], 'instance_id' : data
['instance_id'],
407 'config_type' : 0, 'servicegroup_object_id' : servicegroup_id
,
408 'alias' : data
['alias']
411 query
= self
.db
.create_insert_query('servicegroups', servicegroups_data
)
414 #Ok, the hostgroups table is uptodate, now we add relations
415 #between hosts and hostgroups
416 for (s_id
, s_name
) in b
.data
['members']:
417 servicegroup_members_data
= {'instance_id' : data
['instance_id'], 'servicegroup_id' : data
['id'],
418 'service_object_id' : s_id
}
419 q
= self
.db
.create_insert_query('servicegroup_members', servicegroup_members_data
)
424 #Same than service result, but for host result
425 def manage_host_check_result_brok(self
, b
):
428 host_id
= self
.get_host_object_id_by_name(data
['host_name'])
429 #Only the host is impacted
430 where_clause
= {'host_object_id' : host_id
}
431 host_check_data
= {'instance_id' : data
['instance_id'],
432 'check_type' : 0, 'is_raw_check' : 0, 'current_check_attempt' : data
['attempt'],
433 'state' : data
['state_id'], 'state_type' : data
['state_type_id'],
434 'start_time' : data
['start_time'], 'start_time_usec' : 0,
435 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
436 'return_code' : data
['return_code'], 'output' : data
['output'],
437 'perfdata' : data
['perf_data']
439 query
= self
.db
.create_update_query('hostchecks', host_check_data
, where_clause
)
442 hoststatus_data
= {'instance_id' : data
['instance_id'],
443 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
444 'current_state' : data
['state_id'], 'state_type' : data
['state_type_id'],
445 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
446 'output' : data
['output'], 'perfdata' : data
['perf_data'], 'last_check' : de_unixify(data
['last_chk'])
448 hoststatus_query
= self
.db
.create_update_query('hoststatus' , hoststatus_data
, where_clause
)
450 return [query
, hoststatus_query
]
452 #The next schedule got it's own brok. got it and just update the
454 def manage_host_next_schedule_brok(self
, b
):
456 host_id
= self
.get_host_object_id_by_name(data
['host_name'])
457 #Only the host is impacted
458 where_clause
= {'host_object_id' : host_id
}
460 #Just update teh host status
461 hoststatus_data
= {'next_check' : de_unixify(data
['next_chk'])}
462 hoststatus_query
= self
.db
.create_update_query('hoststatus' , hoststatus_data
, where_clause
)
464 return [hoststatus_query
]
467 #Same than service result, but for host result
468 def manage_service_check_result_brok(self
, b
):
471 service_id
= self
.get_service_object_id_by_name(data
['host_name'], data
['service_description'])
473 #Only the service is impacted
474 where_clause
= {'service_object_id' : service_id
}
475 service_check_data
= {'instance_id' : data
['instance_id'],
476 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
477 'state' : data
['state_id'], 'state_type' : data
['state_type_id'],
478 'start_time' : data
['start_time'], 'start_time_usec' : 0,
479 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
480 'return_code' : data
['return_code'], 'output' : data
['output'],
481 'perfdata' : data
['perf_data']
483 query
= self
.db
.create_update_query('servicechecks', service_check_data
, where_clause
)
486 servicestatus_data
= {'instance_id' : data
['instance_id'],
487 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
488 'current_state' : data
['state_id'], 'state_type' : data
['state_type_id'],
489 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
490 'output' : data
['output'], 'perfdata' : data
['perf_data'], 'last_check' : de_unixify(data
['last_chk'])
493 servicestatus_query
= self
.db
.create_update_query('servicestatus' , servicestatus_data
, where_clause
)
495 return [query
, servicestatus_query
]
498 #The next schedule got it's own brok. got it and just update the
500 def manage_service_next_schedule_brok(self
, b
):
503 service_id
= self
.get_service_object_id_by_name(data
['host_name'], data
['service_description'])
505 #Only the service is impacted
506 where_clause
= {'service_object_id' : service_id
}
508 #Just update the service status
509 servicestatus_data
= {'next_check' : de_unixify(data
['next_chk'])}
510 servicestatus_query
= self
.db
.create_update_query('servicestatus' , servicestatus_data
, where_clause
)
512 return [servicestatus_query
]
516 #Ok the host is updated
517 def manage_update_host_status_brok(self
, b
):
519 host_id
= self
.get_host_object_id_by_name(data
['host_name'])
521 hosts_data
= {'instance_id' : data
['instance_id'],
522 'failure_prediction_options' : '0', 'check_interval' : data
['check_interval'],
523 'retry_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
524 'first_notification_delay' : data
['first_notification_delay'], 'notification_interval' : data
['notification_interval'],
525 'flap_detection_enabled' : data
['flap_detection_enabled'], 'low_flap_threshold' : data
['low_flap_threshold'],
526 'high_flap_threshold' : data
['high_flap_threshold'], 'process_performance_data' : data
['process_perf_data'],
527 'freshness_checks_enabled' : data
['check_freshness'], 'freshness_threshold' : data
['freshness_threshold'],
528 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
529 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
530 'obsess_over_host' : data
['obsess_over_host'], 'notes' : data
['notes'], 'notes_url' : data
['notes_url']
532 #Only the host is impacted
533 where_clause
= {'host_object_id' : host_id
}
535 query
= self
.db
.create_update_query('hosts', hosts_data
, where_clause
)
537 #Now update an hoststatus entry
538 hoststatus_data
= {'instance_id' : data
['instance_id'],
539 'host_object_id' : host_id
,
540 'normal_check_interval' : data
['check_interval'],
541 'retry_check_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
542 'current_state' : data
['state_id'], 'state_type' : data
['state_type_id'],
543 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
544 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
545 'obsess_over_host' : data
['obsess_over_host'],'process_performance_data' : data
['process_perf_data'],
546 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
547 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
548 'output' : data
['output'], 'perfdata' : data
['perf_data'],'last_check' : de_unixify(data
['last_chk']),
549 'last_hard_state_change' : de_unixify(data
['last_hard_state_change']),
550 'problem_has_been_acknowledged' : data
['problem_has_been_acknowledged'], 'acknowledgement_type' : data
['acknowledgement_type'],
551 #set check to 1 so nagvis is happy
552 'has_been_checked' : 1,
554 hoststatus_query
= self
.db
.create_update_query('hoststatus' , hoststatus_data
, where_clause
)
556 return [query
, hoststatus_query
]
559 #Ok the host is updated
560 def manage_update_service_status_brok(self
, b
):
563 service_id
= self
.get_service_object_id_by_name(data
['host_name'], data
['service_description'])
567 services_data
= {'instance_id' : data
['instance_id'],
568 'display_name' : data
['display_name'],
569 'failure_prediction_options' : '0', 'check_interval' : data
['check_interval'],
570 'retry_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
571 'first_notification_delay' : data
['first_notification_delay'], 'notification_interval' : data
['notification_interval'],
572 'flap_detection_enabled' : data
['flap_detection_enabled'], 'low_flap_threshold' : data
['low_flap_threshold'],
573 'high_flap_threshold' : data
['high_flap_threshold'], 'process_performance_data' : data
['process_perf_data'],
574 'freshness_checks_enabled' : data
['check_freshness'], 'freshness_threshold' : data
['freshness_threshold'],
575 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
576 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
577 'obsess_over_service' : data
['obsess_over_service'], 'notes' : data
['notes'], 'notes_url' : data
['notes_url']
580 #Only the service is impacted
581 where_clause
= {'service_object_id' : service_id
, 'service_id' : data
['id']}
582 #where_clause = {'host_name' : data['host_name']}
583 query
= self
.db
.create_update_query('services', services_data
, where_clause
)
585 #Now create an hoststatus entry
586 servicestatus_data
= {'instance_id' : data
['instance_id'],
587 'service_object_id' : service_id
,
588 'normal_check_interval' : data
['check_interval'],
589 'retry_check_interval' : data
['retry_interval'], 'max_check_attempts' : data
['max_check_attempts'],
590 'current_state' : data
['state_id'], 'state_type' : data
['state_type_id'],
591 'passive_checks_enabled' : data
['passive_checks_enabled'], 'event_handler_enabled' : data
['event_handler_enabled'],
592 'active_checks_enabled' : data
['active_checks_enabled'], 'notifications_enabled' : data
['notifications_enabled'],
593 'obsess_over_service' : data
['obsess_over_service'],'process_performance_data' : data
['process_perf_data'],
595 'check_type' : 0, 'current_check_attempt' : data
['attempt'],
596 'execution_time' : data
['execution_time'], 'latency' : data
['latency'],
597 'output' : data
['output'], 'perfdata' : data
['perf_data'], 'last_check' : de_unixify(data
['last_chk']),
598 'last_hard_state_change' : de_unixify(data
['last_hard_state_change']),
599 'problem_has_been_acknowledged' : data
['problem_has_been_acknowledged'], 'acknowledgement_type' : data
['acknowledgement_type'],
600 #set check to 1 so nagvis is happy
601 'has_been_checked' : 1,
604 where_clause
= {'service_object_id' : service_id
}
605 servicestatus_query
= self
.db
.create_update_query('servicestatus' , servicestatus_data
, where_clause
)
607 return [query
, servicestatus_query
]
611 #A host have just be create, database is clean, we INSERT it
612 def manage_initial_contact_status_brok(self
, b
):
613 #new_b = copy.deepcopy(b)
617 contacts_data
= {'contact_id' : data
['id'], 'instance_id' : data
['instance_id'],
618 'contact_object_id' : data
['id'], 'contact_object_id' : data
['id'],
619 'alias' : data
['alias'],
620 'email_address' : data
['email'], 'pager_address' : data
['pager'],
621 'host_notifications_enabled' : data
['host_notifications_enabled'],
622 'service_notifications_enabled' : data
['service_notifications_enabled'],
625 #print "HOST DATA", hosts_data
626 query
= self
.db
.create_insert_query('contacts', contacts_data
)
631 #A new host group? Insert it
632 #We need to do something for the members prop (host.id, host_name)
633 #They are for host_hostgroup table, with just host.id hostgroup.id
634 def manage_initial_contactgroup_status_brok(self
, b
):
637 contactgroups_data
= {'contactgroup_id' : data
['id'], 'instance_id' : data
['instance_id'],
638 'config_type' : 0, 'contactgroup_object_id' : data
['id'],
639 'alias' : data
['alias']
642 query
= self
.db
.create_insert_query('contactgroups', contactgroups_data
)
645 #Ok, the hostgroups table is uptodate, now we add relations
646 #between hosts and hostgroups
647 for (c_id
, c_name
) in b
.data
['members']:
649 contactgroup_members_data
= {'instance_id' : data
['instance_id'], 'contactgroup_id' : data
['id'],
650 'contact_object_id' : c_id
}
651 q
= self
.db
.create_insert_query('contactgroup_members', contactgroup_members_data
)
657 #A notification have just be created, we INSERT it
658 #def manage_notification_raise_brok(self, b):
659 # query = self.db.create_insert_query('notification', b.data)