# Lib/logging/handlers.py
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.

Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME

try:
    import codecs
except ImportError:
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

#
# Some constants...
#

DEFAULT_TCP_LOGGING_PORT  = 9020
DEFAULT_UDP_LOGGING_PORT  = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT           = 514
SYSLOG_TCP_PORT           = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        if maxBytes > 0:
            mode = 'a'  # doesn't make sense otherwise!
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount
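
    # A minimal usage sketch (illustrative only; the logger name, size and
    # backup count below are arbitrary assumptions, not part of this module):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   handler = logging.handlers.RotatingFileHandler("app.log",
    #                                                  maxBytes=1048576,
    #                                                  backupCount=5)
    #   logger.addHandler(handler)
    #   logger.warning("something worth keeping")
    #
    # Once app.log grows to roughly 1 MB it is renamed to app.log.1 and a
    # fresh app.log is opened; at most five backups are kept.
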
    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
            #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:          # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:            # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval  # multiply by units requested
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)
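
    # A minimal usage sketch (illustrative; the logger name and retention
    # below are assumptions, not part of this module):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   handler = logging.handlers.TimedRotatingFileHandler("app.log",
    #                                                       when="midnight",
    #                                                       backupCount=7)
    #   logger.addHandler(handler)
    #   logger.info("rotated at midnight, keeping a week of files")
    #
    # Each rollover renames app.log to app.log.YYYY-MM-DD (the daily suffix
    # set above) and deletes backups beyond backupCount.
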
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                             currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6]  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt

class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if not os.path.exists(self.baseFilename):
            self.dev, self.ino = -1, -1
        else:
            stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
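
    # A minimal usage sketch (illustrative; the path below is an assumption,
    # not part of this module):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   logger.addHandler(logging.handlers.WatchedFileHandler("/var/log/myapp.log"))
    #
    # If an external tool such as logrotate renames /var/log/myapp.log, the
    # next emit() notices the changed device/inode and reopens the file.
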
    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        if not os.path.exists(self.baseFilename):
            stat = None
            changed = 1
        else:
            stat = os.stat(self.baseFilename)
            changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
        if changed and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if stat is None:
                stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
        logging.FileHandler.emit(self, record)

class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is set to 1 - which means that if
        a socket error occurs, the socket is silently closed and then
        reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0
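
    # A minimal sending-side sketch (illustrative; the host/port pair is an
    # assumption - any TCP listener that understands the length-prefixed
    # pickle format will do, e.g. the receiver sketch shown after makePickle):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   logger.addHandler(logging.handlers.SocketHandler("localhost",
    #           logging.handlers.DEFAULT_TCP_LOGGING_PORT))
    #   logger.error("sent as a pickled LogRecord dict")
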
    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            dummy = self.format(record)  # just to get traceback text into record.exc_text
            record.exc_info = None       # to avoid Unpickleable error
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s
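
    # A sketch of the receiving end (illustrative, not part of this module):
    # each message on the wire is a 4-byte big-endian length followed by a
    # pickled LogRecord attribute dict, which makeLogRecord() turns back into
    # a LogRecord. Hypothetical helper, assuming 'conn' is a connected stream
    # socket:
    #
    #   import cPickle, struct, logging
    #
    #   def read_record(conn):
    #       header = conn.recv(4)                  # the ">L" length prefix
    #       slen = struct.unpack(">L", header)[0]
    #       data = conn.recv(slen)
    #       return logging.makeLogRecord(cPickle.loads(data))
    #
    # (A robust receiver would loop until the full header and payload have
    # been read; recv() may return fewer bytes than requested.)
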
    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket. The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))

class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG    = 0   # system is unusable
    LOG_ALERT    = 1   # action must be taken immediately
    LOG_CRIT     = 2   # critical conditions
    LOG_ERR      = 3   # error conditions
    LOG_WARNING  = 4   # warning conditions
    LOG_NOTICE   = 5   # normal but significant condition
    LOG_INFO     = 6   # informational
    LOG_DEBUG    = 7   # debug-level messages

    # facility codes
    LOG_KERN     = 0   # kernel messages
    LOG_USER     = 1   # random user-level messages
    LOG_MAIL     = 2   # mail system
    LOG_DAEMON   = 3   # system daemons
    LOG_AUTH     = 4   # security/authorization messages
    LOG_SYSLOG   = 5   # messages generated internally by syslogd
    LOG_LPR      = 6   # line printer subsystem
    LOG_NEWS     = 7   # network news subsystem
    LOG_UUCP     = 8   # UUCP subsystem
    LOG_CRON     = 9   # clock daemon
    LOG_AUTHPRIV = 10  # security/authorization messages (private)
    LOG_FTP      = 11  # FTP daemon

    # other codes through 15 reserved for system use
    LOG_LOCAL0   = 16  # reserved for local use
    LOG_LOCAL1   = 17  # reserved for local use
    LOG_LOCAL2   = 18  # reserved for local use
    LOG_LOCAL3   = 19  # reserved for local use
    LOG_LOCAL4   = 20  # reserved for local use
    LOG_LOCAL5   = 21  # reserved for local use
    LOG_LOCAL6   = 22  # reserved for local use
    LOG_LOCAL7   = 23  # reserved for local use
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,      # DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,    # DEPRECATED
        "warn":     LOG_WARNING,  # DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,     # DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG"    : "debug",
        "INFO"     : "info",
        "WARNING"  : "warning",
        "ERROR"    : "error",
        "CRITICAL" : "critical"
    }
    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
        self.formatter = None
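
    # A minimal usage sketch (illustrative; '/dev/log' and the facility are
    # assumptions - adjust for the local syslog setup):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   handler = logging.handlers.SysLogHandler(address="/dev/log",
    #           facility=logging.handlers.SysLogHandler.LOG_DAEMON)
    #   logger.addHandler(handler)
    #   logger.warning("goes to the local syslog daemon")
    #
    # Passing a (host, port) tuple instead of a path sends UDP datagrams to a
    # remote syslog server, on SYSLOG_UDP_PORT by default.
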
    def _connect_unixsocket(self, address):
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required. this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        return (facility << 3) | priority
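
    # Worked example (illustrative): encoding LOG_USER (1) with a "warning"
    # priority (4) gives (1 << 3) | 4 == 12, so the message goes out with a
    # "<12>" priority prefix.
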
    def close(self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        """
        We need to convert record level to lowercase, maybe this will
        change in the future.
        """
        msg = self.log_format_string % (
            self.encodePriority(self.facility,
                                self.mapPriority(record.levelname)),
            msg)
        # Treat unicode messages as required by RFC 5424
        if _unicode and type(msg) is unicode:
            msg = msg.encode('utf-8')
            if codecs:
                msg = codecs.BOM_UTF8 + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
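
    # A minimal usage sketch (illustrative; the host, addresses and
    # credentials below are placeholders, not part of this module):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   handler = logging.handlers.SMTPHandler(
    #       mailhost=("smtp.example.com", 587),
    #       fromaddr="app@example.com",
    #       toaddrs=["ops@example.com"],
    #       subject="Application error",
    #       credentials=("user", "secret"),
    #       secure=())              # empty tuple -> plain starttls()
    #   handler.setLevel(logging.ERROR)
    #   logger.addHandler(handler)
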
    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
                }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
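
    # A minimal usage sketch (illustrative; the host and path are
    # placeholders, not part of this module):
    #
    #   import logging
    #   import logging.handlers
    #   logger = logging.getLogger("myapp")
    #   handler = logging.handlers.HTTPHandler("example.com:8080",
    #                                          "/log", method="POST")
    #   logger.addHandler(handler)
    #   logger.info("sent as form-encoded LogRecord attributes")
    #
    # The record's __dict__ (see mapLogRecord below) is URL-encoded and sent
    # either in the query string (GET) or the request body (POST).
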
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a URL-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders(data if self.method == "POST" else None)
            h.getreply()  #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)
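
    # A sketch of a custom flushing strategy (hypothetical subclass, not part
    # of this module): flush either when the buffer is full or when the newest
    # record is more than 60 seconds younger than the oldest buffered one.
    #
    #   class AgeOrCapacityHandler(BufferingHandler):
    #       def shouldFlush(self, record):
    #           if len(self.buffer) >= self.capacity:
    #               return True
    #           if self.buffer and record.created - self.buffer[0].created > 60:
    #               return True
    #           return False
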
    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.buffer = []

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        self.flush()
        logging.Handler.close(self)

class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
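
    # A minimal usage sketch (illustrative; the capacity and file name are
    # assumptions, not part of this module):
    #
    #   import logging
    #   import logging.handlers
    #   target = logging.FileHandler("app.log")
    #   buffered = logging.handlers.MemoryHandler(capacity=100,
    #                                             flushLevel=logging.ERROR,
    #                                             target=target)
    #   logging.getLogger("myapp").addHandler(buffered)
    #
    # Records accumulate in memory and are pushed to the FileHandler once 100
    # are buffered or as soon as an ERROR (or worse) record arrives.
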
    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
                (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.
        """
        if self.target:
            for record in self.buffer:
                self.target.handle(record)
            self.buffer = []

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)