1 # Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
2 #
3 # Permission to use, copy, modify, and distribute this software and its
4 # documentation for any purpose and without fee is hereby granted,
5 # provided that the above copyright notice appear in all copies and that
6 # both that copyright notice and this permission notice appear in
7 # supporting documentation, and that the name of Vinay Sajip
8 # not be used in advertising or publicity pertaining to distribution
9 # of the software without specific, written prior permission.
10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17 """
18 Additional handlers for the logging package for Python. The core package is
19 based on PEP 282 and comments thereto in comp.lang.python.
20
21 Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
22
23 To use, simply 'import logging.handlers' and log away!
24 """
25
26 import io, logging, socket, os, pickle, struct, time, re
27 from stat import ST_DEV, ST_INO, ST_MTIME
28 import queue
29 import threading
30 import copy
31
32 #
33 # Some constants...
34 #
35
36 DEFAULT_TCP_LOGGING_PORT = 9020
37 DEFAULT_UDP_LOGGING_PORT = 9021
38 DEFAULT_HTTP_LOGGING_PORT = 9022
39 DEFAULT_SOAP_LOGGING_PORT = 9023
40 SYSLOG_UDP_PORT = 514
41 SYSLOG_TCP_PORT = 514
42
43 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
44
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    # Optional user-installed callables that customise rotation behaviour;
    # left as None by default (see rotation_filename() and rotate()).
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Write the record to the file, first performing a rollover (as
        described in doRollover()) if one is due.  Any error is routed
        through handleError().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be provided.

        If the handler's 'namer' attribute is callable, it is invoked with
        the default name and its result is used; otherwise (the default,
        None) the name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        return namer(default_name) if callable(namer) else default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        If the handler's 'rotator' attribute is callable, it is invoked
        with the source and dest arguments; otherwise (the default, None)
        the source is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        rotator = self.rotator
        if callable(rotator):
            rotator(source, dest)
            return
        # Issue 18940: with delay=True the source file may never have been
        # created, so only rename when it actually exists.
        if os.path.exists(source):
            os.rename(source, dest)
118
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        With the defaults the file grows without bound.  Supplying maxBytes
        and backupCount makes the file roll over when it approaches maxBytes
        in length: the current file is closed and renamed by appending ".1"
        to its name, any existing backups are shifted up (".1" -> ".2" and
        so on, up to backupCount), and logging continues in a fresh file
        under the original name.  For example, backupCount=5 with base name
        "app.log" yields "app.log" plus backups "app.log.1" .. "app.log.5",
        with "app.log" always being the file currently written to.

        A maxBytes of zero means rollover never occurs.
        """
        # Rotation is pointless with a truncating mode: if 'w' were honoured,
        # each run of the application would wipe the current log and defeat
        # the purpose of keeping backups, so force append mode.
        if maxBytes > 0:
            mode = 'a'
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift existing backups up by one (".1" -> ".2", ...), dropping
            # the oldest one if it would exceed backupCount.
            for index in range(self.backupCount - 1, 0, -1):
                source = self.rotation_filename(
                    "%s.%d" % (self.baseFilename, index))
                target = self.rotation_filename(
                    "%s.%d" % (self.baseFilename, index + 1))
                if os.path.exists(source):
                    if os.path.exists(target):
                        os.remove(target)
                    os.rename(source, target)
            # Rotate the live file into the ".1" slot.
            target = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(target):
                os.remove(target)
            self.rotate(self.baseFilename, target)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        # See bpo-45401: never roll over anything other than a regular file.
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        if self.stream is None:
            # delay was set: the stream is opened lazily on first use
            self.stream = self._open()
        if self.maxBytes <= 0:
            return False
        formatted = "%s\n" % self.format(record)
        # Explicit seek to EOF: on Windows the position is not guaranteed
        # to be at the end even in append mode (non-POSIX behaviour).
        self.stream.seek(0, 2)
        return self.stream.tell() + len(formatted) >= self.maxBytes
201
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        """
        Open the specified file and use it as the stream for logging,
        rolling over at a timed interval.

        'when' selects the rollover interval type: 'S' (seconds),
        'M' (minutes), 'H' (hours), 'D' (days), 'MIDNIGHT' (roll over at
        midnight, or at 'atTime' if given) or 'W0'-'W6' (roll over on a
        fixed weekday, 0 being Monday); case is ignored.  'interval'
        multiplies the base interval.  If 'utc' is true, times are computed
        in UTC rather than local time.  A ValueError is raised for any
        unrecognised 'when' specifier.
        """
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch is used by getFilesToDelete() to recognise the date/time
        # suffix that doRollover() appends to rotated files.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Base the first rollover on the existing file's mtime, so a
            # restart doesn't reset the rollover clock.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                # Seconds from midnight to the configured rollover time of day.
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Compensate if a DST transition falls between now and
                        # the computed rollover (wall-clock arithmetic only).
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else: # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            # See #89564: Never rollover anything other than regular files
            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
                # The file is not a regular file, so do not rollover, but do
                # set the next rollover time to avoid repeated checks.
                self.rolloverAt = self.computeRollover(t)
                return False

            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # See bpo-44753: Don't use the extension when computing the prefix.
        n, e = os.path.splitext(baseName)
        prefix = n + '.'
        plen = len(prefix)
        for fileName in fileNames:
            if self.namer is None:
                # Our files will always start with baseName
                if not fileName.startswith(baseName):
                    continue
            else:
                # Our files could be just about anything after custom naming, but
                # likely candidates are of the form
                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
                # NOTE(review): this filter looks suspicious - a file is
                # skipped only when it does NOT start with baseName yet DOES
                # end with the original extension; confirm the intended
                # boolean logic against the custom-namer use cases.
                if (not fileName.startswith(baseName) and fileName.endswith(e) and
                    len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()):
                    continue

            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # See bpo-45628: The date/time suffix could be anywhere in the
                # filename
                parts = suffix.split('.')
                for part in parts:
                    if self.extMatch.match(part):
                        result.append(os.path.join(dirName, fileName))
                        break
        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                # A DST transition occurred during the interval: shift the
                # timestamp so the suffix names the interval's wall-clock start.
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else: # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
454
455 class ESC[4;38;5;81mWatchedFileHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mFileHandler):
456 """
457 A handler for logging to a file, which watches the file
458 to see if it has changed while in use. This can happen because of
459 usage of programs such as newsyslog and logrotate which perform
460 log file rotation. This handler, intended for use under Unix,
461 watches the file to see if it has changed since the last emit.
462 (A file has changed if its device or inode have changed.)
463 If it has changed, the old file stream is closed, and the file
464 opened to get a new stream.
465
466 This handler is not appropriate for use under Windows, because
467 under Windows open files cannot be moved or renamed - logging
468 opens the files with exclusive locks - and so there is no need
469 for such a handler. Furthermore, ST_INO is not supported under
470 Windows; stat always returns zero for this value.
471
472 This handler is based on a suggestion and patch by Chad J.
473 Schroeder.
474 """
475 def __init__(self, filename, mode='a', encoding=None, delay=False,
476 errors=None):
477 if "b" not in mode:
478 encoding = io.text_encoding(encoding)
479 logging.FileHandler.__init__(self, filename, mode=mode,
480 encoding=encoding, delay=delay,
481 errors=errors)
482 self.dev, self.ino = -1, -1
483 self._statstream()
484
485 def _statstream(self):
486 if self.stream:
487 sres = os.fstat(self.stream.fileno())
488 self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
489
490 def reopenIfNeeded(self):
491 """
492 Reopen log file if needed.
493
494 Checks if the underlying file has changed, and if it
495 has, close the old stream and reopen the file to get the
496 current stream.
497 """
498 # Reduce the chance of race conditions by stat'ing by path only
499 # once and then fstat'ing our new fd if we opened a new log stream.
500 # See issue #14632: Thanks to John Mulligan for the problem report
501 # and patch.
502 try:
503 # stat the file by path, checking for existence
504 sres = os.stat(self.baseFilename)
505 except FileNotFoundError:
506 sres = None
507 # compare file system stat with that of our stream file handle
508 if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
509 if self.stream is not None:
510 # we have an open file handle, clean it up
511 self.stream.flush()
512 self.stream.close()
513 self.stream = None # See Issue #21742: _open () might fail.
514 # open a new file handle and get new stat info from that fd
515 self.stream = self._open()
516 self._statstream()
517
518 def emit(self, record):
519 """
520 Emit a record.
521
522 If underlying file has changed, reopen the file before emitting the
523 record to it.
524 """
525 self.reopenIfNeeded()
526 logging.FileHandler.emit(self, record)
527
528
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # A port of None means a Unix domain socket path in 'host'.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnection attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            # Unix domain socket path
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            try:
                sock.connect(self.address)
            except OSError:
                sock.close()    # Issue 19182
                raise
            return sock
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # retryTime is None on the first attempt after a disconnect;
        # otherwise wait until the backoff window has expired.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
        except OSError:
            # Creation failed: schedule the next attempt, growing the delay
            # geometrically up to the retryMax cap.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod
        else:
            self.retryTime = None   # next time, no delay before trying

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock may still be None: either the backoff window hasn't
        # expired yet, or the reconnection attempt just failed.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None    # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # Formatting has the side effect of caching the traceback text
            # in record.exc_text; the formatted result itself is discarded.
            self.format(record)
        # See issue #14436: msg/args may be arbitrary objects unavailable on
        # the receiving end, so bake them into a plain string here and drop
        # the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        Most likely cause is a lost connection.  With closeOnError set,
        drop the socket so a reconnect happens on the next event; otherwise
        defer to the base class behaviour.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
695
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket. The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        # A port of None indicates a Unix domain socket path (see __init__).
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
737
738 class ESC[4;38;5;81mSysLogHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mHandler):
739 """
740 A handler class which sends formatted logging records to a syslog
741 server. Based on Sam Rushing's syslog module:
742 http://www.nightmare.com/squirl/python-ext/misc/syslog.py
743 Contributed by Nicolas Untz (after which minor refactoring changes
744 have been made).
745 """
746
747 # from <linux/sys/syslog.h>:
748 # ======================================================================
749 # priorities/facilities are encoded into a single 32-bit quantity, where
750 # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
751 # facility (0-big number). Both the priorities and the facilities map
752 # roughly one-to-one to strings in the syslogd(8) source code. This
753 # mapping is included in this file.
754 #
755 # priorities (these are ordered)
756
757 LOG_EMERG = 0 # system is unusable
758 LOG_ALERT = 1 # action must be taken immediately
759 LOG_CRIT = 2 # critical conditions
760 LOG_ERR = 3 # error conditions
761 LOG_WARNING = 4 # warning conditions
762 LOG_NOTICE = 5 # normal but significant condition
763 LOG_INFO = 6 # informational
764 LOG_DEBUG = 7 # debug-level messages
765
766 # facility codes
767 LOG_KERN = 0 # kernel messages
768 LOG_USER = 1 # random user-level messages
769 LOG_MAIL = 2 # mail system
770 LOG_DAEMON = 3 # system daemons
771 LOG_AUTH = 4 # security/authorization messages
772 LOG_SYSLOG = 5 # messages generated internally by syslogd
773 LOG_LPR = 6 # line printer subsystem
774 LOG_NEWS = 7 # network news subsystem
775 LOG_UUCP = 8 # UUCP subsystem
776 LOG_CRON = 9 # clock daemon
777 LOG_AUTHPRIV = 10 # security/authorization messages (private)
778 LOG_FTP = 11 # FTP daemon
779 LOG_NTP = 12 # NTP subsystem
780 LOG_SECURITY = 13 # Log audit
781 LOG_CONSOLE = 14 # Log alert
782 LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
783
784 # other codes through 15 reserved for system use
785 LOG_LOCAL0 = 16 # reserved for local use
786 LOG_LOCAL1 = 17 # reserved for local use
787 LOG_LOCAL2 = 18 # reserved for local use
788 LOG_LOCAL3 = 19 # reserved for local use
789 LOG_LOCAL4 = 20 # reserved for local use
790 LOG_LOCAL5 = 21 # reserved for local use
791 LOG_LOCAL6 = 22 # reserved for local use
792 LOG_LOCAL7 = 23 # reserved for local use
793
794 priority_names = {
795 "alert": LOG_ALERT,
796 "crit": LOG_CRIT,
797 "critical": LOG_CRIT,
798 "debug": LOG_DEBUG,
799 "emerg": LOG_EMERG,
800 "err": LOG_ERR,
801 "error": LOG_ERR, # DEPRECATED
802 "info": LOG_INFO,
803 "notice": LOG_NOTICE,
804 "panic": LOG_EMERG, # DEPRECATED
805 "warn": LOG_WARNING, # DEPRECATED
806 "warning": LOG_WARNING,
807 }
808
809 facility_names = {
810 "auth": LOG_AUTH,
811 "authpriv": LOG_AUTHPRIV,
812 "console": LOG_CONSOLE,
813 "cron": LOG_CRON,
814 "daemon": LOG_DAEMON,
815 "ftp": LOG_FTP,
816 "kern": LOG_KERN,
817 "lpr": LOG_LPR,
818 "mail": LOG_MAIL,
819 "news": LOG_NEWS,
820 "ntp": LOG_NTP,
821 "security": LOG_SECURITY,
822 "solaris-cron": LOG_SOLCRON,
823 "syslog": LOG_SYSLOG,
824 "user": LOG_USER,
825 "uucp": LOG_UUCP,
826 "local0": LOG_LOCAL0,
827 "local1": LOG_LOCAL1,
828 "local2": LOG_LOCAL2,
829 "local3": LOG_LOCAL3,
830 "local4": LOG_LOCAL4,
831 "local5": LOG_LOCAL5,
832 "local6": LOG_LOCAL6,
833 "local7": LOG_LOCAL7,
834 }
835
836 # Originally added to work around GH-43683. Unnecessary since GH-50043 but kept
837 # for backwards compatibility.
838 priority_map = {
839 "DEBUG" : "debug",
840 "INFO" : "info",
841 "WARNING" : "warning",
842 "ERROR" : "error",
843 "CRITICAL" : "critical"
844 }
845
846 def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
847 facility=LOG_USER, socktype=None):
848 """
849 Initialize a handler.
850
851 If address is specified as a string, a UNIX socket is used. To log to a
852 local syslogd, "SysLogHandler(address="/dev/log")" can be used.
853 If facility is not specified, LOG_USER is used. If socktype is
854 specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
855 socket type will be used. For Unix sockets, you can also specify a
856 socktype of None, in which case socket.SOCK_DGRAM will be used, falling
857 back to socket.SOCK_STREAM.
858 """
859 logging.Handler.__init__(self)
860
861 self.address = address
862 self.facility = facility
863 self.socktype = socktype
864 self.socket = None
865 self.createSocket()
866
    def _connect_unixsocket(self, address):
        # Connect a Unix domain socket at 'address', honouring the configured
        # socktype.  When socktype is None, SOCK_DGRAM is tried first and a
        # failed connect falls back to SOCK_STREAM; when the user specified a
        # socktype explicitly, a connect failure is propagated unchanged.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            # Retry once with a stream socket before giving up.
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise
890
    def createSocket(self):
        """
        Try to create a socket and, if it's not a datagram socket, connect it
        to the other end. This method is called during handler initialization,
        but it's not regarded as an error if the other end isn't listening yet
        --- the method will be called again when emitting an event,
        if there is no socket at that point.

        Sets self.unixsocket, and on the network path sets self.socket and
        self.socktype to the socket and type actually created.
        """
        address = self.address
        socktype = self.socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it's not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each resolved address in turn; keep the first socket that
            # can be created (and, for streams, connected).
            for res in ress:
                # Note: socktype is deliberately rebound here to the actual
                # type reported by getaddrinfo for this address.
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype
936
937 def encodePriority(self, facility, priority):
938 """
939 Encode the facility and priority. You can pass in strings or
940 integers - if strings are passed, the facility_names and
941 priority_names mapping dictionaries are used to convert them to
942 integers.
943 """
944 if isinstance(facility, str):
945 facility = self.facility_names[facility]
946 if isinstance(priority, str):
947 priority = self.priority_names[priority]
948 return (facility << 3) | priority
949
950 def close(self):
951 """
952 Closes the socket.
953 """
954 self.acquire()
955 try:
956 sock = self.socket
957 if sock:
958 self.socket = None
959 sock.close()
960 logging.Handler.close(self)
961 finally:
962 self.release()
963
964 def mapPriority(self, levelName):
965 """
966 Map a logging level name to a key in the priority_names map.
967 This is useful in two scenarios: when custom levels are being
968 used, and in the case where you can't do a straightforward
969 mapping by lowercasing the logging level name because of locale-
970 specific issues (see SF #1524081).
971 """
972 return self.priority_map.get(levelName, "warning")
973
    # Class-level defaults, overridable per instance.
    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator
976
    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        The message is prefixed with the encoded PRI value and, by default,
        terminated with a NUL byte (see append_nul above).
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg

            # The socket may have been closed (e.g. by close()); recreate it
            # lazily on first use.
            if not self.socket:
                self.createSocket()

            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The syslog daemon may have been restarted, invalidating
                    # the existing connection; reconnect once and retry.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
1016
1017 class ESC[4;38;5;81mSMTPHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mHandler):
1018 """
1019 A handler class which sends an SMTP email for each logging event.
1020 """
1021 def __init__(self, mailhost, fromaddr, toaddrs, subject,
1022 credentials=None, secure=None, timeout=5.0):
1023 """
1024 Initialize the handler.
1025
1026 Initialize the instance with the from and to addresses and subject
1027 line of the email. To specify a non-standard SMTP port, use the
1028 (host, port) tuple format for the mailhost argument. To specify
1029 authentication credentials, supply a (username, password) tuple
1030 for the credentials argument. To specify the use of a secure
1031 protocol (TLS), pass in a tuple for the secure argument. This will
1032 only be used when authentication credentials are supplied. The tuple
1033 will be either an empty tuple, or a single-value tuple with the name
1034 of a keyfile, or a 2-value tuple with the names of the keyfile and
1035 certificate file. (This tuple is passed to the `starttls` method).
1036 A timeout in seconds can be specified for the SMTP connection (the
1037 default is one second).
1038 """
1039 logging.Handler.__init__(self)
1040 if isinstance(mailhost, (list, tuple)):
1041 self.mailhost, self.mailport = mailhost
1042 else:
1043 self.mailhost, self.mailport = mailhost, None
1044 if isinstance(credentials, (list, tuple)):
1045 self.username, self.password = credentials
1046 else:
1047 self.username = None
1048 self.fromaddr = fromaddr
1049 if isinstance(toaddrs, str):
1050 toaddrs = [toaddrs]
1051 self.toaddrs = toaddrs
1052 self.subject = subject
1053 self.secure = secure
1054 self.timeout = timeout
1055
1056 def getSubject(self, record):
1057 """
1058 Determine the subject for the email.
1059
1060 If you want to specify a subject line which is record-dependent,
1061 override this method.
1062 """
1063 return self.subject
1064
1065 def emit(self, record):
1066 """
1067 Emit a record.
1068
1069 Format the record and send it to the specified addressees.
1070 """
1071 try:
1072 import smtplib
1073 from email.message import EmailMessage
1074 import email.utils
1075
1076 port = self.mailport
1077 if not port:
1078 port = smtplib.SMTP_PORT
1079 smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
1080 msg = EmailMessage()
1081 msg['From'] = self.fromaddr
1082 msg['To'] = ','.join(self.toaddrs)
1083 msg['Subject'] = self.getSubject(record)
1084 msg['Date'] = email.utils.localtime()
1085 msg.set_content(self.format(record))
1086 if self.username:
1087 if self.secure is not None:
1088 smtp.ehlo()
1089 smtp.starttls(*self.secure)
1090 smtp.ehlo()
1091 smtp.login(self.username, self.password)
1092 smtp.send_message(msg)
1093 smtp.quit()
1094 except Exception:
1095 self.handleError(record)
1096
1097 class ESC[4;38;5;81mNTEventLogHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mHandler):
1098 """
1099 A handler class which sends events to the NT Event Log. Adds a
1100 registry entry for the specified application name. If no dllname is
1101 provided, win32service.pyd (which contains some basic message
1102 placeholders) is used. Note that use of these placeholders will make
1103 your event logs big, as the entire message source is held in the log.
1104 If you want slimmer logs, you have to pass in the name of your own DLL
1105 which contains the message definitions you want to use in the event log.
1106 """
1107 def __init__(self, appname, dllname=None, logtype="Application"):
1108 logging.Handler.__init__(self)
1109 try:
1110 import win32evtlogutil, win32evtlog
1111 self.appname = appname
1112 self._welu = win32evtlogutil
1113 if not dllname:
1114 dllname = os.path.split(self._welu.__file__)
1115 dllname = os.path.split(dllname[0])
1116 dllname = os.path.join(dllname[0], r'win32service.pyd')
1117 self.dllname = dllname
1118 self.logtype = logtype
1119 # Administrative privileges are required to add a source to the registry.
1120 # This may not be available for a user that just wants to add to an
1121 # existing source - handle this specific case.
1122 try:
1123 self._welu.AddSourceToRegistry(appname, dllname, logtype)
1124 except Exception as e:
1125 # This will probably be a pywintypes.error. Only raise if it's not
1126 # an "access denied" error, else let it pass
1127 if getattr(e, 'winerror', None) != 5: # not access denied
1128 raise
1129 self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
1130 self.typemap = {
1131 logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
1132 logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
1133 logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
1134 logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
1135 logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
1136 }
1137 except ImportError:
1138 print("The Python Win32 extensions for NT (service, event "\
1139 "logging) appear not to be available.")
1140 self._welu = None
1141
1142 def getMessageID(self, record):
1143 """
1144 Return the message ID for the event record. If you are using your
1145 own messages, you could do this by having the msg passed to the
1146 logger being an ID rather than a formatting string. Then, in here,
1147 you could use a dictionary lookup to get the message ID. This
1148 version returns 1, which is the base message ID in win32service.pyd.
1149 """
1150 return 1
1151
1152 def getEventCategory(self, record):
1153 """
1154 Return the event category for the record.
1155
1156 Override this if you want to specify your own categories. This version
1157 returns 0.
1158 """
1159 return 0
1160
1161 def getEventType(self, record):
1162 """
1163 Return the event type for the record.
1164
1165 Override this if you want to specify your own types. This version does
1166 a mapping using the handler's typemap attribute, which is set up in
1167 __init__() to a dictionary which contains mappings for DEBUG, INFO,
1168 WARNING, ERROR and CRITICAL. If you are using your own levels you will
1169 either need to override this method or place a suitable dictionary in
1170 the handler's typemap attribute.
1171 """
1172 return self.typemap.get(record.levelno, self.deftype)
1173
1174 def emit(self, record):
1175 """
1176 Emit a record.
1177
1178 Determine the message ID, event category and event type. Then
1179 log the message in the NT event log.
1180 """
1181 if self._welu:
1182 try:
1183 id = self.getMessageID(record)
1184 cat = self.getEventCategory(record)
1185 type = self.getEventType(record)
1186 msg = self.format(record)
1187 self._welu.ReportEvent(self.appname, id, cat, type, [msg])
1188 except Exception:
1189 self.handleError(record)
1190
1191 def close(self):
1192 """
1193 Clean up this handler.
1194
1195 You can remove the application name from the registry as a
1196 source of event log entries. However, if you do this, you will
1197 not be able to see the events as you intended in the Event Log
1198 Viewer - it needs to be able to access the registry to get the
1199 DLL name.
1200 """
1201 #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
1202 logging.Handler.close(self)
1203
1204 class ESC[4;38;5;81mHTTPHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mHandler):
1205 """
1206 A class which sends records to a web server, using either GET or
1207 POST semantics.
1208 """
1209 def __init__(self, host, url, method="GET", secure=False, credentials=None,
1210 context=None):
1211 """
1212 Initialize the instance with the host, the request URL, and the method
1213 ("GET" or "POST")
1214 """
1215 logging.Handler.__init__(self)
1216 method = method.upper()
1217 if method not in ["GET", "POST"]:
1218 raise ValueError("method must be GET or POST")
1219 if not secure and context is not None:
1220 raise ValueError("context parameter only makes sense "
1221 "with secure=True")
1222 self.host = host
1223 self.url = url
1224 self.method = method
1225 self.secure = secure
1226 self.credentials = credentials
1227 self.context = context
1228
1229 def mapLogRecord(self, record):
1230 """
1231 Default implementation of mapping the log record into a dict
1232 that is sent as the CGI data. Overwrite in your class.
1233 Contributed by Franz Glasner.
1234 """
1235 return record.__dict__
1236
1237 def getConnection(self, host, secure):
1238 """
1239 get a HTTP[S]Connection.
1240
1241 Override when a custom connection is required, for example if
1242 there is a proxy.
1243 """
1244 import http.client
1245 if secure:
1246 connection = http.client.HTTPSConnection(host, context=self.context)
1247 else:
1248 connection = http.client.HTTPConnection(host)
1249 return connection
1250
1251 def emit(self, record):
1252 """
1253 Emit a record.
1254
1255 Send the record to the web server as a percent-encoded dictionary
1256 """
1257 try:
1258 import urllib.parse
1259 host = self.host
1260 h = self.getConnection(host, self.secure)
1261 url = self.url
1262 data = urllib.parse.urlencode(self.mapLogRecord(record))
1263 if self.method == "GET":
1264 if (url.find('?') >= 0):
1265 sep = '&'
1266 else:
1267 sep = '?'
1268 url = url + "%c%s" % (sep, data)
1269 h.putrequest(self.method, url)
1270 # support multiple hosts on one IP address...
1271 # need to strip optional :port from host, if present
1272 i = host.find(":")
1273 if i >= 0:
1274 host = host[:i]
1275 # See issue #30904: putrequest call above already adds this header
1276 # on Python 3.x.
1277 # h.putheader("Host", host)
1278 if self.method == "POST":
1279 h.putheader("Content-type",
1280 "application/x-www-form-urlencoded")
1281 h.putheader("Content-length", str(len(data)))
1282 if self.credentials:
1283 import base64
1284 s = ('%s:%s' % self.credentials).encode('utf-8')
1285 s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
1286 h.putheader('Authorization', s)
1287 h.endheaders()
1288 if self.method == "POST":
1289 h.send(data.encode('utf-8'))
1290 h.getresponse() #can't do anything with the result
1291 except Exception:
1292 self.handleError(record)
1293
1294 class ESC[4;38;5;81mBufferingHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mHandler):
1295 """
1296 A handler class which buffers logging records in memory. Whenever each
1297 record is added to the buffer, a check is made to see if the buffer should
1298 be flushed. If it should, then flush() is expected to do what's needed.
1299 """
1300 def __init__(self, capacity):
1301 """
1302 Initialize the handler with the buffer size.
1303 """
1304 logging.Handler.__init__(self)
1305 self.capacity = capacity
1306 self.buffer = []
1307
1308 def shouldFlush(self, record):
1309 """
1310 Should the handler flush its buffer?
1311
1312 Returns true if the buffer is up to capacity. This method can be
1313 overridden to implement custom flushing strategies.
1314 """
1315 return (len(self.buffer) >= self.capacity)
1316
1317 def emit(self, record):
1318 """
1319 Emit a record.
1320
1321 Append the record. If shouldFlush() tells us to, call flush() to process
1322 the buffer.
1323 """
1324 self.buffer.append(record)
1325 if self.shouldFlush(record):
1326 self.flush()
1327
1328 def flush(self):
1329 """
1330 Override to implement custom flushing behaviour.
1331
1332 This version just zaps the buffer to empty.
1333 """
1334 self.acquire()
1335 try:
1336 self.buffer.clear()
1337 finally:
1338 self.release()
1339
1340 def close(self):
1341 """
1342 Close the handler.
1343
1344 This version just flushes and chains to the parent class' close().
1345 """
1346 try:
1347 self.flush()
1348 finally:
1349 logging.Handler.close(self)
1350
1351 class ESC[4;38;5;81mMemoryHandler(ESC[4;38;5;149mBufferingHandler):
1352 """
1353 A handler class which buffers logging records in memory, periodically
1354 flushing them to a target handler. Flushing occurs whenever the buffer
1355 is full, or when an event of a certain severity or greater is seen.
1356 """
1357 def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
1358 flushOnClose=True):
1359 """
1360 Initialize the handler with the buffer size, the level at which
1361 flushing should occur and an optional target.
1362
1363 Note that without a target being set either here or via setTarget(),
1364 a MemoryHandler is no use to anyone!
1365
1366 The ``flushOnClose`` argument is ``True`` for backward compatibility
1367 reasons - the old behaviour is that when the handler is closed, the
1368 buffer is flushed, even if the flush level hasn't been exceeded nor the
1369 capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
1370 """
1371 BufferingHandler.__init__(self, capacity)
1372 self.flushLevel = flushLevel
1373 self.target = target
1374 # See Issue #26559 for why this has been added
1375 self.flushOnClose = flushOnClose
1376
1377 def shouldFlush(self, record):
1378 """
1379 Check for buffer full or a record at the flushLevel or higher.
1380 """
1381 return (len(self.buffer) >= self.capacity) or \
1382 (record.levelno >= self.flushLevel)
1383
1384 def setTarget(self, target):
1385 """
1386 Set the target handler for this handler.
1387 """
1388 self.acquire()
1389 try:
1390 self.target = target
1391 finally:
1392 self.release()
1393
1394 def flush(self):
1395 """
1396 For a MemoryHandler, flushing means just sending the buffered
1397 records to the target, if there is one. Override if you want
1398 different behaviour.
1399
1400 The record buffer is only cleared if a target has been set.
1401 """
1402 self.acquire()
1403 try:
1404 if self.target:
1405 for record in self.buffer:
1406 self.target.handle(record)
1407 self.buffer.clear()
1408 finally:
1409 self.release()
1410
1411 def close(self):
1412 """
1413 Flush, if appropriately configured, set the target to None and lose the
1414 buffer.
1415 """
1416 try:
1417 if self.flushOnClose:
1418 self.flush()
1419 finally:
1420 self.acquire()
1421 try:
1422 self.target = None
1423 BufferingHandler.close(self)
1424 finally:
1425 self.release()
1426
1427
1428 class ESC[4;38;5;81mQueueHandler(ESC[4;38;5;149mloggingESC[4;38;5;149m.ESC[4;38;5;149mHandler):
1429 """
1430 This handler sends events to a queue. Typically, it would be used together
1431 with a multiprocessing Queue to centralise logging to file in one process
1432 (in a multi-process application), so as to avoid file write contention
1433 between processes.
1434
1435 This code is new in Python 3.2, but this class can be copy pasted into
1436 user code for use with earlier Python versions.
1437 """
1438
1439 def __init__(self, queue):
1440 """
1441 Initialise an instance, using the passed queue.
1442 """
1443 logging.Handler.__init__(self)
1444 self.queue = queue
1445
1446 def enqueue(self, record):
1447 """
1448 Enqueue a record.
1449
1450 The base implementation uses put_nowait. You may want to override
1451 this method if you want to use blocking, timeouts or custom queue
1452 implementations.
1453 """
1454 self.queue.put_nowait(record)
1455
1456 def prepare(self, record):
1457 """
1458 Prepare a record for queuing. The object returned by this method is
1459 enqueued.
1460
1461 The base implementation formats the record to merge the message and
1462 arguments, and removes unpickleable items from the record in-place.
1463 Specifically, it overwrites the record's `msg` and
1464 `message` attributes with the merged message (obtained by
1465 calling the handler's `format` method), and sets the `args`,
1466 `exc_info` and `exc_text` attributes to None.
1467
1468 You might want to override this method if you want to convert
1469 the record to a dict or JSON string, or send a modified copy
1470 of the record while leaving the original intact.
1471 """
1472 # The format operation gets traceback text into record.exc_text
1473 # (if there's exception data), and also returns the formatted
1474 # message. We can then use this to replace the original
1475 # msg + args, as these might be unpickleable. We also zap the
1476 # exc_info, exc_text and stack_info attributes, as they are no longer
1477 # needed and, if not None, will typically not be pickleable.
1478 msg = self.format(record)
1479 # bpo-35726: make copy of record to avoid affecting other handlers in the chain.
1480 record = copy.copy(record)
1481 record.message = msg
1482 record.msg = msg
1483 record.args = None
1484 record.exc_info = None
1485 record.exc_text = None
1486 record.stack_info = None
1487 return record
1488
1489 def emit(self, record):
1490 """
1491 Emit a record.
1492
1493 Writes the LogRecord to the queue, preparing it for pickling first.
1494 """
1495 try:
1496 self.enqueue(self.prepare(record))
1497 except Exception:
1498 self.handleError(record)
1499
1500
1501 class ESC[4;38;5;81mQueueListener(ESC[4;38;5;149mobject):
1502 """
1503 This class implements an internal threaded listener which watches for
1504 LogRecords being added to a queue, removes them and passes them to a
1505 list of handlers for processing.
1506 """
1507 _sentinel = None
1508
1509 def __init__(self, queue, *handlers, respect_handler_level=False):
1510 """
1511 Initialise an instance with the specified queue and
1512 handlers.
1513 """
1514 self.queue = queue
1515 self.handlers = handlers
1516 self._thread = None
1517 self.respect_handler_level = respect_handler_level
1518
1519 def dequeue(self, block):
1520 """
1521 Dequeue a record and return it, optionally blocking.
1522
1523 The base implementation uses get. You may want to override this method
1524 if you want to use timeouts or work with custom queue implementations.
1525 """
1526 return self.queue.get(block)
1527
1528 def start(self):
1529 """
1530 Start the listener.
1531
1532 This starts up a background thread to monitor the queue for
1533 LogRecords to process.
1534 """
1535 self._thread = t = threading.Thread(target=self._monitor)
1536 t.daemon = True
1537 t.start()
1538
1539 def prepare(self, record):
1540 """
1541 Prepare a record for handling.
1542
1543 This method just returns the passed-in record. You may want to
1544 override this method if you need to do any custom marshalling or
1545 manipulation of the record before passing it to the handlers.
1546 """
1547 return record
1548
1549 def handle(self, record):
1550 """
1551 Handle a record.
1552
1553 This just loops through the handlers offering them the record
1554 to handle.
1555 """
1556 record = self.prepare(record)
1557 for handler in self.handlers:
1558 if not self.respect_handler_level:
1559 process = True
1560 else:
1561 process = record.levelno >= handler.level
1562 if process:
1563 handler.handle(record)
1564
1565 def _monitor(self):
1566 """
1567 Monitor the queue for records, and ask the handler
1568 to deal with them.
1569
1570 This method runs on a separate, internal thread.
1571 The thread will terminate if it sees a sentinel object in the queue.
1572 """
1573 q = self.queue
1574 has_task_done = hasattr(q, 'task_done')
1575 while True:
1576 try:
1577 record = self.dequeue(True)
1578 if record is self._sentinel:
1579 if has_task_done:
1580 q.task_done()
1581 break
1582 self.handle(record)
1583 if has_task_done:
1584 q.task_done()
1585 except queue.Empty:
1586 break
1587
1588 def enqueue_sentinel(self):
1589 """
1590 This is used to enqueue the sentinel record.
1591
1592 The base implementation uses put_nowait. You may want to override this
1593 method if you want to use timeouts or work with custom queue
1594 implementations.
1595 """
1596 self.queue.put_nowait(self._sentinel)
1597
1598 def stop(self):
1599 """
1600 Stop the listener.
1601
1602 This asks the thread to terminate, and then waits for it to do so.
1603 Note that if you don't call this before your application exits, there
1604 may be some records still left on the queue, which won't be processed.
1605 """
1606 self.enqueue_sentinel()
1607 self._thread.join()
1608 self._thread = None