Problem: Current logging server keeps running in some teardown scenarios (#2304)

Solution: Remove the current pub/sub-based logging infrastructure, as this model is no longer necessary. It can be replaced by a simple system-level logging config.
Shahbaz Nazir 2018-06-11 13:08:03 +02:00 committed by GitHub
parent d492485357
commit bec8d13e36
10 changed files with 27 additions and 610 deletions
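The gist of the change: instead of every process publishing pickled log records to a TCP socket server (which then had to be torn down), each process now applies one standard `dictConfig` at startup. A minimal sketch of the new flow, with an abridged stand-in for `DEFAULT_LOGGING_CONFIG` (the real dict, shown in the third file below, also wires up `file` and `errors` handlers):

```
# Sketch of the socket-free setup this commit moves to (abridged:
# the real DEFAULT_LOGGING_CONFIG also defines 'file' and 'errors'
# handlers; see the bigchaindb.log diff below).
import logging
from logging.config import dictConfig

DEFAULT_LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'console': {
            'format': '[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'console',
            'level': logging.INFO,
        },
    },
    'root': {'level': logging.DEBUG, 'handlers': ['console']},
}

dictConfig(DEFAULT_LOGGING_CONFIG)  # one in-process call; no server process to tear down
logging.getLogger('bigchaindb.core').info('plain stdlib logging')
```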

View File

@@ -1,7 +1,7 @@
import copy
import logging
from bigchaindb.log.configs import SUBSCRIBER_LOGGING_CONFIG as log_config
from bigchaindb.log import DEFAULT_LOGGING_CONFIG as log_config
# from functools import reduce
# PORT_NUMBER = reduce(lambda x, y: x * y, map(ord, 'BigchainDB')) % 2**16
@@ -73,7 +73,6 @@ config = {
'fmt_console': log_config['formatters']['console']['format'],
'fmt_logfile': log_config['formatters']['file']['format'],
'granular_levels': {},
'port': log_config['root']['port']
},
}

View File

@@ -10,8 +10,9 @@ import sys
import bigchaindb
import bigchaindb.config_utils
from bigchaindb.log.setup import setup_logging
from bigchaindb.log import DEFAULT_LOGGING_CONFIG
from bigchaindb.version import __version__
from logging.config import dictConfig as set_logging_config
def configure_bigchaindb(command):
@@ -67,8 +68,27 @@ def start_logging_process(command):
"""
@functools.wraps(command)
def start_logging(args):
from bigchaindb import config
setup_logging(user_log_config=config.get('log'))
logging_configs = DEFAULT_LOGGING_CONFIG
new_logging_configs = bigchaindb.config['log']
# Update log file location
logging_configs['handlers']['file']['filename'] = new_logging_configs['file']
logging_configs['handlers']['errors']['filename'] = new_logging_configs['error_file']
# Update log levels
logging_configs['handlers']['console']['level'] = \
new_logging_configs['level_console'].upper()
logging_configs['handlers']['file']['level'] = new_logging_configs['level_logfile'].upper()
# Update log date format
logging_configs['formatters']['console']['datefmt'] = new_logging_configs['datefmt_console']
logging_configs['formatters']['file']['datefmt'] = new_logging_configs['datefmt_logfile']
# Update log string format
logging_configs['formatters']['console']['format'] = new_logging_configs['fmt_console']
logging_configs['formatters']['file']['format'] = new_logging_configs['fmt_logfile']
set_logging_config(logging_configs)
command(args)
return start_logging
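Note that the new wrapper copies file locations, levels, date formats, and string formats from `bigchaindb.config['log']`, but does not apply `granular_levels`, which still exists in the config schema (first file above). If per-logger levels were wanted, they could be merged the same way; a hypothetical helper, not part of this commit:

```
# Hypothetical helper (not in this commit): fold per-logger
# 'granular_levels' into a dictConfig-style dict before applying it.
def apply_granular_levels(logging_configs, granular_levels):
    for logger_name, level in granular_levels.items():
        loggers = logging_configs.setdefault('loggers', {})
        loggers.setdefault(logger_name, {})['level'] = level.upper()
    return logging_configs
```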

View File

@@ -1,23 +1,11 @@
import logging
from logging.handlers import DEFAULT_TCP_LOGGING_PORT
from os.path import expanduser, join
DEFAULT_SOCKET_LOGGING_HOST = 'localhost'
DEFAULT_SOCKET_LOGGING_PORT = DEFAULT_TCP_LOGGING_PORT
DEFAULT_SOCKET_LOGGING_ADDR = (DEFAULT_SOCKET_LOGGING_HOST,
DEFAULT_SOCKET_LOGGING_PORT)
DEFAULT_LOG_DIR = expanduser('~')
PUBLISHER_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': logging.DEBUG,
},
}
SUBSCRIBER_LOGGING_CONFIG = {
DEFAULT_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
@@ -63,6 +51,5 @@ SUBSCRIBER_LOGGING_CONFIG = {
'root': {
'level': logging.DEBUG,
'handlers': ['console', 'file', 'errors'],
'port': DEFAULT_SOCKET_LOGGING_PORT
},
}
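The `'port'` entry dropped from `'root'` here was never part of the stdlib `dictConfig` schema; it was smuggled into the dict so callers could read the socket port back out (see `log_config['root']['port']` in the first file). `dictConfig` itself simply ignores unknown keys there, as this standalone snippet demonstrates:

```
# Standalone demonstration: dictConfig ignores unknown keys such as
# 'port' under 'root', so the old entry only served as a stash.
import logging
from logging.config import dictConfig

dictConfig({'version': 1, 'root': {'level': 'WARNING', 'port': 9020}})
assert logging.getLogger().level == logging.WARNING
```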

View File

@@ -1,36 +0,0 @@
import logging.handlers
from gunicorn.glogging import Logger
from .configs import DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT
class HttpServerLogger(Logger):
"""Custom logger class for ``gunicorn`` logs.
Meant for internal usage only, to set the ``logger_class``
configuration setting on gunicorn.
"""
def setup(self, cfg):
"""Setup the gunicorn access and error loggers. This overrides
the parent method. Its main goal is to simply pipe all the logs to
the TCP socket used througout BigchainDB.
Args:
cfg (:obj:`gunicorn.config.Config`): Gunicorn configuration
object. *Ignored*.
"""
log_cfg = self.cfg.env_orig.get('custom_log_config', {})
self.log_port = log_cfg.get('port', DEFAULT_SOCKET_LOGGING_PORT)
self._set_socklog_handler(self.error_log)
self._set_socklog_handler(self.access_log)
def _set_socklog_handler(self, log):
socket_handler = logging.handlers.SocketHandler(
DEFAULT_SOCKET_LOGGING_HOST, self.log_port)
socket_handler._gunicorn = True
log.addHandler(socket_handler)

View File

@@ -1,187 +0,0 @@
"""Setup logging."""
from copy import deepcopy
import logging
from logging.config import dictConfig
import logging.handlers
import pickle
from socketserver import StreamRequestHandler, ThreadingTCPServer
import struct
import sys
from .configs import (
DEFAULT_SOCKET_LOGGING_HOST,
DEFAULT_SOCKET_LOGGING_PORT,
PUBLISHER_LOGGING_CONFIG,
SUBSCRIBER_LOGGING_CONFIG,
)
from bigchaindb.utils import Process
from bigchaindb.common.exceptions import ConfigurationError
def _normalize_log_level(level):
try:
return level.upper()
except AttributeError as exc:
raise ConfigurationError('Log level must be a string!') from exc
def setup_pub_logger(logging_port=None):
logging_port = logging_port or DEFAULT_SOCKET_LOGGING_PORT
dictConfig(PUBLISHER_LOGGING_CONFIG)
socket_handler = logging.handlers.SocketHandler(
DEFAULT_SOCKET_LOGGING_HOST, logging_port)
socket_handler.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(socket_handler)
def setup_sub_logger(*, user_log_config=None):
kwargs = {}
log_port = user_log_config.get('port') if user_log_config is not None else None
if log_port is not None:
kwargs['port'] = log_port
server = LogRecordSocketServer(**kwargs)
with server:
server_proc = Process(
name='logging_server',
target=server.serve_forever,
kwargs={'log_config': user_log_config},
)
server_proc.start()
def setup_logging(*, user_log_config=None):
port = user_log_config.get('port') if user_log_config is not None else None
setup_pub_logger(logging_port=port)
setup_sub_logger(user_log_config=user_log_config)
def create_subscriber_logging_config(*, user_log_config=None): # noqa: C901
sub_log_config = deepcopy(SUBSCRIBER_LOGGING_CONFIG)
if not user_log_config:
return sub_log_config
if 'file' in user_log_config:
filename = user_log_config['file']
sub_log_config['handlers']['file']['filename'] = filename
if 'error_file' in user_log_config:
error_filename = user_log_config['error_file']
sub_log_config['handlers']['errors']['filename'] = error_filename
if 'level_console' in user_log_config:
level = _normalize_log_level(user_log_config['level_console'])
sub_log_config['handlers']['console']['level'] = level
if 'level_logfile' in user_log_config:
level = _normalize_log_level(user_log_config['level_logfile'])
sub_log_config['handlers']['file']['level'] = level
if 'fmt_console' in user_log_config:
fmt = user_log_config['fmt_console']
sub_log_config['formatters']['console']['format'] = fmt
if 'fmt_logfile' in user_log_config:
fmt = user_log_config['fmt_logfile']
sub_log_config['formatters']['file']['format'] = fmt
if 'datefmt_console' in user_log_config:
fmt = user_log_config['datefmt_console']
sub_log_config['formatters']['console']['datefmt'] = fmt
if 'datefmt_logfile' in user_log_config:
fmt = user_log_config['datefmt_logfile']
sub_log_config['formatters']['file']['datefmt'] = fmt
log_levels = user_log_config.get('granular_levels', {})
for logger_name, level in log_levels.items():
level = _normalize_log_level(level)
try:
sub_log_config['loggers'][logger_name]['level'] = level
except KeyError:
sub_log_config['loggers'][logger_name] = {'level': level}
return sub_log_config
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
configured locally.
"""
def handle(self):
"""Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
try:
return pickle.loads(data)
except (pickle.UnpicklingError,
AttributeError, EOFError, TypeError) as exc:
return {
'msg': '({}) Log handling error: un-pickling failed!'.format(
exc.__class__.__name__),
'exc_info': exc.args,
'level': logging.ERROR,
'func': self.unpickle.__name__,
}
def handle_log_record(self, record):
logger = logging.getLogger(record.name)
logger.handle(record)
class LogRecordSocketServer(ThreadingTCPServer):
"""Simple TCP socket-based logging server.
"""
allow_reuse_address = True
def __init__(self,
host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
super().__init__((host, port), handler)
def serve_forever(self, *, poll_interval=0.5, log_config=None):
sub_logging_config = create_subscriber_logging_config(
user_log_config=log_config)
dictConfig(sub_logging_config)
try:
super().serve_forever(poll_interval=poll_interval)
except KeyboardInterrupt:
pass
# NOTE: Because the context manager is only available
# from 3.6 and up, we add it for lower versions.
if sys.version_info < (3, 6):
def __enter__(self):
return self
def __exit__(self, *args):
self.server_close()
LogRecordSocketServer.__enter__ = __enter__
LogRecordSocketServer.__exit__ = __exit__
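For reference, the framing this removed server parses (a 4-byte big-endian length prefix, then a pickled `LogRecord` attribute dict) is roughly what the stdlib `SocketHandler.makePickle()` emits on the sending side. A minimal sender-side sketch:

```
# Sender-side sketch of the wire format the removed server read:
# 4-byte big-endian length prefix, then the pickled record dict.
import logging
import pickle
import struct

def frame_log_record(record):
    payload = pickle.dumps(record.__dict__, protocol=1)
    return struct.pack('>L', len(payload)) + payload

framed = frame_log_record(logging.makeLogRecord({'msg': 'hello'}))
assert struct.unpack('>L', framed[:4])[0] == len(framed) - 4
```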

View File

@@ -104,7 +104,6 @@ def create_server(settings, log_config=None, bigchaindb_factory=None):
# slower.
settings['threads'] = 1
settings['logger_class'] = 'bigchaindb.log.loggers.HttpServerLogger'
settings['custom_log_config'] = log_config
app = create_app(debug=settings.get('debug', False),
threads=settings['threads'],

View File

@@ -33,8 +33,6 @@ For convenience, here's a list of all the relevant environment variables (docume
`BIGCHAINDB_LOG_DATEFMT_LOGFILE`<br>
`BIGCHAINDB_LOG_FMT_CONSOLE`<br>
`BIGCHAINDB_LOG_FMT_LOGFILE`<br>
`BIGCHAINDB_LOG_GRANULAR_LEVELS`<br>
`BIGCHAINDB_LOG_PORT`<br>
The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist), but you can tell BigchainDB to use a different file by using the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`
@@ -217,12 +215,7 @@ holding the logging configuration.
"datefmt_console": "%Y-%m-%d %H:%M:%S",
"datefmt_logfile": "%Y-%m-%d %H:%M:%S",
"fmt_console": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
"fmt_logfile": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
"granular_levels": {
"bichaindb.backend": "info",
"bichaindb.core": "info"
},
"port": 7070
"fmt_logfile": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s"
}
```
@@ -238,9 +231,7 @@ holding the logging configuration.
"datefmt_console": "%Y-%m-%d %H:%M:%S",
"datefmt_logfile": "%Y-%m-%d %H:%M:%S",
"fmt_logfile": "[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)",
"fmt_console": "[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)",
"granular_levels": {},
"port": 9020
"fmt_console": "[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)"
}
```
@@ -435,19 +426,3 @@ logging of the `core.py` module to be more verbose, you would set the
```
**Defaults to**: `{}`
### log.port
The port number at which the logging server should listen.
**Example**:
```
{
"log": {
"port": 7070
}
}
```
**Defaults to**: `9020`

View File

@@ -1,18 +0,0 @@
from logging.handlers import SocketHandler
class TestHttpServerLogger:
def test_init(self, mocker):
from bigchaindb.log.configs import (
DEFAULT_SOCKET_LOGGING_ADDR as expected_socket_address)
from bigchaindb.log.loggers import HttpServerLogger
from gunicorn import config
logger_config = config.Config()
logger = HttpServerLogger(logger_config)
assert len(logger.access_log.handlers) == 1
assert len(logger.error_log.handlers) == 1
assert isinstance(logger.access_log.handlers[0], SocketHandler)
assert isinstance(logger.error_log.handlers[0], SocketHandler)
assert logger.access_log.handlers[0].address == expected_socket_address
assert logger.error_log.handlers[0].address == expected_socket_address

View File

@@ -1,322 +0,0 @@
import logging
import pickle
from logging import getLogger
from logging.config import dictConfig
from logging.handlers import SocketHandler
from pytest import fixture, mark, raises
@fixture
def reset_logging_config():
original_root_logger_level = getLogger().level
dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
yield
getLogger().setLevel(original_root_logger_level)
@fixture
def mocked_process(mocker):
return mocker.patch(
'bigchaindb.log.setup.Process', autospec=True, spec_set=True)
@fixture
def mocked_socket_server(mocker):
return mocker.patch(
'bigchaindb.log.setup.LogRecordSocketServer',
autospec=True,
spec_set=True,
)
@fixture
def log_record_dict():
return {
'args': None,
'created': 1489584900.595193,
'exc_info': None,
'exc_text': None,
'filename': 'config_utils.py',
'funcName': 'autoconfigure',
'levelname': 'DEBUG',
'levelno': 10,
'lineno': 228,
'module': 'config_utils',
'msecs': 595.1929092407227,
'msg': 'System already configured, skipping autoconfiguration',
'name': 'bigchaindb.config_utils',
'pathname': '/usr/src/app/bigchaindb/config_utils.py',
'process': 1981,
'processName': 'MainProcess',
'relativeCreated': 398.4854221343994,
'stack_info': None,
'thread': 140352503879424,
'threadName': 'MainThread',
}
@fixture
def log_record(log_record_dict):
return logging.makeLogRecord(log_record_dict)
@fixture
def log_record_bytes(log_record_dict):
return pickle.dumps(log_record_dict)
@mark.usefixtures('reset_logging_config')
def test_setup_logging(mocked_setup_pub_logger, mocked_setup_sub_logger):
from bigchaindb.log.setup import setup_logging
setup_logging()
mocked_setup_pub_logger.assert_called_once_with(logging_port=None)
mocked_setup_sub_logger.assert_called_once_with(user_log_config=None)
@mark.usefixtures('reset_logging_config')
def test_setup_pub_logger():
from bigchaindb.log.setup import setup_pub_logger
from bigchaindb.log.configs import PUBLISHER_LOGGING_CONFIG
root_logger = getLogger()
assert root_logger.level == logging.NOTSET
setup_pub_logger()
assert root_logger.level == PUBLISHER_LOGGING_CONFIG['root']['level']
assert root_logger.hasHandlers()
assert isinstance(root_logger.handlers[0], SocketHandler)
@mark.usefixtures('reset_logging_config')
def test_setup_sub_logger_without_config(mocked_socket_server, mocked_process):
from bigchaindb.log.setup import setup_sub_logger
setup_sub_logger()
root_logger = getLogger()
assert root_logger.level == logging.NOTSET
mocked_socket_server.assert_called_once_with()
mocked_process.assert_called_once_with(
target=mocked_socket_server.return_value.serve_forever,
kwargs={'log_config': None},
)
mocked_process.return_value.start.assert_called_once_with()
@mark.usefixtures('reset_logging_config')
def test_setup_sub_logger_with_config(mocked_socket_server, mocked_process):
from bigchaindb.log.setup import setup_sub_logger
user_log_config = {
'file': '/var/log/bdb.log',
'level_console': 'warning',
'level_logfile': 'info',
'fmt_console': '[%(levelname)s] (%(name)s) %(message)s',
'fmt_logfile': '[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s',
'granular_levels': {
'bigchaindb.core': 'debug',
},
'port': 9020
}
root_logger = getLogger()
setup_sub_logger(user_log_config=user_log_config)
assert root_logger.level == logging.NOTSET
mocked_socket_server.assert_called_once_with(port=9020)
mocked_process.assert_called_once_with(
target=mocked_socket_server.return_value.serve_forever,
kwargs={'log_config': user_log_config},
)
mocked_process.return_value.start.assert_called_once_with()
def test_create_subscriber_logging_config_without_user_given_config():
from bigchaindb.log.setup import create_subscriber_logging_config
from bigchaindb.log.configs import SUBSCRIBER_LOGGING_CONFIG
config = create_subscriber_logging_config()
assert config == SUBSCRIBER_LOGGING_CONFIG
def test_create_subscriber_logging_config_with_user_given_config():
from bigchaindb.log.setup import create_subscriber_logging_config
from bigchaindb.log.configs import (
SUBSCRIBER_LOGGING_CONFIG as expected_log_config)
user_log_config = {
'file': '/var/log/bigchaindb/bdb.log',
'error_file': '/var/log/bigchaindb/bdb-err.log',
'level_console': 'warning',
'level_logfile': 'info',
'fmt_console': '[%(levelname)s] (%(name)s) %(message)s',
'fmt_logfile': '[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s',
'datefmt_console': '%H:%M:%S',
'datefmt_logfile': '%a, %d %b %Y %H:%M:%S +0000',
'granular_levels': {
'bigchaindb.core': 'debug',
},
}
config = create_subscriber_logging_config(user_log_config=user_log_config)
assert config['root']['level'] == expected_log_config['root']['level']
assert all(config['loggers'][logger]['level'] == level.upper()
for logger, level in user_log_config['granular_levels'].items())
assert len(config) == len(expected_log_config)
assert config['version'] == expected_log_config['version']
assert (config['disable_existing_loggers'] ==
expected_log_config['disable_existing_loggers'])
assert (config['formatters']['console']['format'] ==
user_log_config['fmt_console'])
assert (config['formatters']['file']['format'] ==
user_log_config['fmt_logfile'])
assert (config['formatters']['console']['datefmt'] ==
user_log_config['datefmt_console'])
assert (config['formatters']['file']['datefmt'] ==
user_log_config['datefmt_logfile'])
assert (config['handlers']['console']['level'] ==
user_log_config['level_console'].upper())
assert (config['handlers']['file']['level'] ==
user_log_config['level_logfile'].upper())
assert config['handlers']['errors']['level'] == logging.ERROR
assert config['handlers']['file']['filename'] == user_log_config['file']
assert (config['handlers']['errors']['filename'] ==
user_log_config['error_file'])
del config['handlers']['console']['level']
del config['handlers']['file']['level']
del config['handlers']['file']['filename']
del config['formatters']['console']['format']
del config['formatters']['console']['datefmt']
del config['formatters']['file']['format']
del config['formatters']['file']['datefmt']
del expected_log_config['handlers']['console']['level']
del expected_log_config['handlers']['file']['level']
del expected_log_config['handlers']['file']['filename']
del expected_log_config['formatters']['console']['format']
del expected_log_config['formatters']['console']['datefmt']
del expected_log_config['formatters']['file']['format']
del expected_log_config['formatters']['file']['datefmt']
assert (config['handlers']['console'] ==
expected_log_config['handlers']['console'])
assert (config['handlers']['file'] ==
expected_log_config['handlers']['file'])
assert (config['formatters']['console'] ==
expected_log_config['formatters']['console'])
assert (config['formatters']['file'] ==
expected_log_config['formatters']['file'])
def test_normalize_log_level():
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.log.setup import _normalize_log_level
with raises(ConfigurationError) as exc:
_normalize_log_level(2)
assert exc.value.args == ('Log level must be a string!',)
assert isinstance(exc.value.__cause__, AttributeError)
assert exc.value.__cause__.args == (
"'int' object has no attribute 'upper'",)
class TestLogRecordSocketServer:
def test_init(self):
from bigchaindb.log.setup import (LogRecordSocketServer,
LogRecordStreamHandler)
server = LogRecordSocketServer()
assert server.allow_reuse_address
assert server.server_address == (
'127.0.0.1', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
assert server.RequestHandlerClass == LogRecordStreamHandler
server.server_close()
@mark.parametrize('side_effect', (None, KeyboardInterrupt))
def test_server_forever(self, mocker, side_effect):
from bigchaindb.log.setup import LogRecordSocketServer
mocked_create_subscriber_logging_config = mocker.patch(
'bigchaindb.log.setup.create_subscriber_logging_config',
autospec=True,
spec_set=True,
)
mocked_dict_config = mocker.patch('bigchaindb.log.setup.dictConfig',
autospec=True, spec_set=True)
mocked_parent_serve_forever = mocker.patch(
'bigchaindb.log.setup.ThreadingTCPServer.serve_forever',
autospec=True,
spec_set=True,
side_effect=side_effect,
)
server = LogRecordSocketServer()
with server:
server.serve_forever()
mocked_create_subscriber_logging_config.assert_called_once_with(
user_log_config=None)
mocked_dict_config.assert_called_once_with(
mocked_create_subscriber_logging_config.return_value)
mocked_parent_serve_forever.assert_called_once_with(server,
poll_interval=0.5)
class TestLogRecordStreamHandler:
def test_handle(self, mocker, log_record_dict, log_record_bytes):
from bigchaindb.log.setup import LogRecordStreamHandler
chunks = [log_record_bytes, b'\x00\x00\x02T']
mocked_handle_log_record = mocker.patch(
'bigchaindb.log.setup.LogRecordStreamHandler.handle_log_record',
autospec=True,
spec_set=True,
)
def mocked_recv(bufsize):
try:
return chunks.pop()
except IndexError:
return b' '
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
request.return_value.recv = mocked_recv
client_address = ('127.0.0.1', 9020)
LogRecordStreamHandler(
request.return_value, client_address, None)
assert mocked_handle_log_record.called
assert (mocked_handle_log_record.call_args[0][1].__dict__ ==
log_record_dict)
def test_handle_log_record(self, mocker, log_record):
from bigchaindb.log.setup import LogRecordStreamHandler
mocker.patch('bigchaindb.log.setup.LogRecordStreamHandler.handle')
mocked_logger_handle = mocker.patch(
'bigchaindb.log.setup.logging.Logger.handle',
autospec=True, spec_set=True)
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
client_address = ('127.0.0.1', 9020)
handler = LogRecordStreamHandler(
request.return_value, client_address, None)
handler.handle_log_record(log_record)
assert log_record in mocked_logger_handle.call_args[0]
def test_unpickle(self, mocker, log_record_bytes, log_record_dict):
from bigchaindb.log.setup import LogRecordStreamHandler
mocker.patch('bigchaindb.log.setup.LogRecordStreamHandler.handle')
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
client_address = ('127.0.0.1', 9020)
handler = LogRecordStreamHandler(
request.return_value, client_address, None)
obj = handler.unpickle(log_record_bytes)
assert obj == log_record_dict
@mark.parametrize('error', (
pickle.UnpicklingError, AttributeError, EOFError, TypeError))
def test_unpickle_error(self, mocker, error):
from bigchaindb.log.setup import LogRecordStreamHandler
mocker.patch('bigchaindb.log.setup.LogRecordStreamHandler.handle')
mocker.patch(
'bigchaindb.log.setup.pickle.loads',
autospec=True,
spec_set=True,
side_effect=error('msg'),
)
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
client_address = ('127.0.0.1', 9020)
handler = LogRecordStreamHandler(
request.return_value, client_address, None)
obj = handler.unpickle(None)
assert obj == {
'msg': '({}) Log handling error: un-pickling failed!'.format(
error.__name__),
'exc_info': ('msg',),
'level': logging.ERROR,
'func': handler.unpickle.__name__,
}