Add logging infrastructure

Sylvain Bellemare 2017-03-13 17:55:11 +01:00 committed by Sylvain Bellemare
parent 9aa243c9f8
commit 43f779a18b
12 changed files with 680 additions and 27 deletions

@@ -45,7 +45,22 @@ config = {
'private': None,
},
'keyring': [],
'backlog_reassign_delay': 120
'backlog_reassign_delay': 120,
'log': {
# TODO Document here or elsewhere.
# Example of config:
# 'file': '/var/log/bigchaindb.log',
# 'level_console': 'info',
# 'level_logfile': 'info',
# 'datefmt_console': '%Y-%m-%d %H:%M:%S',
# 'datefmt_logfile': '%Y-%m-%d %H:%M:%S',
# 'fmt_console': '%(asctime)s [%(levelname)s] (%(name)s) %(message)s',
# 'fmt_logfile': '%(asctime)s [%(levelname)s] (%(name)s) %(message)s',
# 'granular_levels': {
# 'bigchaindb.backend': 'info',
# 'bigchaindb.core': 'info',
# },
},
}
# We need to maintain a backup copy of the original config dict in case
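
One behavioral note on the level strings above: they are case-insensitive and are upper-cased during setup, while non-string values are rejected with a ConfigurationError (see _normalize_log_level in bigchaindb/log/setup.py, added below). A minimal sketch:

    from bigchaindb.common.exceptions import ConfigurationError
    from bigchaindb.log.setup import _normalize_log_level

    assert _normalize_log_level('info') == 'INFO'
    try:
        _normalize_log_level(2)  # ints have no .upper()
    except ConfigurationError:
        pass  # 'Log level must be a string!'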

@@ -25,6 +25,7 @@ from bigchaindb.commands.messages (
RETHINKDB_STARTUP_ERROR,
)
from bigchaindb.commands.utils import configure_bigchaindb, input_on_stderr
from bigchaindb.log.setup import setup_logging
logging.basicConfig(level=logging.INFO)
@@ -173,6 +174,9 @@ def run_start(args):
"""Start the processes to run the node"""
logger.info('BigchainDB Version %s', bigchaindb.__version__)
# TODO setup logging -- pass logging config, extracted out from main config
setup_logging()
if args.allow_temp_keypair:
if not (bigchaindb.config['keypair']['private'] or
bigchaindb.config['keypair']['public']):
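
The TODO above concerns threading the user's 'log' section from the main config into the new machinery; a sketch of that eventual wiring (not part of this commit) could be:

    import bigchaindb
    from bigchaindb.log.setup import setup_logging

    # Hypothetical follow-up to the TODO: extract the 'log' section from
    # the main config and pass it through to the subscriber process.
    setup_logging(user_log_config=bigchaindb.config.get('log'))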

bigchaindb/log/configs.py (new file, 59 lines)

@@ -0,0 +1,59 @@
import logging
from os.path import expanduser, join
DEFAULT_LOG_DIR = expanduser('~')
PUBLISHER_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': logging.DEBUG,
},
}
SUBSCRIBER_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'class': 'logging.Formatter',
'format': (
'%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'file': {
'class': 'logging.Formatter',
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
'%(message)s (%(processName)-10s - pid: %(process)d)'),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
'level': logging.INFO,
},
'file': {
'class': 'logging.FileHandler',
'filename': join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
'mode': 'w',
'formatter': 'file',
'level': logging.INFO,
},
'errors': {
'class': 'logging.FileHandler',
'filename': join(DEFAULT_LOG_DIR, 'bigchaindb-errors.log'),
'mode': 'w',
'level': logging.ERROR,
'formatter': 'file',
},
},
'loggers': {},
'root': {
'level': logging.DEBUG,
'handlers': ['console', 'file', 'errors']
},
}
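
As a quick sanity check, a sketch of what applying the subscriber config does: INFO-and-up records go to the console and to bigchaindb.log in the user's home directory, and ERROR-and-up records additionally land in bigchaindb-errors.log.

    from logging import getLogger
    from logging.config import dictConfig

    from bigchaindb.log.configs import SUBSCRIBER_LOGGING_CONFIG

    dictConfig(SUBSCRIBER_LOGGING_CONFIG)
    logger = getLogger('bigchaindb.demo')
    logger.info('written to the console and to bigchaindb.log')
    logger.error('additionally written to bigchaindb-errors.log')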

bigchaindb/log/setup.py (new file, 169 lines)

@@ -0,0 +1,169 @@
"""Setup logging."""
from copy import deepcopy
import logging
from logging.config import dictConfig
import logging.handlers
import pickle
from socketserver import StreamRequestHandler, ThreadingTCPServer
import struct
import sys
from multiprocessing import Process
from .configs import PUBLISHER_LOGGING_CONFIG, SUBSCRIBER_LOGGING_CONFIG
from bigchaindb.common.exceptions import ConfigurationError
def _normalize_log_level(level):
try:
return level.upper()
except AttributeError as exc:
raise ConfigurationError('Log level must be a string!') from exc
def setup_pub_logger():
dictConfig(PUBLISHER_LOGGING_CONFIG)
socket_handler = logging.handlers.SocketHandler(
'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
socket_handler.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(socket_handler)
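
A sketch of the publisher side: once a process has called setup_pub_logger(), records emitted through any logger propagate to the root logger's SocketHandler and are shipped, unformatted, to the local log server.

    import logging

    from bigchaindb.log.setup import setup_pub_logger

    setup_pub_logger()
    # Pickled and sent to localhost:9020 (DEFAULT_TCP_LOGGING_PORT);
    # formatting happens only in the subscriber process.
    logging.getLogger('bigchaindb.core').info('emitted from a worker')
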
def setup_sub_logger(*, user_log_config=None):
server = LogRecordSocketServer()
with server:
server_proc = Process(
target=server.serve_forever,
kwargs={'log_config': user_log_config},
)
server_proc.start()
def setup_logging(*, user_log_config=None):
setup_pub_logger()
setup_sub_logger(user_log_config=user_log_config)
def create_subscriber_logging_config(*, user_log_config=None):
sub_log_config = deepcopy(SUBSCRIBER_LOGGING_CONFIG)
if not user_log_config:
return sub_log_config
if 'file' in user_log_config:
filename = user_log_config['file']
sub_log_config['handlers']['file']['filename'] = filename
if 'level_console' in user_log_config:
level = _normalize_log_level(user_log_config['level_console'])
sub_log_config['handlers']['console']['level'] = level
if 'level_logfile' in user_log_config:
level = _normalize_log_level(user_log_config['level_logfile'])
sub_log_config['handlers']['file']['level'] = level
if 'fmt_console' in user_log_config:
fmt = user_log_config['fmt_console']
sub_log_config['formatters']['console']['format'] = fmt
if 'fmt_logfile' in user_log_config:
fmt = user_log_config['fmt_logfile']
sub_log_config['formatters']['file']['format'] = fmt
if 'datefmt_console' in user_log_config:
fmt = user_log_config['datefmt_console']
sub_log_config['formatters']['console']['datefmt'] = fmt
if 'datefmt_logfile' in user_log_config:
fmt = user_log_config['datefmt_logfile']
sub_log_config['formatters']['file']['datefmt'] = fmt
log_levels = user_log_config.get('granular_levels', {})
for logger_name, level in log_levels.items():
level = _normalize_log_level(level)
try:
sub_log_config['loggers'][logger_name]['level'] = level
except KeyError:
sub_log_config['loggers'][logger_name] = {'level': level}
return sub_log_config
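
Tying this back to the 'log' section documented in bigchaindb/__init__.py, a sketch of how user keys override the defaults: handler and file settings are replaced in place, levels are upper-cased, and granular_levels entries become per-logger 'loggers' entries.

    from bigchaindb.log.setup import create_subscriber_logging_config

    config = create_subscriber_logging_config(user_log_config={
        'file': '/var/log/bigchaindb.log',
        'level_console': 'warning',
        'granular_levels': {'bigchaindb.core': 'debug'},
    })
    assert config['handlers']['file']['filename'] == '/var/log/bigchaindb.log'
    assert config['handlers']['console']['level'] == 'WARNING'
    assert config['loggers']['bigchaindb.core'] == {'level': 'DEBUG'}
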
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
configured locally.
"""
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
try:
return pickle.loads(data)
except (pickle.UnpicklingError,
AttributeError, EOFError, TypeError) as exc:
return {
'msg': '({}) Log handling error: un-pickling failed!'.format(
exc.__class__.__name__),
'exc_info': exc.args,
'level': logging.ERROR,
'func': self.unpickle.__name__,
}
def handle_log_record(self, record):
logger = logging.getLogger(record.name)
logger.handle(record)
class LogRecordSocketServer(ThreadingTCPServer):
"""
Simple TCP socket-based logging server.
"""
allow_reuse_address = True
def __init__(self,
host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
super().__init__((host, port), handler)
def serve_forever(self, *, poll_interval=0.5, log_config=None):
sub_logging_config = create_subscriber_logging_config(
user_log_config=log_config)
dictConfig(sub_logging_config)
try:
super().serve_forever(poll_interval=poll_interval)
except KeyboardInterrupt:
pass
# NOTE: Because the context manager is only available
# from 3.6 and up, we add it for lower versions.
if sys.version_info < (3, 6):
def __enter__(self):
return self
def __exit__(self, *args):
self.server_close()
LogRecordSocketServer.__enter__ = __enter__
LogRecordSocketServer.__exit__ = __exit__
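
For reference, the framing handle() expects is the same one the standard library's SocketHandler produces: a 4-byte big-endian length prefix followed by a pickle of the record's attribute dict. Roughly, on the sending side:

    import logging
    import pickle
    import struct

    record = logging.LogRecord('bigchaindb.demo', logging.INFO, __file__, 1,
                               'hello over the wire', None, None)
    payload = pickle.dumps(record.__dict__)  # roughly SocketHandler.makePickle()
    frame = struct.pack('>L', len(payload)) + payload
    # handle() reads the 4-byte prefix, then len(payload) bytes, un-pickles
    # them, and rebuilds the record via logging.makeLogRecord().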

@@ -31,6 +31,7 @@ dev_require = [
'ipdb',
'ipython',
'watchdog',
'logging_tree',
]
docs_require = [

@@ -50,3 +50,12 @@ def run_start_args(request):
start_rethinkdb=param.get('start_rethinkdb', False),
allow_temp_keypair=param.get('allow_temp_keypair', False),
)
@pytest.fixture
def mocked_setup_logging(mocker):
return mocker.patch(
'bigchaindb.commands.bigchain.setup_logging',
autospec=True,
spec_set=True,
)

@@ -9,12 +9,14 @@ from argparse import Namespace
def test_bigchain_run_start_with_rethinkdb(mock_start_rethinkdb,
mock_run_configure,
mock_processes_start,
mock_db_init_with_existing_db):
mock_db_init_with_existing_db,
mocked_setup_logging):
from bigchaindb.commands.bigchain import run_start
args = Namespace(start_rethinkdb=True, allow_temp_keypair=False, config=None, yes=True)
run_start(args)
mock_start_rethinkdb.assert_called_with()
mocked_setup_logging.assert_called_once_with()
@patch('subprocess.Popen')

@@ -33,15 +33,20 @@ def test_main_entrypoint(mock_start):
assert mock_start.called
def test_bigchain_run_start(mock_run_configure, mock_processes_start, mock_db_init_with_existing_db):
def test_bigchain_run_start(mock_run_configure,
mock_processes_start,
mock_db_init_with_existing_db,
mocked_setup_logging):
from bigchaindb.commands.bigchain import run_start
args = Namespace(start_rethinkdb=False, allow_temp_keypair=False, config=None, yes=True)
run_start(args)
mocked_setup_logging.assert_called_once_with()
@pytest.mark.skipif(reason="BigchainDB doesn't support the automatic creation of a config file anymore")
def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_processes_start,
mock_generate_key_pair, mock_db_init_with_existing_db):
def test_bigchain_run_start_assume_yes_create_default_config(
monkeypatch, mock_processes_start, mock_generate_key_pair,
mock_db_init_with_existing_db, mocked_setup_logging):
import bigchaindb
from bigchaindb.commands.bigchain import run_start
from bigchaindb import config_utils
@@ -61,6 +66,7 @@ def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_p
args = Namespace(config=None, yes=True)
run_start(args)
mocked_setup_logging.assert_called_once_with()
assert value['return'] == expected_config
@@ -228,9 +234,9 @@ def test_run_configure_with_backend(backend, monkeypatch, mock_write_config):
@patch('bigchaindb.common.crypto.generate_key_pair',
return_value=('private_key', 'public_key'))
@pytest.mark.usefixtures('ignore_local_config_file')
def test_allow_temp_keypair_generates_one_on_the_fly(mock_gen_keypair,
mock_processes_start,
mock_db_init_with_existing_db):
def test_allow_temp_keypair_generates_one_on_the_fly(
mock_gen_keypair, mock_processes_start,
mock_db_init_with_existing_db, mocked_setup_logging):
import bigchaindb
from bigchaindb.commands.bigchain import run_start
@@ -239,6 +245,7 @@ def test_allow_temp_keypair_generates_one_on_the_fly(mock_gen_keypair,
args = Namespace(allow_temp_keypair=True, start_rethinkdb=False, config=None, yes=True)
run_start(args)
mocked_setup_logging.assert_called_once_with()
assert bigchaindb.config['keypair']['private'] == 'private_key'
assert bigchaindb.config['keypair']['public'] == 'public_key'
@@ -248,7 +255,8 @@ def test_allow_temp_keypair_generates_one_on_the_fly(mock_gen_keypair,
@pytest.mark.usefixtures('ignore_local_config_file')
def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair,
mock_processes_start,
mock_db_init_with_existing_db):
mock_db_init_with_existing_db,
mocked_setup_logging):
import bigchaindb
from bigchaindb.commands.bigchain import run_start
@@ -262,11 +270,15 @@ def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair,
args = Namespace(allow_temp_keypair=True, start_rethinkdb=False, config=None, yes=True)
run_start(args)
mocked_setup_logging.assert_called_once_with()
assert bigchaindb.config['keypair']['private'] == original_private_key
assert bigchaindb.config['keypair']['public'] == original_public_key
def test_run_start_when_db_already_exists(mocker, monkeypatch, run_start_args):
def test_run_start_when_db_already_exists(mocker,
monkeypatch,
run_start_args,
mocked_setup_logging):
from bigchaindb.commands.bigchain import run_start
from bigchaindb.common.exceptions import DatabaseAlreadyExists
mocked_start = mocker.patch('bigchaindb.processes.start')
@@ -277,10 +289,14 @@ def test_run_start_when_db_already_exists(mocker, monkeypatch, run_start_args):
monkeypatch.setattr(
'bigchaindb.commands.bigchain._run_init', mock_run_init)
run_start(run_start_args)
mocked_setup_logging.assert_called_once_with()
assert mocked_start.called
def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args):
def test_run_start_when_keypair_not_found(mocker,
monkeypatch,
run_start_args,
mocked_setup_logging):
from bigchaindb.commands.bigchain import run_start
from bigchaindb.commands.messages import CANNOT_START_KEYPAIR_NOT_FOUND
from bigchaindb.common.exceptions import KeypairNotFoundException
@@ -295,6 +311,7 @@ def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args):
with pytest.raises(SystemExit) as exc:
run_start(run_start_args)
mocked_setup_logging.assert_called_once_with()
assert len(exc.value.args) == 1
assert exc.value.args[0] == CANNOT_START_KEYPAIR_NOT_FOUND
assert not mocked_start.called
@@ -302,7 +319,8 @@ def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args):
def test_run_start_when_start_rethinkdb_fails(mocker,
monkeypatch,
run_start_args):
run_start_args,
mocked_setup_logging):
from bigchaindb.commands.bigchain import run_start
from bigchaindb.commands.messages import RETHINKDB_STARTUP_ERROR
from bigchaindb.common.exceptions import StartupError
@@ -319,6 +337,7 @@ def test_run_start_when_start_rethinkdb_fails(mocker,
with pytest.raises(SystemExit) as exc:
run_start(run_start_args)
mocked_setup_logging.assert_called_once_with()
assert len(exc.value.args) == 1
assert exc.value.args[0] == RETHINKDB_STARTUP_ERROR.format(err_msg)
assert not mocked_start.called

tests/log/test_setup.py (new file, 328 lines)

@@ -0,0 +1,328 @@
import logging
import pickle
from logging import getLogger
from logging.config import dictConfig
from logging.handlers import SocketHandler
from pytest import fixture, mark, raises
@fixture
def reset_logging_config():
original_root_logger_level = getLogger().level
dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
yield
getLogger().setLevel(original_root_logger_level)
@fixture
def mocked_process(mocker):
return mocker.patch(
'bigchaindb.log.setup.Process', autospec=True, spec_set=True)
@fixture
def mocked_socket_server(mocker):
return mocker.patch(
'bigchaindb.log.setup.LogRecordSocketServer',
autospec=True,
spec_set=True,
)
@fixture
def mocked_setup_pub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_pub_logger', autospec=True, spec_set=True)
@fixture
def mocked_setup_sub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_sub_logger', autospec=True, spec_set=True)
@fixture
def log_record_dict():
return {
'args': None,
'created': 1489584900.595193,
'exc_info': None,
'exc_text': None,
'filename': 'config_utils.py',
'funcName': 'autoconfigure',
'levelname': 'DEBUG',
'levelno': 10,
'lineno': 228,
'module': 'config_utils',
'msecs': 595.1929092407227,
'msg': 'System already configured, skipping autoconfiguration',
'name': 'bigchaindb.config_utils',
'pathname': '/usr/src/app/bigchaindb/config_utils.py',
'process': 1981,
'processName': 'MainProcess',
'relativeCreated': 398.4854221343994,
'stack_info': None,
'thread': 140352503879424,
'threadName': 'MainThread',
}
@fixture
def log_record(log_record_dict):
return logging.makeLogRecord(log_record_dict)
@fixture
def log_record_bytes(log_record_dict):
return pickle.dumps(log_record_dict)
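
A note on the magic bytes b'\x00\x00\x02T' used in test_handle below: they are just the 4-byte big-endian length prefix of the pickled fixture, presumably 596 bytes here.

    import struct

    assert struct.unpack('>L', b'\x00\x00\x02T')[0] == 596
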
@mark.usefixtures('reset_logging_config')
def test_setup_logging(mocked_setup_pub_logger, mocked_setup_sub_logger):
from bigchaindb.log.setup import setup_logging
setup_logging()
mocked_setup_pub_logger.assert_called_once_with()
mocked_setup_sub_logger.assert_called_once_with(user_log_config=None)
@mark.usefixtures('reset_logging_config')
def test_setup_pub_logger():
from bigchaindb.log.setup import setup_pub_logger
from bigchaindb.log.configs import PUBLISHER_LOGGING_CONFIG
root_logger = getLogger()
assert root_logger.level == logging.NOTSET
setup_pub_logger()
assert root_logger.level == PUBLISHER_LOGGING_CONFIG['root']['level']
assert root_logger.hasHandlers()
assert isinstance(root_logger.handlers[0], SocketHandler)
@mark.usefixtures('reset_logging_config')
def test_setup_sub_logger_without_config(mocked_socket_server, mocked_process):
from bigchaindb.log.setup import setup_sub_logger
setup_sub_logger()
root_logger = getLogger()
assert root_logger.level == logging.NOTSET
mocked_socket_server.assert_called_once_with()
mocked_process.assert_called_once_with(
target=mocked_socket_server.return_value.serve_forever,
kwargs={'log_config': None},
)
mocked_process.return_value.start.assert_called_once_with()
@mark.usefixtures('reset_logging_config')
def test_setup_sub_logger_with_config(mocked_socket_server, mocked_process):
from bigchaindb.log.setup import setup_sub_logger
user_log_config = {
'file': '/var/log/bdb.log',
'level_console': 'warning',
'level_logfile': 'info',
'fmt_console': '[%(levelname)s] (%(name)s) %(message)s',
'fmt_logfile': '[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s',
'granular_levels': {
'bigchaindb.core': 'debug',
},
}
root_logger = getLogger()
setup_sub_logger(user_log_config=user_log_config)
assert root_logger.level == logging.NOTSET
mocked_socket_server.assert_called_once_with()
mocked_process.assert_called_once_with(
target=mocked_socket_server.return_value.serve_forever,
kwargs={'log_config': user_log_config},
)
mocked_process.return_value.start.assert_called_once_with()
def test_create_subscriber_logging_config_without_user_given_config():
from bigchaindb.log.setup import create_subscriber_logging_config
from bigchaindb.log.configs import SUBSCRIBER_LOGGING_CONFIG
config = create_subscriber_logging_config()
assert config == SUBSCRIBER_LOGGING_CONFIG
def test_create_subscriber_logging_config_with_user_given_config():
from bigchaindb.log.setup import create_subscriber_logging_config
from bigchaindb.log.configs import (
SUBSCRIBER_LOGGING_CONFIG as expected_log_config)
user_log_config = {
'file': '/var/log/bigchaindb/bdb.log',
'level_console': 'warning',
'level_logfile': 'info',
'fmt_console': '[%(levelname)s] (%(name)s) %(message)s',
'fmt_logfile': '[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s',
'datefmt_console': '%H:%M:%S',
'datefmt_logfile': '%a, %d %b %Y %H:%M:%S +0000',
'granular_levels': {
'bigchaindb.core': 'debug',
},
}
config = create_subscriber_logging_config(user_log_config=user_log_config)
assert config['root']['level'] == expected_log_config['root']['level']
assert all(config['loggers'][logger]['level'] == level.upper()
for logger, level in user_log_config['granular_levels'].items())
assert len(config) == len(expected_log_config)
assert config['version'] == expected_log_config['version']
assert (config['disable_existing_loggers'] ==
expected_log_config['disable_existing_loggers'])
assert (config['formatters']['console']['format'] ==
user_log_config['fmt_console'])
assert (config['formatters']['file']['format'] ==
user_log_config['fmt_logfile'])
assert (config['formatters']['console']['datefmt'] ==
user_log_config['datefmt_console'])
assert (config['formatters']['file']['datefmt'] ==
user_log_config['datefmt_logfile'])
assert (config['handlers']['console']['level'] ==
user_log_config['level_console'].upper())
assert (config['handlers']['file']['level'] ==
user_log_config['level_logfile'].upper())
assert config['handlers']['file']['filename'] == user_log_config['file']
del config['handlers']['console']['level']
del config['handlers']['file']['level']
del config['handlers']['file']['filename']
del config['formatters']['console']['format']
del config['formatters']['console']['datefmt']
del config['formatters']['file']['format']
del config['formatters']['file']['datefmt']
del expected_log_config['handlers']['console']['level']
del expected_log_config['handlers']['file']['level']
del expected_log_config['handlers']['file']['filename']
del expected_log_config['formatters']['console']['format']
del expected_log_config['formatters']['console']['datefmt']
del expected_log_config['formatters']['file']['format']
del expected_log_config['formatters']['file']['datefmt']
assert (config['handlers']['console'] ==
expected_log_config['handlers']['console'])
assert (config['handlers']['file'] ==
expected_log_config['handlers']['file'])
assert (config['formatters']['console'] ==
expected_log_config['formatters']['console'])
assert (config['formatters']['file'] ==
expected_log_config['formatters']['file'])
def test_normalize_log_level():
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.log.setup import _normalize_log_level
with raises(ConfigurationError) as exc:
_normalize_log_level(2)
assert exc.value.args == ('Log level must be a string!',)
assert isinstance(exc.value.__cause__, AttributeError)
assert exc.value.__cause__.args == (
"'int' object has no attribute 'upper'",)
class TestLogRecordSocketServer:
def test_init(self):
from bigchaindb.log.setup import (LogRecordSocketServer,
LogRecordStreamHandler)
server = LogRecordSocketServer()
assert server.allow_reuse_address
assert server.server_address == (
'127.0.0.1', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
assert server.RequestHandlerClass == LogRecordStreamHandler
@mark.parametrize('side_effect', (None, KeyboardInterrupt))
def test_serve_forever(self, mocker, side_effect):
from bigchaindb.log.setup import LogRecordSocketServer
mocked_create_subscriber_logging_config = mocker.patch(
'bigchaindb.log.setup.create_subscriber_logging_config',
autospec=True,
spec_set=True,
)
mocked_dict_config = mocker.patch('bigchaindb.log.setup.dictConfig',
autospec=True, spec_set=True)
mocked_parent_serve_forever = mocker.patch(
'bigchaindb.log.setup.ThreadingTCPServer.serve_forever',
autospec=True,
spec_set=True,
side_effect=side_effect,
)
server = LogRecordSocketServer()
with server:
server.serve_forever()
mocked_create_subscriber_logging_config.assert_called_once_with(
user_log_config=None)
mocked_dict_config.assert_called_once_with(
mocked_create_subscriber_logging_config.return_value)
mocked_parent_serve_forever.assert_called_once_with(server,
poll_interval=0.5)
class TestLogRecordStreamHandler:
def test_handle(self, mocker, log_record_dict, log_record_bytes):
from bigchaindb.log.setup import LogRecordStreamHandler
chunks = [log_record_bytes, b'\x00\x00\x02T']
mocked_handle_log_record = mocker.patch(
'bigchaindb.log.setup.LogRecordStreamHandler.handle_log_record',
autospec=True,
spec_set=True,
)
def mocked_recv(bufsize):
try:
return chunks.pop()
except IndexError:
return b' '
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
request.return_value.recv = mocked_recv
client_address = ('127.0.0.1', 9020)
LogRecordStreamHandler(
request.return_value, client_address, None)
assert mocked_handle_log_record.called
assert (mocked_handle_log_record.call_args[0][1].__dict__ ==
log_record_dict)
def test_handle_log_record(self, mocker, log_record):
from bigchaindb.log.setup import LogRecordStreamHandler
mocker.patch('bigchaindb.log.setup.LogRecordStreamHandler.handle')
mocked_logger_handle = mocker.patch(
'bigchaindb.log.setup.logging.Logger.handle',
autospec=True, spec_set=True)
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
client_address = ('127.0.0.1', 9020)
handler = LogRecordStreamHandler(
request.return_value, client_address, None)
handler.handle_log_record(log_record)
assert log_record in mocked_logger_handle.call_args[0]
def test_unpickle(self, mocker, log_record_bytes, log_record_dict):
from bigchaindb.log.setup import LogRecordStreamHandler
mocker.patch('bigchaindb.log.setup.LogRecordStreamHandler.handle')
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
client_address = ('127.0.0.1', 9020)
handler = LogRecordStreamHandler(
request.return_value, client_address, None)
obj = handler.unpickle(log_record_bytes)
assert obj == log_record_dict
@mark.parametrize('error', (
pickle.UnpicklingError, AttributeError, EOFError, TypeError))
def test_unpickle_error(self, mocker, error):
from bigchaindb.log.setup import LogRecordStreamHandler
mocker.patch('bigchaindb.log.setup.LogRecordStreamHandler.handle')
mocker.patch(
'bigchaindb.log.setup.pickle.loads',
autospec=True,
spec_set=True,
side_effect=error('msg'),
)
request = mocker.patch('socket.socket', autospec=True, spec_set=True)
client_address = ('127.0.0.1', 9020)
handler = LogRecordStreamHandler(
request.return_value, client_address, None)
obj = handler.unpickle(None)
assert obj == {
'msg': '({}) Log handling error: un-pickling failed!'.format(
error.__name__),
'exc_info': ('msg',),
'level': logging.ERROR,
'func': handler.unpickle.__name__,
}

@@ -202,7 +202,8 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request):
'private': None,
},
'keyring': KEYRING.split(':'),
'backlog_reassign_delay': 5
'backlog_reassign_delay': 5,
'log': {},
}

@@ -44,7 +44,8 @@ def test_post_create_transaction_endpoint(b, client):
assert res.json['outputs'][0]['public_keys'][0] == user_pub
def test_post_create_transaction_with_invalid_id(b, client, caplog):
@patch('bigchaindb.web.views.base.logger')
def test_post_create_transaction_with_invalid_id(mock_logger, b, client):
from bigchaindb.common.exceptions import InvalidHash
from bigchaindb.models import Transaction
user_priv, user_pub = crypto.generate_key_pair()
@@ -56,16 +57,29 @@ def test_post_create_transaction_with_invalid_id(b, client, caplog):
res = client.post(TX_ENDPOINT, data=json.dumps(tx))
expected_status_code = 400
expected_error_message = (
'Invalid transaction ({}): The transaction\'s id \'{}\' isn\'t equal to '
'the hash of its body, i.e. it\'s not valid.'
"Invalid transaction ({}): The transaction's id '{}' isn't equal to "
"the hash of its body, i.e. it's not valid."
).format(InvalidHash.__name__, tx['id'])
assert res.status_code == expected_status_code
assert res.json['message'] == expected_error_message
assert caplog.records[0].args['status'] == expected_status_code
assert caplog.records[0].args['message'] == expected_error_message
assert mock_logger.error.called
assert (
'HTTP API error: %(status)s - %(message)s' in
mock_logger.error.call_args[0]
)
assert (
{'message': expected_error_message, 'status': expected_status_code} in
mock_logger.error.call_args[0]
)
# TODO put back caplog based asserts once possible
# assert caplog.records[0].args['status'] == expected_status_code
# assert caplog.records[0].args['message'] == expected_error_message
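
These assertions pin down the call that bigchaindb.web.views.base presumably makes: a %-style format string plus a mapping, left un-interpolated so handlers can defer the formatting.

    import logging

    logger = logging.getLogger('bigchaindb.web.views.base')
    # Presumed call site (not shown in this diff); values are illustrative.
    logger.error('HTTP API error: %(status)s - %(message)s',
                 {'status': 400, 'message': 'Invalid transaction ...'})
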
def test_post_create_transaction_with_invalid_signature(b, client, caplog):
@patch('bigchaindb.web.views.base.logger')
def test_post_create_transaction_with_invalid_signature(mock_logger,
b,
client):
from bigchaindb.common.exceptions import InvalidSignature
from bigchaindb.models import Transaction
user_priv, user_pub = crypto.generate_key_pair()
@@ -82,8 +96,18 @@ def test_post_create_transaction_with_invalid_signature(b, client, caplog):
).format(InvalidSignature.__name__)
assert res.status_code == expected_status_code
assert res.json['message'] == expected_error_message
assert caplog.records[0].args['status'] == expected_status_code
assert caplog.records[0].args['message'] == expected_error_message
assert mock_logger.error.called
assert (
'HTTP API error: %(status)s - %(message)s' in
mock_logger.error.call_args[0]
)
assert (
{'message': expected_error_message, 'status': expected_status_code} in
mock_logger.error.call_args[0]
)
# TODO put back caplog based asserts once possible
# assert caplog.records[0].args['status'] == expected_status_code
# assert caplog.records[0].args['message'] == expected_error_message
def test_post_create_transaction_with_invalid_structure(client):
@@ -91,7 +115,8 @@ def test_post_create_transaction_with_invalid_structure(client):
assert res.status_code == 400
def test_post_create_transaction_with_invalid_schema(client, caplog):
@patch('bigchaindb.web.views.base.logger')
def test_post_create_transaction_with_invalid_schema(mock_logger, client):
from bigchaindb.models import Transaction
user_priv, user_pub = crypto.generate_key_pair()
tx = Transaction.create(
@@ -103,8 +128,18 @@ def test_post_create_transaction_with_invalid_schema(client, caplog):
"Invalid transaction schema: 'version' is a required property")
assert res.status_code == expected_status_code
assert res.json['message'] == expected_error_message
assert caplog.records[0].args['status'] == expected_status_code
assert caplog.records[0].args['message'] == expected_error_message
assert mock_logger.error.called
assert (
'HTTP API error: %(status)s - %(message)s' in
mock_logger.error.call_args[0]
)
assert (
{'message': expected_error_message, 'status': expected_status_code} in
mock_logger.error.call_args[0]
)
# TODO put back caplog based asserts once possible
# assert caplog.records[0].args['status'] == expected_status_code
# assert caplog.records[0].args['message'] == expected_error_message
@pytest.mark.parametrize('exc,msg', (
@@ -118,7 +153,8 @@ def test_post_create_transaction_with_invalid_schema(client, caplog):
('TransactionNotInValidBlock', 'Wait, maybe?'),
('ValidationError', '?'),
))
def test_post_invalid_transaction(client, exc, msg, monkeypatch, caplog):
@patch('bigchaindb.web.views.base.logger')
def test_post_invalid_transaction(mock_logger, client, exc, msg, monkeypatch):
from bigchaindb.common import exceptions
exc_cls = getattr(exceptions, exc)
@@ -135,8 +171,18 @@ def test_post_invalid_transaction(client, exc, msg, monkeypatch, caplog):
assert res.status_code == expected_status_code
assert (res.json['message'] ==
'Invalid transaction ({}): {}'.format(exc, msg))
assert caplog.records[2].args['status'] == expected_status_code
assert caplog.records[2].args['message'] == expected_error_message
assert mock_logger.error.called
assert (
'HTTP API error: %(status)s - %(message)s' in
mock_logger.error.call_args[0]
)
assert (
{'message': expected_error_message, 'status': expected_status_code} in
mock_logger.error.call_args[0]
)
# TODO put back caplog based asserts once possible
# assert caplog.records[2].args['status'] == expected_status_code
# assert caplog.records[2].args['message'] == expected_error_message
@pytest.mark.bdb