
Merge branch 'tendermint' into text-edits-in-http-api-docs

Commit 497e1039ef by Troy McConaghy, 2018-02-22 21:02:33 +01:00 (committed by GitHub)
95 changed files with 2597 additions and 1303 deletions


@ -8,15 +8,17 @@ RUN apt-get -qq update \
&& pip install --no-cache-dir . \
&& apt-get autoremove \
&& apt-get clean
VOLUME ["/data", "/certs"]
WORKDIR /data
ENV PYTHONUNBUFFERED 0
ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_PORT 9985
ENV BIGCHAINDB_START_TENDERMINT 0
ENTRYPOINT ["bigchaindb"]
CMD ["start"]


@ -8,7 +8,7 @@
# BigchainDB Server
BigchainDB is a scalable blockchain database. [The whitepaper](https://www.bigchaindb.com/whitepaper/) explains what that means.
BigchainDB is a blockchain database.
## Get Started with BigchainDB Server
@ -21,7 +21,6 @@ BigchainDB is a scalable blockchain database. [The whitepaper](https://www.bigch
## Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup
* [Whitepaper](https://www.bigchaindb.com/whitepaper/) - outlines the motivations, goals and core algorithms of BigchainDB
* [Roadmap](https://github.com/bigchaindb/org/blob/master/ROADMAP.md)
* [Blog](https://medium.com/the-bigchaindb-blog)
* [Twitter](https://twitter.com/BigchainDB)


@ -2,13 +2,11 @@
A high-level description of the files and subdirectories of BigchainDB.
There are three database tables which underpin BigchainDB: `backlog`, where incoming transactions are held temporarily until they can be consumed; `bigchain`, where blocks of transactions are written permanently; and `votes`, where votes are written permanently. It is the votes in the `votes` table which must be queried to determine block validity and order. For more in-depth explanation, see [the whitepaper](https://www.bigchaindb.com/whitepaper/).
## Files
### [`core.py`](./core.py)
The `Bigchain` class is defined here. Most operations outlined in the [whitepaper](https://www.bigchaindb.com/whitepaper/) as well as database interactions are found in this file. This is the place to start if you are interested in implementing a server API, since many of these class methods concern BigchainDB interacting with the outside world.
The `Bigchain` class is defined here. Most node-level operations and database interactions are found in this file. This is the place to start if you are interested in implementing a server API, since many of these class methods concern BigchainDB interacting with the outside world.
### [`models.py`](./models.py)


@ -24,11 +24,8 @@ def store_transaction(conn, signed_transaction):
@register_query(LocalMongoDBConnection)
def store_transactions(conn, signed_transactions):
try:
return conn.run(conn.collection('transactions')
.insert_many(signed_transactions))
except DuplicateKeyError:
pass
return conn.run(conn.collection('transactions')
.insert_many(signed_transactions))
@register_query(LocalMongoDBConnection)
@ -54,12 +51,9 @@ def get_transactions(conn, transaction_ids):
@register_query(LocalMongoDBConnection)
def store_metadatas(conn, metadata):
try:
return conn.run(
conn.collection('metadata')
.insert_many(metadata, ordered=False))
except DuplicateKeyError:
pass
return conn.run(
conn.collection('metadata')
.insert_many(metadata, ordered=False))
@register_query(LocalMongoDBConnection)
@ -82,12 +76,9 @@ def store_asset(conn, asset):
@register_query(LocalMongoDBConnection)
def store_assets(conn, assets):
try:
return conn.run(
conn.collection('assets')
.insert_many(assets, ordered=False))
except DuplicateKeyError:
pass
return conn.run(
conn.collection('assets')
.insert_many(assets, ordered=False))
@register_query(LocalMongoDBConnection)
@ -201,6 +192,40 @@ def get_block_with_transaction(conn, txid):
projection={'_id': False, 'height': True}))
@register_query(LocalMongoDBConnection)
def delete_zombie_transactions(conn):
txns = conn.run(conn.collection('transactions').find({}))
for txn in txns:
txn_id = txn['id']
block = list(get_block_with_transaction(conn, txn_id))
if len(block) == 0:
delete_transaction(conn, txn_id)
def delete_transaction(conn, txn_id):
conn.run(
conn.collection('transactions').delete_one({'id': txn_id}))
conn.run(
conn.collection('assets').delete_one({'id': txn_id}))
conn.run(
conn.collection('metadata').delete_one({'id': txn_id}))
@register_query(LocalMongoDBConnection)
def delete_latest_block(conn):
block = get_latest_block(conn)
txn_ids = block['transactions']
delete_transactions(conn, txn_ids)
conn.run(conn.collection('blocks').delete_one({'height': block['height']}))
@register_query(LocalMongoDBConnection)
def delete_transactions(conn, txn_ids):
conn.run(conn.collection('assets').delete_many({'id': {'$in': txn_ids}}))
conn.run(conn.collection('metadata').delete_many({'id': {'$in': txn_ids}}))
conn.run(conn.collection('transactions').delete_many({'id': {'$in': txn_ids}}))
@register_query(LocalMongoDBConnection)
def store_unspent_outputs(conn, *unspent_outputs):
try:

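For illustration, here is a rough, self-contained sketch of the same "zombie transaction" cleanup written against a plain pymongo client. The database name `bigchain` is an assumption; the collection names match the ones used in the queries above.

```python
# Hedged sketch: delete any transaction (plus its asset and metadata documents)
# that no block references. The database name `bigchain` is assumed.
from pymongo import MongoClient

db = MongoClient().bigchain
for txn in db.transactions.find({}, {'_id': False, 'id': True}):
    # a block document keeps the ids of its transactions in a list,
    # so an equality match doubles as a membership test
    if db.blocks.find_one({'transactions': txn['id']}) is None:
        db.transactions.delete_one({'id': txn['id']})
        db.assets.delete_one({'id': txn['id']})
        db.metadata.delete_one({'id': txn['id']})
```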

@ -549,6 +549,13 @@ def store_block(conn, block):
raise NotImplementedError
@singledispatch
def delete_zombie_transactions(conn):
"""Delete transactions not included in any block"""
raise NotImplementedError
@singledispatch
def store_unspent_outputs(connection, unspent_outputs):
"""Store unspent outputs in ``utxo_set`` table."""
@ -556,6 +563,13 @@ def store_unspent_outputs(connection, unspent_outputs):
raise NotImplementedError
@singledispatch
def delete_latest_block(conn):
"""Delete the latest block along with its transactions"""
raise NotImplementedError
@singledispatch
def delete_unspent_outputs(connection, unspent_outputs):
"""Delete unspent outputs in ``utxo_set`` table."""
@ -563,6 +577,20 @@ def delete_unspent_outputs(connection, unspent_outputs):
raise NotImplementedError
@singledispatch
def delete_transactions(conn, txn_ids):
"""Delete transactions from database
Args:
txn_ids (list): list of transaction ids
Returns:
The result of the operation.
"""
raise NotImplementedError
@singledispatch
def get_unspent_outputs(connection, *, query=None):
"""Retrieves unspent outputs.

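As a side note, here is a minimal, self-contained sketch of the dispatch pattern used by these stubs: a generic function raises ``NotImplementedError`` until a backend registers an implementation for its connection type. The class below is only a stand-in for the real connection class.

```python
from functools import singledispatch

class LocalMongoDBConnection:
    """Stand-in for the real backend connection class."""

@singledispatch
def delete_transactions(conn, txn_ids):
    """Generic stub: no backend registered for this connection type."""
    raise NotImplementedError

@delete_transactions.register(LocalMongoDBConnection)
def _delete_transactions_mongodb(conn, txn_ids):
    # a real implementation would issue delete_many() calls,
    # as in the localmongodb module above
    return 'would delete {} transactions'.format(len(txn_ids))

print(delete_transactions(LocalMongoDBConnection(), ['tx1', 'tx2']))
```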

@ -15,8 +15,10 @@ from bigchaindb.common.exceptions import (StartupError,
KeypairNotFoundException,
DatabaseDoesNotExist)
import bigchaindb
from bigchaindb.tendermint.core import BigchainDB
from bigchaindb import backend
from bigchaindb.backend import schema
from bigchaindb.backend import query
from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas,
remove_replicas)
from bigchaindb.backend.exceptions import OperationError
@ -154,6 +156,19 @@ def run_init(args):
print('If you wish to re-initialize it, first drop it.', file=sys.stderr)
def run_recover(b):
query.delete_zombie_transactions(b.connection)
tendermint_height = b.get_latest_block_height_from_tendermint()
block = b.get_latest_block()
if block:
while block['height'] > tendermint_height:
logger.info('BigchainDB is ahead of tendermint, removing block %s', block['height'])
query.delete_latest_block(b.connection)
block = b.get_latest_block()
@configure_bigchaindb
def run_drop(args):
"""Drop the database"""
@ -178,6 +193,8 @@ def run_start(args):
"""Start the processes to run the node"""
logger.info('BigchainDB Version %s', bigchaindb.__version__)
run_recover(BigchainDB())
if args.allow_temp_keypair:
if not (bigchaindb.config['keypair']['private'] or
bigchaindb.config['keypair']['public']):

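To make the rollback in ``run_recover()`` concrete, here is a toy, self-contained illustration; ``FakeDB`` is hypothetical and only exposes the pieces the recovery loop touches.

```python
class FakeDB:
    """Hypothetical stand-in for the object passed to run_recover()."""
    def __init__(self, local_heights, tendermint_height):
        self.heights = list(local_heights)
        self.tendermint_height = tendermint_height

    def get_latest_block_height_from_tendermint(self):
        return self.tendermint_height

    def get_latest_block(self):
        return {'height': self.heights[-1]} if self.heights else None

    def delete_latest_block(self):
        self.heights.pop()

def recover(b):
    # drop local blocks until we are no longer ahead of Tendermint
    tendermint_height = b.get_latest_block_height_from_tendermint()
    block = b.get_latest_block()
    while block and block['height'] > tendermint_height:
        b.delete_latest_block()
        block = b.get_latest_block()

db = FakeDB([1, 2, 3, 4, 5], tendermint_height=3)
recover(db)
assert db.heights == [1, 2, 3]
```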

@ -4,22 +4,24 @@ This directory contains the schemas for the different JSON documents BigchainDB
The aim is to provide:
- a strict definition of the data structures used in BigchainDB
- a language independent tool to validate the structure of incoming/outgoing
data (there are several ready to use
- a strict definition of the data structures used in BigchainDB,
- a language-independent tool to validate the structure of incoming/outgoing
data. (There are several ready to use
[implementations](http://json-schema.org/implementations.html) written in
different languages)
different languages.)
## Sources
The file defining the JSON Schema for votes (`vote.yaml`) is BigchainDB-specific.
The files defining the JSON Schema for transactions (`transaction_*.yaml`)
are copied from the [IPDB Protocol](https://github.com/ipdb/ipdb-protocol).
If you want to add a new version, you must add it to the IPDB Protocol first.
(You can't change existing versions. Those were used to validate old transactions
are based on the [IPDB Transaction Spec](https://github.com/ipdb/ipdb-tx-spec).
If you want to add a new transaction version,
you must add it to the IPDB Transaction Spec first.
(You can't change the JSON Schema files for old versions.
Those were used to validate old transactions
and are needed to re-check those transactions.)
The file defining the JSON Schema for votes (`vote.yaml`) is BigchainDB-specific.
## Learn about JSON Schema
A good resource is [Understanding JSON Schema](http://spacetelescope.github.io/understanding-json-schema/index.html).
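For example, a quick way to try one of the implementations mentioned above is the Python `jsonschema` package. The schema below is a toy example, not one of the BigchainDB schemas.

```python
import jsonschema

schema = {
    'type': 'object',
    'properties': {'id': {'type': 'string'}},
    'required': ['id'],
}

jsonschema.validate({'id': 'abc'}, schema)    # passes silently
try:
    jsonschema.validate({'id': 123}, schema)  # wrong type for "id"
except jsonschema.ValidationError as exc:
    print('invalid:', exc.message)
```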


@ -7,7 +7,6 @@ import pickle
from socketserver import StreamRequestHandler, ThreadingTCPServer
import struct
import sys
from multiprocessing import Process
from .configs import (
DEFAULT_SOCKET_LOGGING_HOST,
@ -15,6 +14,7 @@ from .configs import (
PUBLISHER_LOGGING_CONFIG,
SUBSCRIBER_LOGGING_CONFIG,
)
from bigchaindb.utils import Process
from bigchaindb.common.exceptions import ConfigurationError
@ -46,6 +46,7 @@ def setup_sub_logger(*, user_log_config=None):
server = LogRecordSocketServer(**kwargs)
with server:
server_proc = Process(
name='logging_server',
target=server.serve_forever,
kwargs={'log_config': user_log_config},
)


@ -1,14 +1,16 @@
import logging
import subprocess
import multiprocessing as mp
from os import getenv
import setproctitle
import bigchaindb
from bigchaindb.tendermint.lib import BigchainDB
from bigchaindb.tendermint.core import App
from bigchaindb.web import server, websocket_server
from bigchaindb.tendermint import event_stream
from bigchaindb.events import Exchange, EventTypes
from bigchaindb.utils import Process
logger = logging.getLogger(__name__)
@ -28,7 +30,6 @@ BANNER = """
def start():
# Exchange object for event stream api
exchange = Exchange()
@ -37,7 +38,7 @@ def start():
settings=bigchaindb.config['server'],
log_config=bigchaindb.config['log'],
bigchaindb_factory=BigchainDB)
p_webapi = mp.Process(name='webapi', target=app_server.run)
p_webapi = Process(name='webapi', target=app_server.run)
p_webapi.start()
# start message
@ -51,15 +52,15 @@ def start():
])
# start websocket server
p_websocket_server = mp.Process(name='ws',
target=websocket_server.start,
args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
p_websocket_server = Process(name='ws',
target=websocket_server.start,
args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
p_websocket_server.start()
# connect to tendermint event stream
p_websocket_client = mp.Process(name='ws_to_tendermint',
target=event_stream.start,
args=(exchange.get_publisher_queue(),))
p_websocket_client = Process(name='ws_to_tendermint',
target=event_stream.start,
args=(exchange.get_publisher_queue(),))
p_websocket_client.start()
# We need to import this after spawning the web server
@ -67,6 +68,8 @@ def start():
# for gevent.
from abci import ABCIServer
setproctitle.setproctitle('bigchaindb')
app = ABCIServer(app=App())
app.run()


@ -50,6 +50,10 @@ class BigchainDB(Bigchain):
"""Submit a valid transaction to the mempool."""
self.post_transaction(transaction, mode)
def get_latest_block_height_from_tendermint(self):
r = requests.get(ENDPOINT + 'status')
return r.json()['result']['latest_block_height']
def store_transaction(self, transaction):
"""Store a valid transaction to the transactions collection."""

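For reference, here is a standalone sketch of the same Tendermint RPC call, assuming a local Tendermint instance with its RPC server on the default port 46657.

```python
import requests

ENDPOINT = 'http://localhost:46657/'

def latest_block_height():
    r = requests.get(ENDPOINT + 'status')
    # same response shape as the method above; newer Tendermint releases
    # nest this value under result.sync_info instead
    return r.json()['result']['latest_block_height']

print(latest_block_height())
```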

@ -3,6 +3,8 @@ import threading
import queue
import multiprocessing as mp
import setproctitle
class ProcessGroup(object):
@ -26,6 +28,16 @@ class ProcessGroup(object):
self.processes.append(proc)
class Process(mp.Process):
"""Wrapper around multiprocessing.Process that uses
setproctitle to set the name of the process when running
the target task."""
def run(self):
setproctitle.setproctitle(self.name)
super().run()
# Inspired by:
# - http://stackoverflow.com/a/24741694/597097
def pool(builder, size, timeout=None):

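A short usage sketch of this wrapper: it behaves exactly like ``multiprocessing.Process``, but renames the OS-level process (as seen in ``ps`` or ``top``) to the given ``name``. The process name below is just an example.

```python
import multiprocessing as mp
import setproctitle

class Process(mp.Process):
    """Same idea as the wrapper above: set the process title before running."""
    def run(self):
        setproctitle.setproctitle(self.name)
        super().run()

def work():
    print('running as', setproctitle.getproctitle())

if __name__ == '__main__':
    p = Process(name='bigchaindb_worker', target=work)  # example name
    p.start()
    p.join()
```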

@ -1,27 +1,42 @@
"""This module provides the blueprint for the votes API endpoints.
For more information please refer to the documentation: http://bigchaindb.com/http-api
"""
from flask import current_app
from flask_restful import Resource, reqparse
from bigchaindb import backend
We might bring back a votes API endpoint in the future, see:
https://github.com/bigchaindb/bigchaindb/issues/2037
"""
from flask import jsonify
from flask_restful import Resource
# from flask import current_app
# from flask_restful import Resource, reqparse
# from bigchaindb import backend
class VotesApi(Resource):
def get(self):
"""API endpoint to get details about votes on a block.
"""API endpoint to get details about votes.
Return:
A list of votes voting for a block with ID ``block_id``.
404 Not Found
"""
parser = reqparse.RequestParser()
parser.add_argument('block_id', type=str, required=True)
# parser = reqparse.RequestParser()
# parser.add_argument('block_id', type=str, required=True)
args = parser.parse_args(strict=True)
# args = parser.parse_args(strict=True)
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
votes = list(backend.query.get_votes_by_block_id(bigchain.connection, args['block_id']))
# pool = current_app.config['bigchain_pool']
# with pool() as bigchain:
# votes = list(backend.query.get_votes_by_block_id(bigchain.connection, args['block_id']))
return votes
# return votes
# Return an HTTP status code 404 Not Found, which means:
# The requested resource could not be found but may be available in the future.
gone = 'The votes endpoint is gone now, but it might return in the future.'
response = jsonify({'message': gone})
response.status_code = 404
return response
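A quick way to see the new behaviour is a sketch like the following; it assumes a local server on the default port 9984 and that the resource is still mounted at ``/api/v1/votes``.

```python
import requests

r = requests.get('http://localhost:9984/api/v1/votes')
print(r.status_code)        # expected: 404
print(r.json()['message'])  # expected: the "gone" message returned above
```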


@ -25,5 +25,6 @@ ENV BIGCHAINDB_TENDERMINT_PORT 46657
RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN find . | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm -rf
RUN pip install --no-cache-dir .[test]
RUN bigchaindb -y configure "$backend"


@ -29,5 +29,6 @@ services:
image: tendermint/tendermint:0.13
volumes:
- ./tmdata/config.toml:/tendermint/config.toml
entrypoint: ''
command: bash -c "tendermint init && tendermint node"
- ./tmdata/genesis.json:/tendermint/genesis.json
- ./tmdata/priv_validator.json:/tendermint/priv_validator.json
entrypoint: ["/bin/tendermint", "node", "--proxy_app=dummy"]


@ -16,6 +16,14 @@ If you want to generate the HTML version of the long-form documentation on your
pip install -r requirements.txt
```
If you're building the *Server* docs (in `docs/server`) then you must also do:
```bash
pip install -e ../../
```
Note: Don't put `-e ../../` in the `requirements.txt` file. That will work locally
but not on ReadTheDocs.
You can then generate the HTML documentation _in that subdirectory_ by doing:
```bash
make html


@ -4,4 +4,4 @@ sphinx-rtd-theme>=0.1.9
sphinxcontrib-napoleon>=0.4.4
sphinxcontrib-httpdomain>=1.5.0
pyyaml>=3.12
bigchaindb
aafigure>=0.6


@ -10,6 +10,7 @@ The following ports should expect unsolicited inbound traffic:
1. **Port 9984** can expect inbound HTTP (TCP) traffic from BigchainDB clients sending transactions to the BigchainDB HTTP API.
1. **Port 9985** can expect inbound WebSocket traffic from BigchainDB clients.
1. **Port 46656** can expect inbound Tendermint P2P traffic from other Tendermint peers.
1. **Port 9986** can expect inbound HTTP (TCP) traffic from clients accessing the Public Key of a Tendermint instance.
All other ports should only get inbound traffic in response to specific requests from inside the node.
@ -49,6 +50,12 @@ You may want to have Gunicorn and the reverse proxy running on different servers
Port 9985 is the default port for the [BigchainDB WebSocket Event Stream API](../websocket-event-stream-api.html).
## Port 9986
Port 9986 is the default port for accessing the Public Key of a Tendermint instance. It is used by an NGINX instance
that runs alongside the Tendermint instance (Pod) and only hosts the Public Key.
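For illustration, an operator could check that the key is being served with something like the sketch below; the host name and the `pub_key.json` path are assumptions and depend on how that NGINX instance is configured.

```python
import requests

# hypothetical FQDN and path; adjust to your deployment
r = requests.get('http://bdb.example.com:9986/pub_key.json')
print(r.status_code, r.text)
```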
## Port 46656
Port 46656 is the default port used by Tendermint Core to communicate with other instances of Tendermint Core (peers).


@ -48,7 +48,7 @@ extensions = [
'sphinx.ext.todo',
'sphinx.ext.napoleon',
'sphinxcontrib.httpdomain',
'sphinx.ext.autosectionlabel',
'aafigure.sphinxext',
# Below are actually build steps made to look like sphinx extensions.
# It was the easiest way to get it running with ReadTheDocs.
'generate_http_server_api_documentation',


@ -1,3 +1,5 @@
.. _the-block-model:
The Block Model
===============
@ -27,5 +29,5 @@ Since the blockchain height increases monotonically the height of block can be r
**transactions**
A list of the :ref:`transactions <The Transaction Model>` included in the block.
A list of the :ref:`transactions <the-transaction-model>` included in the block.
(Each transaction is a JSON object.)


@ -1,3 +1,5 @@
.. _the-transaction-model:
The Transaction Model
=====================


@ -1,3 +1,5 @@
.. _the-websocket-event-stream-api:
The WebSocket Event Stream API
==============================
@ -24,7 +26,7 @@ Determining Support for the Event Stream API
It's a good idea to make sure that the node you're connecting with
has advertised support for the Event Stream API. To do so, send an HTTP GET
request to the node's :ref:`API Root Endpoint`
request to the node's :ref:`api-root-endpoint`
(e.g. ``http://localhost:9984/api/v1/``) and check that the
response contains a ``streams`` property:
@ -61,7 +63,7 @@ Streams will always be under the WebSocket protocol (so ``ws://`` or
API root URL (for example, `validated transactions <#valid-transactions>`_
would be accessible under ``/api/v1/streams/valid_transactions``). If you're
running your own BigchainDB instance and need help determining its root URL,
then see the page titled :ref:`Determining the API Root URL`.
then see the page titled :ref:`determining-the-api-root-url`.
All messages sent in a stream are in the JSON format.
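For example, here is a small sketch of that check. It assumes a node reachable at ``localhost`` on the default port and simply searches the response for a ``streams`` property, since the exact nesting can vary between server versions.

.. code:: python

    import json
    import requests

    root = requests.get('http://localhost:9984/api/v1/').json()
    # search the serialized response instead of assuming a fixed nesting
    advertised = '"streams"' in json.dumps(root)
    print('Event Stream API advertised:', advertised)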


@ -1,3 +1,5 @@
.. _the-http-client-server-api:
The HTTP Client-Server API
==========================
@ -26,8 +28,10 @@ with something like the following in the body:
:language: http
.. _api-root-endpoint:
API Root Endpoint
-------------------
-----------------
If you send an HTTP GET request to the API Root Endpoint
e.g. ``http://localhost:9984/api/v1/``
@ -40,7 +44,7 @@ that allows you to discover the BigchainDB API endpoints:
Transactions
-------------------
------------
.. http:get:: /api/v1/transactions/{transaction_id}
@ -140,7 +144,7 @@ Transactions
.. note::
A client can subscribe to the
:ref:`WebSocket Event Stream API <The WebSocket Event Stream API>`
WebSocket Event Stream API
to listen for committed transactions.
**Example request**:
@ -153,7 +157,6 @@ Transactions
.. literalinclude:: http-samples/post-tx-response.http
:language: http
:resheader Content-Type: ``application/json``
:statuscode 202: The meaning of this response depends on the value
@ -608,7 +611,7 @@ so you can access it from the same machine,
but it won't be directly accessible from the outside world.
(The outside world could connect via a SOCKS proxy or whatnot.)
The documentation about BigchainDB Server :any:`Configuration Settings`
The documentation about BigchainDB Server :doc:`Configuration Settings <server-reference/configuration>`
has a section about how to set ``server.bind`` so as to make
the HTTP API publicly accessible.
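As a small illustration of the transactions endpoint documented above, a GET by transaction id could look like the following sketch; the node address is the default local one and the transaction id is a placeholder.

.. code:: python

    import requests

    TX_ID = '<a real transaction id>'  # placeholder, not a real id
    url = 'http://localhost:9984/api/v1/transactions/' + TX_ID
    r = requests.get(url)
    if r.status_code == 200:
        print(r.json())
    else:
        print('No transaction with that id; status code:', r.status_code)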


@ -1,383 +0,0 @@
Kubernetes Template: Add a BigchainDB Node to an Existing BigchainDB Cluster
============================================================================
This page describes how to deploy a BigchainDB node using Kubernetes,
and how to add that node to an existing BigchainDB cluster.
It assumes you already have a running Kubernetes cluster
where you can deploy the new BigchainDB node.
If you want to deploy the first BigchainDB node in a BigchainDB cluster,
or a stand-alone BigchainDB node,
then see :doc:`the page about that <node-on-kubernetes>`.
Terminology Used
----------------
``existing cluster`` will refer to one of the existing Kubernetes clusters
hosting one of the existing BigchainDB nodes.
``ctx-1`` will refer to the kubectl context of the existing cluster.
``new cluster`` will refer to the new Kubernetes cluster that will run a new
BigchainDB node (including a BigchainDB instance and a MongoDB instance).
``ctx-2`` will refer to the kubectl context of the new cluster.
``new MongoDB instance`` will refer to the MongoDB instance in the new cluster.
``existing MongoDB instance`` will refer to the MongoDB instance in the
existing cluster.
``new BigchainDB instance`` will refer to the BigchainDB instance in the new
cluster.
``existing BigchainDB instance`` will refer to the BigchainDB instance in the
existing cluster.
Below, we refer to multiple files by their directory and filename,
such as ``mongodb/mongo-ext-conn-svc.yaml``. Those files are files in the
`bigchaindb/bigchaindb repository on GitHub
<https://github.com/bigchaindb/bigchaindb/>`_ in the ``k8s/`` directory.
Make sure you're getting those files from the appropriate Git branch on
GitHub, i.e. the branch for the version of BigchainDB that your BigchainDB
cluster is using.
Step 1: Prerequisites
---------------------
* :ref:`List of all the things to be done by each node operator <Things Each Node Operator Must Do>`.
* The public key should be shared offline with the other existing BigchainDB
nodes in the existing BigchainDB cluster.
* You will need the public keys of all the existing BigchainDB nodes.
* A new Kubernetes cluster setup with kubectl configured to access it.
* Some familiarity with deploying a BigchainDB node on Kubernetes.
See our :doc:`other docs about that <node-on-kubernetes>`.
Note: If you are managing multiple Kubernetes clusters, from your local
system, you can run ``kubectl config view`` to list all the contexts that
are available for the local kubectl.
To target a specific cluster, add a ``--context`` flag to the kubectl CLI. For
example:
.. code:: bash
$ kubectl --context ctx-1 apply -f example.yaml
$ kubectl --context ctx-2 apply -f example.yaml
$ kubectl --context ctx-1 proxy --port 8001
$ kubectl --context ctx-2 proxy --port 8002
Step 2: Configure the BigchainDB Node
-------------------------------------
See the section on how to :ref:`configure your BigchainDB node <How to Configure a BigchainDB Node>`.
Step 3: Start the NGINX Service
--------------------------------
Please see the following section:
* :ref:`Start NGINX service <Step 4: Start the NGINX Service>`.
Step 4: Assign DNS Name to the NGINX Public IP
----------------------------------------------
Please see the following section:
* :ref:`Assign DNS to NGINX Public IP <Step 5: Assign DNS Name to the NGINX Public IP>`.
Step 5: Start the MongoDB Kubernetes Service
--------------------------------------------
Please see the following section:
* :ref:`Start the MongoDB Kubernetes Service <Step 6: Start the MongoDB Kubernetes Service>`.
Step 6: Start the BigchainDB Kubernetes Service
-----------------------------------------------
Please see the following section:
* :ref:`Start the BigchainDB Kubernetes Service <Step 7: Start the BigchainDB Kubernetes Service>`.
Step 7: Start the OpenResty Kubernetes Service
----------------------------------------------
Please see the following section:
* :ref:`Start the OpenResty Kubernetes Service <Step 8: Start the OpenResty Kubernetes Service>`.
Step 8: Start the NGINX Kubernetes Deployment
---------------------------------------------
Please see the following section:
* :ref:`Run NGINX deployment <Step 9: Start the NGINX Kubernetes Deployment>`.
Step 9: Create Kubernetes Storage Classes for MongoDB
-----------------------------------------------------
Please see the following section:
* :ref:`Step 10: Create Kubernetes Storage Classes for MongoDB`.
Step 10: Create Kubernetes Persistent Volume Claims
---------------------------------------------------
Please see the following section:
* :ref:`Step 11: Create Kubernetes Persistent Volume Claims`.
Step 11: Start a Kubernetes StatefulSet for MongoDB
---------------------------------------------------
Please see the following section:
* :ref:`Step 12: Start a Kubernetes StatefulSet for MongoDB`.
Step 12: Verify network connectivity between the MongoDB instances
------------------------------------------------------------------
Make sure your MongoDB instances can access each other over the network. *If* you are deploying
the new MongoDB node in a different cluster or geographical location using Azure Kubernetes Container
Service, you will have to set up networking between the two clusters using `Kubernetes
Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
Assume we have an existing MongoDB instance ``mdb-instance-0`` residing in the Azure data center location ``westeurope``, and we
want to add a new MongoDB instance ``mdb-instance-1`` located in the Azure data center location ``eastus`` to the existing MongoDB
replica set. Unless you have already explicitly set up networking for ``mdb-instance-0`` to communicate with ``mdb-instance-1`` and
vice versa, we will have to add a Kubernetes Service in each cluster to set up the
MongoDB replica set.
It is similar to ensuring that there is a ``CNAME`` record in the DNS
infrastructure to resolve ``mdb-instance-X`` to the host where it is actually available.
We can do this in Kubernetes using a Kubernetes Service of ``type``
``ExternalName``.
* This configuration is located in the file ``mongodb/mongo-ext-conn-svc.yaml``.
* Set the name of the ``metadata.name`` to the host name of the MongoDB instance you are trying to connect to.
For instance if you are configuring this service on cluster with ``mdb-instance-0`` then the ``metadata.name`` will
be ``mdb-instance-1`` and vice versa.
* Set ``spec.ports.port[0]`` to the ``mongodb-backend-port`` from the ConfigMap for the other cluster.
* Set ``spec.externalName`` to the FQDN mapped to NGINX Public IP of the cluster you are trying to connect to.
For more information about the FQDN please refer to: :ref:`Assign DNS Name to the NGINX Public
IP <Step 5: Assign DNS Name to the NGINX Public IP>`
.. note::
This operation needs to be replicated ``n-1`` times per node for a ``n`` node cluster, with the respective FQDNs
we need to communicate with.
If you are not the system administrator of the cluster, you have to get in
touch with the system administrator/s of the other ``n-1`` clusters and
share with them your instance name (``mdb-instance-name`` in the ConfigMap)
and the FQDN for your node (``cluster-fqdn`` in the ConfigMap).
Step 13: Add the New MongoDB Instance to the Existing Replica Set
-----------------------------------------------------------------
Note that by ``replica set``, we are referring to the MongoDB replica set,
not a Kubernetes' ``ReplicaSet``.
If you are not the administrator of an existing BigchainDB node, you
will have to coordinate offline with an existing administrator so that they can
add the new MongoDB instance to the replica set.
Add the new instance of MongoDB from an existing instance by accessing the
``mongo`` shell and authenticate as the ``adminUser`` we created for existing MongoDB instance OR
contact the admin of the PRIMARY MongoDB node:
.. code:: bash
$ kubectl --context ctx-1 exec -it <existing mongodb-instance-name> bash
$ mongo --host <existing mongodb-instance-name> --port 27017 --verbose --ssl \
--sslCAFile /etc/mongod/ssl/ca.pem \
--sslPEMKeyFile /etc/mongod/ssl/mdb-instance.pem
PRIMARY> use admin
PRIMARY> db.auth("adminUser", "superstrongpassword")
One can only add members to a replica set from the ``PRIMARY`` instance.
The ``mongo`` shell prompt should state that this is the primary member in the
replica set.
If not, then you can use the ``rs.status()`` command to find out who the
primary is and login to the ``mongo`` shell in the primary.
Run the ``rs.add()`` command with the FQDN and port number of the other instances:
.. code:: bash
PRIMARY> rs.add("<new mdb-instance-name>:<port>")
Step 14: Verify the Replica Set Membership
------------------------------------------
You can use the ``rs.conf()`` and the ``rs.status()`` commands available in the
mongo shell to verify the replica set membership.
The new MongoDB instance should be listed in the membership information
displayed.
Step 15: Configure Users and Access Control for MongoDB
-------------------------------------------------------
* Create the users in MongoDB with the appropriate roles assigned to them. This
will enable the new BigchainDB instance, new MongoDB Monitoring Agent
instance and the new MongoDB Backup Agent instance to function correctly.
* Please refer to
:ref:`Configure Users and Access Control for MongoDB <Step 13: Configure
Users and Access Control for MongoDB>` to create and configure the new
BigchainDB, MongoDB Monitoring Agent and MongoDB Backup Agent users on the
cluster.
.. note::
You will not have to create the MongoDB replica set or create the admin user, as they already exist.
If you do not have access to the ``PRIMARY`` member of the replica set, you
need to get in touch with the administrator who can create the users in the
MongoDB cluster.
Step 16: Start a Kubernetes Deployment for MongoDB Monitoring Agent
-------------------------------------------------------------------
Please see the following section:
* :ref:`Step 14: Start a Kubernetes Deployment for MongoDB Monitoring Agent`.
.. note::
Every MMS group has only one active Monitoring and Backup Agent and having
multiple agents provides high availability and failover, in case one goes
down. For more information about Monitoring and Backup Agents please
consult the `official MongoDB documentation
<https://docs.cloudmanager.mongodb.com/tutorial/move-agent-to-new-server/>`_.
Step 17: Start a Kubernetes Deployment for MongoDB Backup Agent
---------------------------------------------------------------
Please see the following section:
* :ref:`Step 15: Start a Kubernetes Deployment for MongoDB Backup Agent`.
.. note::
Every MMS group has only one active Monitoring and Backup Agent and having
multiple agents provides high availability and failover, in case one goes
down. For more information about Monitoring and Backup Agents please
consult the `official MongoDB documentation
<https://docs.cloudmanager.mongodb.com/tutorial/move-agent-to-new-server/>`_.
Step 18: Start a Kubernetes Deployment for BigchainDB
-----------------------------------------------------
* Set ``metadata.name`` and ``spec.template.metadata.labels.app`` to the
value set in ``bdb-instance-name`` in the ConfigMap, followed by
``-dep``.
For example, if the value set in the
``bdb-instance-name`` is ``bdb-instance-0``, set the fields to the
value ``bdb-instance-0-dep``.
* Set the value of ``BIGCHAINDB_KEYPAIR_PRIVATE`` (not base64-encoded).
(In the future, we'd like to pull the BigchainDB private key from
the Secret named ``bdb-private-key``, but a Secret can only be mounted as a file,
so BigchainDB Server would have to be modified to look for it
in a file.)
* As we gain more experience running BigchainDB in testing and production,
we will tweak the ``resources.limits`` values for CPU and memory, and as
richer monitoring and probing becomes available in BigchainDB, we will
tweak the ``livenessProbe`` and ``readinessProbe`` parameters.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 2 ports -
``bigchaindb-api-port`` and ``bigchaindb-ws-port``. Set them to the
values specified in the ConfigMap.
* Uncomment the env var ``BIGCHAINDB_KEYRING``, it will pick up the
``:`` delimited list of all the public keys in the BigchainDB cluster from the ConfigMap.
Create the required Deployment using:
.. code:: bash
$ kubectl --context ctx-2 apply -f bigchaindb-dep.yaml
You can check its status using the command ``kubectl --context ctx-2 get deploy -w``
Step 19: Restart the Existing BigchainDB Instance(s)
----------------------------------------------------
* Add the public key of the new BigchainDB instance to the ConfigMap
``bdb-keyring`` variable of all the existing BigchainDB instances.
Update all the existing ConfigMap using:
.. code:: bash
$ kubectl --context ctx-1 apply -f configuration/config-map.yaml
* Uncomment the ``BIGCHAINDB_KEYRING`` variable from the
``bigchaindb/bigchaindb-dep.yaml`` to refer to the keyring updated in the
ConfigMap.
Update the running BigchainDB instance using:
.. code:: bash
$ kubectl --context ctx-1 delete -f bigchaindb/bigchaindb-dep.yaml
$ kubectl --context ctx-1 apply -f bigchaindb/bigchaindb-dep.yaml
See the page titled :ref:`How to Configure a BigchainDB Node` for more information about
ConfigMap configuration.
You can SSH to an existing BigchainDB instance and run the ``bigchaindb
show-config`` command to check that the keyring is updated.
Step 20: Start a Kubernetes Deployment for OpenResty
----------------------------------------------------
Please see the following section:
* :ref:`Step 17: Start a Kubernetes Deployment for OpenResty`.
Step 21: Configure the MongoDB Cloud Manager
--------------------------------------------
* MongoDB Cloud Manager auto-detects the members of the replica set and
configures the agents to act as a master/slave accordingly.
* You can verify that the new MongoDB instance is detected by the
Monitoring and Backup Agent using the Cloud Manager UI.
Step 22: Test Your New BigchainDB Node
--------------------------------------
* Please refer to the testing steps :ref:`here <Step 19: Verify the BigchainDB
Node Setup>` to verify that your new BigchainDB node is working as expected.


@ -1,20 +1,144 @@
Architecture of a Testnet Node
==============================
Architecture of a BigchainDB Node
==================================
Each node in the `BigchainDB Testnet <https://testnet.bigchaindb.com/>`_
is hosted on a Kubernetes cluster and includes:
A BigchainDB Production deployment is hosted on a Kubernetes cluster and includes:
* NGINX, OpenResty, BigchainDB and MongoDB
* NGINX, OpenResty, BigchainDB, MongoDB and Tendermint
`Kubernetes Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
* NGINX, OpenResty, BigchainDB, Monitoring Agent and Backup Agent
* NGINX, OpenResty, BigchainDB and MongoDB Monitoring Agent.
`Kubernetes Deployments <https://kubernetes.io/docs/concepts/workloads/controllers/deployment/>`_.
* MongoDB `Kubernetes StatefulSet <https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/>`_.
* MongoDB and Tendermint `Kubernetes StatefulSet <https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/>`_.
* Third party services like `3scale <https://3scale.net>`_,
`MongoDB Cloud Manager <https://cloud.mongodb.com>`_ and the
`Azure Operations Management Suite
<https://docs.microsoft.com/en-us/azure/operations-management-suite/>`_.
.. image:: ../_static/arch.jpg
.. _bigchaindb-node:
BigchainDB Node
---------------
.. aafig::
:aspect: 60
:scale: 100
:background: #rgb
:proportional:
+ +
+--------------------------------------------------------------------------------------------------------------------------------------+
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| "BigchainDB API" | | "Tendermint P2P" |
| | | "Communication/" |
| | | "Public Key Exchange" |
| | | |
| | | |
| v v |
| |
| +------------------+ |
| |"NGINX Service" | |
| +-------+----------+ |
| | |
| v |
| |
| +------------------+ |
| | "NGINX" | |
| | "Deployment" | |
| | | |
| +-------+----------+ |
| | |
| | |
| | |
| v |
| |
| "443" +----------+ "46656/9986" |
| | "Rate" | |
| +---------------------------+"Limiting"+-----------------------+ |
| | | "Logic" | | |
| | +----+-----+ | |
| | | | |
| | | | |
| | | | |
| | | | |
| | | | |
| | "27017" | | |
| v | v |
| +-------------+ | +------------+ |
| |"HTTPS" | | +------------------> |"Tendermint"| |
| |"Termination"| | | "9986" |"Service" | "46656" |
| | | | | +-------+ | <----+ |
| +-----+-------+ | | | +------------+ | |
| | | | | | |
| | | | v v |
| | | | +------------+ +------------+ |
| | | | |"NGINX" | |"Tendermint"| |
| | | | |"Deployment"| |"Stateful" | |
| | | | |"Pub-Key-Ex"| |"Set" | |
| ^ | | +------------+ +------------+ |
| +-----+-------+ | | |
| "POST" |"Analyze" | "GET" | | |
| |"Request" | | | |
| +-----------+ +--------+ | | |
| | +-------------+ | | | |
| | | | | "Bi+directional, communication between" |
| | | | | "BigchainDB(APP) and Tendermint" |
| | | | | "BFT consensus Engine" |
| | | | | |
| v v | | |
| | | |
| +-------------+ +--------------+ +----+-------------------> +--------------+ |
| | "OpenResty" | | "BigchainDB" | | | "MongoDB" | |
| | "Service" | | "Service" | | | "Service" | |
| | | +----->| | | +-------> | | |
| +------+------+ | +------+-------+ | | +------+-------+ |
| | | | | | | |
| | | | | | | |
| v | v | | v |
| +-------------+ | +-------------+ | | +----------+ |
| | | | | | <------------+ | |"MongoDB" | |
| |"OpenResty" | | | "BigchainDB"| | |"Stateful"| |
| |"Deployment" | | | "Deployment"| | |"Set" | |
| | | | | | | +-----+----+ |
| | | | | +---------------------------+ | |
| | | | | | | |
| +-----+-------+ | +-------------+ | |
| | | | |
| | | | |
| v | | |
| +-----------+ | v |
| | "Auth" | | +------------+ |
| | "Logic" |----------+ |"MongoDB" | |
| | | |"Monitoring"| |
| | | |"Agent" | |
| +---+-------+ +-----+------+ |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
+---------------+---------------------------------------------------------------------------------------+------------------------------+
| |
| |
| |
v v
+------------------------------------+ +------------------------------------+
| | | |
| | | |
| | | |
| "3Scale" | | "MongoDB Cloud" |
| | | |
| | | |
| | | |
+------------------------------------+ +------------------------------------+
.. note::
The arrows in the diagram represent the client-server communication. For
@ -23,8 +147,8 @@ is hosted on a Kubernetes cluster and includes:
fully duplex.
NGINX
-----
NGINX: Entrypoint and Gateway
-----------------------------
We use NGINX as an HTTP proxy on port 443 (configurable) at the cloud
entrypoint for:
@ -52,8 +176,8 @@ entrypoint for:
public api port), the connection is proxied to the MongoDB Service.
OpenResty
---------
OpenResty: API Management, Authentication and Authorization
-----------------------------------------------------------
We use `OpenResty <https://openresty.org/>`_ to perform authorization checks
with 3scale using the ``app_id`` and ``app_key`` headers in the HTTP request.
@ -64,13 +188,23 @@ on the LuaJIT compiler to execute the functions to authenticate the ``app_id``
and ``app_key`` with the 3scale backend.
MongoDB
-------
MongoDB: Standalone
-------------------
We use MongoDB as the backend database for BigchainDB.
In a multi-node deployment, MongoDB members communicate with each other via the
public port exposed by the NGINX Service.
We achieve security by avoiding DoS attacks at the NGINX proxy layer and by
ensuring that MongoDB has TLS enabled for all its connections.
Tendermint: BFT consensus engine
--------------------------------
We use Tendermint as the backend consensus engine for BFT replication of BigchainDB.
In a multi-node deployment, Tendermint nodes/peers communicate with each other via
the public ports exposed by the NGINX gateway.
We use port **9986** (configurable) to allow Tendermint nodes to access the public keys
of the peers, and port **46656** (configurable) for the rest of the communication between
the peers.
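As a rough illustration (not part of the deployment itself), an operator could check that both Tendermint-related ports are reachable through the NGINX gateway with a sketch like the following; the host name is a placeholder.

.. code:: python

    import socket

    HOST = 'bdb.example.com'  # placeholder FQDN of the NGINX gateway
    for port in (9986, 46656):
        with socket.socket() as s:
            s.settimeout(3)
            reachable = s.connect_ex((HOST, port)) == 0
            print(port, 'reachable' if reachable else 'not reachable')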


@ -0,0 +1,546 @@
.. _kubernetes-template-deploy-bigchaindb-network:
Kubernetes Template: Deploying a BigchainDB network
===================================================
This page describes how to deploy a static BigchainDB + Tendermint network.
If you want to deploy a single BigchainDB node in a BigchainDB cluster,
or a stand-alone BigchainDB node,
then see :doc:`the page about that <node-on-kubernetes>`.
We can use this guide to deploy a BigchainDB network in the following scenarios:
* Single Azure Kubernetes Site.
* Multiple Azure Kubernetes Sites (Geographically dispersed).
Terminology Used
----------------
``BigchainDB node`` is a set of Kubernetes components that join together to
form a single BigchainDB node. Please refer to the :doc:`architecture diagram <architecture>`
for more details.
``BigchainDB network`` will refer to a collection of nodes working together
to form a network.
Below, we refer to multiple files by their directory and filename,
such as ``tendermint/tendermint-ext-conn-svc.yaml``. Those files are located in the
`bigchaindb/bigchaindb repository on GitHub
<https://github.com/bigchaindb/bigchaindb/>`_ in the ``k8s/`` directory.
Make sure you're getting those files from the appropriate Git branch on
GitHub, i.e. the branch for the version of BigchainDB that your BigchainDB
cluster is using.
.. note::
This deployment strategy is currently used for testing purposes only,
and is operated by a single stakeholder or a group of tightly coupled stakeholders.
.. note::
Currently, we only support a static set of participants in the network.
Once a BigchainDB network is started with a certain number of validators
and a genesis file, users cannot add new validator nodes dynamically.
You can track the progress of this functionality in our
`GitHub repository <https://github.com/bigchaindb/bigchaindb/milestones>`_.
.. _pre-reqs-bdb-network:
Prerequisites
-------------
The deployment methodology is similar to the one covered in :doc:`node-on-kubernetes`, but
we need to tweak some configurations depending on your choice of deployment.
The operator needs to follow a consistent naming convention for all the components
covered :ref:`here <things-each-node-operator-must-do>`.
Let's assume we are deploying a 4-node cluster; your naming convention could look like this:
.. code::
{
"MongoDB": [
"mdb-instance-1",
"mdb-instance-2",
"mdb-instance-3",
"mdb-instance-4"
],
"BigchainDB": [
"bdb-instance-1",
"bdb-instance-2",
"bdb-instance-3",
"bdb-instance-4"
],
"NGINX": [
"ngx-instance-1",
"ngx-instance-2",
"ngx-instance-3",
"ngx-instance-4"
],
"OpenResty": [
"openresty-instance-1",
"openresty-instance-2",
"openresty-instance-3",
"openresty-instance-4"
],
"MongoDB_Monitoring_Agent": [
"mdb-mon-instance-1",
"mdb-mon-instance-2",
"mdb-mon-instance-3",
"mdb-mon-instance-4"
],
"Tendermint": [
"tendermint-instance-1",
"tendermint-instance-2",
"tendermint-instance-3",
"tendermint-instance-4"
]
}
.. note::
Blockchain Genesis ID and Time will be shared across all nodes.
Edit config.yaml and secret.yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Make N (number of nodes) copies of ``configuration/config-map.yaml`` and ``configuration/secret.yaml``.
.. code:: text
# For config-map.yaml
config-map-node-1.yaml
config-map-node-2.yaml
config-map-node-3.yaml
config-map-node-4.yaml
# For secret.yaml
secret-node-1.yaml
secret-node-2.yaml
secret-node-3.yaml
secret-node-4.yaml
Edit the data values as described in :doc:`this document <node-config-map-and-secrets>`, based
on the naming convention described :ref:`above <pre-reqs-bdb-network>`.
**Only for single site deployments**: Since all the configuration files use the
same ConfigMap and Secret Keys i.e.
``metadata.name -> vars, bdb-config and tendermint-config`` and
``metadata.name -> cloud-manager-credentials, mdb-certs, mdb-mon-certs, bdb-certs,``
``https-certs, three-scale-credentials, ca-auth`` respectively, each file
will overwrite the configuration of the previously deployed one.
We want each node to have its own unique configuration.
One way to achieve this is to edit the ConfigMap and Secret keys
according to the :ref:`naming convention above <pre-reqs-bdb-network>`.
.. code:: text
# For config-map-node-1.yaml
metadata.name: vars -> vars-node-1
metadata.name: bdb-config -> bdb-config-node-1
metadata.name: tendermint-config -> tendermint-config-node-1
# For secret-node-1.yaml
metadata.name: cloud-manager-credentials -> cloud-manager-credentials-node-1
metadata.name: mdb-certs -> mdb-certs-node-1
metadata.name: mdb-mon-certs -> mdb-mon-certs-node-1
metadata.name: bdb-certs -> bdb-certs-node-1
metadata.name: https-certs -> https-certs-node-1
metadata.name: threescale-credentials -> threescale-credentials-node-1
metadata.name: ca-auth -> ca-auth-node-1
# Repeat for the remaining files.
Deploy all your configuration maps and secrets.
.. code:: bash
kubectl apply -f configuration/config-map-node-1.yaml
kubectl apply -f configuration/config-map-node-2.yaml
kubectl apply -f configuration/config-map-node-3.yaml
kubectl apply -f configuration/config-map-node-4.yaml
kubectl apply -f configuration/secret-node-1.yaml
kubectl apply -f configuration/secret-node-2.yaml
kubectl apply -f configuration/secret-node-3.yaml
kubectl apply -f configuration/secret-node-4.yaml
.. note::
Similar to what we did with config-map.yaml and secret.yaml, i.e. indexing them
per node, we have to do the same for each Kubernetes component,
i.e. Services, StorageClasses, PersistentVolumeClaims, StatefulSets, Deployments, etc.
.. code:: text
# For Services
*-node-1-svc.yaml
*-node-2-svc.yaml
*-node-3-svc.yaml
*-node-4-svc.yaml
# For StorageClasses
*-node-1-sc.yaml
*-node-2-sc.yaml
*-node-3-sc.yaml
*-node-4-sc.yaml
# For PersistentVolumeClaims
*-node-1-pvc.yaml
*-node-2-pvc.yaml
*-node-3-pvc.yaml
*-node-4-pvc.yaml
# For StatefulSets
*-node-1-ss.yaml
*-node-2-ss.yaml
*-node-3-ss.yaml
*-node-4-ss.yaml
# For Deployments
*-node-1-dep.yaml
*-node-2-dep.yaml
*-node-3-dep.yaml
*-node-4-dep.yaml
.. _single-site-network:
Single Site: Single Azure Kubernetes Cluster
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For the deployment of a BigchainDB network under a single cluster, we need to replicate
the :doc:`deployment steps for each node <node-on-kubernetes>` N times, N being
the number of participants in the network.
In our Kubernetes deployment template for a single BigchainDB node, we covered the basic configuration
settings :ref:`here <how-to-configure-a-bigchaindb-node>`.
Since we index the ConfigMap and Secret Keys for the single site deployment, we need to update
all the Kubernetes components to reflect the corresponding changes, i.e. for each Kubernetes Service,
StatefulSet, PersistentVolumeClaim, Deployment, and StorageClass, we need to update the respective
`*.yaml` file and update the ``ConfigMapKeyRef.name`` or ``secret.secretName``.
Example
"""""""
Assume we are deploying the MongoDB StatefulSet for Node 1. We need to update
``mongo-node-1-ss.yaml`` with the corresponding ``ConfigMapKeyRef.name`` or ``secret.secretName`` values.
.. code:: text
########################################################################
# This YAML file describes a StatefulSet with a service for running and #
# exposing a MongoDB instance. #
# It depends on the configdb and db k8s pvc. #
########################################################################
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: mdb-instance-0-ss
namespace: default
spec:
serviceName: mdb-instance-0
replicas: 1
template:
metadata:
name: mdb-instance-0-ss
labels:
app: mdb-instance-0-ss
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb
image: bigchaindb/mongodb:3.2
imagePullPolicy: IfNotPresent
env:
- name: MONGODB_FQDN
valueFrom:
configMapKeyRef:
name: vars-1 # Changed from ``vars``
key: mdb-instance-name
- name: MONGODB_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MONGODB_PORT
valueFrom:
configMapKeyRef:
name: vars-1 # Changed from ``vars``
key: mongodb-backend-port
- name: STORAGE_ENGINE_CACHE_SIZE
valueFrom:
configMapKeyRef:
name: vars-1 # Changed from ``vars``
key: storage-engine-cache-size
args:
- --mongodb-port
- $(MONGODB_PORT)
- --mongodb-key-file-path
- /etc/mongod/ssl/mdb-instance.pem
- --mongodb-ca-file-path
- /etc/mongod/ca/ca.pem
- --mongodb-crl-file-path
- /etc/mongod/ca/crl.pem
- --mongodb-fqdn
- $(MONGODB_FQDN)
- --mongodb-ip
- $(MONGODB_POD_IP)
- --storage-engine-cache-size
- $(STORAGE_ENGINE_CACHE_SIZE)
securityContext:
capabilities:
add:
- FOWNER
ports:
- containerPort: "<mongodb-backend-port from ConfigMap>"
protocol: TCP
name: mdb-api-port
volumeMounts:
- name: mdb-db
mountPath: /data/db
- name: mdb-configdb
mountPath: /data/configdb
- name: mdb-certs
mountPath: /etc/mongod/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/mongod/ca/
readOnly: true
resources:
limits:
cpu: 200m
memory: 5G
livenessProbe:
tcpSocket:
port: mdb-api-port
initialDelaySeconds: 15
successThreshold: 1
failureThreshold: 3
periodSeconds: 15
timeoutSeconds: 10
restartPolicy: Always
volumes:
- name: mdb-db
persistentVolumeClaim:
claimName: mongo-db-claim-1 # Changed from ``mongo-db-claim``
- name: mdb-configdb
persistentVolumeClaim:
claimName: mongo-configdb-claim-1 # Changed from ``mongo-configdb-claim``
- name: mdb-certs
secret:
secretName: mdb-certs-1 # Changed from ``mdb-certs``
defaultMode: 0400
- name: ca-auth
secret:
secretName: ca-auth-1 # Changed from ``ca-auth``
defaultMode: 0400
The above example is meant to be repeated for all the Kubernetes components of a BigchainDB node.
* ``nginx-http/nginx-http-node-X-svc.yaml`` or ``nginx-https/nginx-https-node-X-svc.yaml``
* ``nginx-http/nginx-http-node-X-dep.yaml`` or ``nginx-https/nginx-https-node-X-dep.yaml``
* ``mongodb/mongodb-node-X-svc.yaml``
* ``mongodb/mongodb-node-X-sc.yaml``
* ``mongodb/mongodb-node-X-pvc.yaml``
* ``mongodb/mongodb-node-X-ss.yaml``
* ``tendermint/tendermint-node-X-svc.yaml``
* ``tendermint/tendermint-node-X-sc.yaml``
* ``tendermint/tendermint-node-X-pvc.yaml``
* ``tendermint/tendermint-node-X-ss.yaml``
* ``bigchaindb/bigchaindb-node-X-svc.yaml``
* ``bigchaindb/bigchaindb-node-X-dep.yaml``
* ``nginx-openresty/nginx-openresty-node-X-svc.yaml``
* ``nginx-openresty/nginx-openresty-node-X-dep.yaml``
Multi Site: Multiple Azure Kubernetes Clusters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For the multi-site deployment of a BigchainDB network with geographically dispersed
nodes, we need to replicate the :doc:`deployment steps for each node <node-on-kubernetes>` N times,
N being the number of participants in the network.
The operator needs to follow a consistent naming convention, which was :ref:`already
discussed in this document <pre-reqs-bdb-network>`.
.. note::
Assuming we are using independent Kubernetes clusters, the ConfigMap and Secret Keys
do not need to be updated, unlike in :ref:`single-site-network`, and we also do not
need to update the corresponding ConfigMap/Secret imports in the Kubernetes components.
Deploy Kubernetes Services
--------------------------
Deploy the following services for each node by following the naming convention
described :ref:`above <pre-reqs-bdb-network>`:
* :ref:`Start the NGINX Service <start-the-nginx-service>`.
* :ref:`Assign DNS Name to the NGINX Public IP <assign-dns-name-to-nginx-public-ip>`
* :ref:`Start the MongoDB Kubernetes Service <start-the-mongodb-kubernetes-service>`.
* :ref:`Start the BigchainDB Kubernetes Service <start-the-bigchaindb-kubernetes-service>`.
* :ref:`Start the OpenResty Kubernetes Service <start-the-openresty-kubernetes-service>`.
* :ref:`Start the Tendermint Kubernetes Service <start-the-tendermint-kubernetes-service>`.
Only for multi site deployments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We need to make sure that the clusters are able
to talk to each other, specifically to enable communication between the
Tendermint peers. Set up networking between the clusters using
`Kubernetes Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
Assume we have a Tendermint instance ``tendermint-instance-1`` residing in the Azure data center location ``westeurope``, and we
want to connect to ``tendermint-instance-2``, ``tendermint-instance-3``, and ``tendermint-instance-4`` located in the Azure data centers
``eastus``, ``centralus`` and ``westus``, respectively. Unless you have already explicitly set up networking for
``tendermint-instance-1`` to communicate with ``tendermint-instance-2/3/4`` and
vice versa, we will have to add a Kubernetes Service in each cluster to set up the
Tendermint P2P network.
It is similar to ensuring that there is a ``CNAME`` record in the DNS
infrastructure to resolve ``tendermint-instance-X`` to the host where it is actually available.
We can do this in Kubernetes using a Kubernetes Service of ``type``
``ExternalName``.
* This configuration is located in the file ``tendermint/tendermint-ext-conn-svc.yaml``.
* Set ``metadata.name`` to the host name of the Tendermint instance you are trying to connect to.
For instance if you are configuring this service on cluster with ``tendermint-instance-1`` then the ``metadata.name`` will
be ``tendermint-instance-2`` and vice versa.
* Set ``spec.ports.port[0]`` to the ``tm-p2p-port`` from the ConfigMap for the other cluster.
* Set ``spec.ports.port[1]`` to the ``tm-rpc-port`` from the ConfigMap for the other cluster.
* Set ``spec.externalName`` to the FQDN mapped to NGINX Public IP of the cluster you are trying to connect to.
For more information about the FQDN please refer to: :ref:`Assign DNS name to NGINX Public
IP <assign-dns-name-to-nginx-public-ip>`.
.. note::
This operation needs to be replicated ``n-1`` times per node for a ``n`` node cluster, with the respective FQDNs
we need to communicate with.
If you are not the system administrator of the cluster, you have to get in
touch with the system administrator/s of the other ``n-1`` clusters and
share with them your instance name (``tendermint-instance-name`` in the ConfigMap)
and the FQDN of the NGINX instance acting as Gateway(set in: :ref:`Assign DNS name to NGINX
Public IP <assign-dns-name-to-nginx-public-ip>`).
Start NGINX Kubernetes deployments
----------------------------------
Start the NGINX deployment that serves as a Gateway for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`Start the NGINX Kubernetes Deployment <start-the-nginx-deployment>`.
Deploy Kubernetes StorageClasses for MongoDB and Tendermint
-----------------------------------------------------------
Deploy the following StorageClasses for each node by following the naming convention
described :ref:`above <pre-reqs-bdb-network>`:
* :ref:`Create Kubernetes Storage Classes for MongoDB <create-kubernetes-storage-class-mdb>`.
* :ref:`Create Kubernetes Storage Classes for Tendermint <create-kubernetes-storage-class>`.
Deploy Kubernetes PersistentVolumeClaims for MongoDB and Tendermint
--------------------------------------------------------------------
Deploy the following PersistentVolumeClaims for each node by following the naming convention
described :ref:`above <pre-reqs-bdb-network>`:
* :ref:`Create Kubernetes Persistent Volume Claims for MongoDB <create-kubernetes-persistent-volume-claim-mdb>`.
* :ref:`Create Kubernetes Persistent Volume Claims for Tendermint <create-kubernetes-persistent-volume-claim>`
Deploy MongoDB Kubernetes StatefulSet
--------------------------------------
Deploy the MongoDB StatefulSet (standalone MongoDB) for each node by following the naming convention
described :ref:`above <pre-reqs-bdb-network>` and referring to the following section:
* :ref:`Start a Kubernetes StatefulSet for MongoDB <start-kubernetes-stateful-set-mongodb>`.
Configure Users and Access Control for MongoDB
----------------------------------------------
Configure users and access control for each MongoDB instance
in the network by referring to the following section:
* :ref:`Configure Users and Access Control for MongoDB <configure-users-and-access-control-mongodb>`.
Deploy Tendermint Kubernetes StatefulSet
----------------------------------------
Deploy the Tendermint StatefulSet for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`create-kubernetes-stateful-set`.
Start Kubernetes Deployment for MongoDB Monitoring Agent
---------------------------------------------------------
Start the MongoDB monitoring agent Kubernetes deployment for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`Start a Kubernetes Deployment for MongoDB Monitoring Agent <start-kubernetes-deployment-for-mdb-mon-agent>`.
Start Kubernetes Deployment for BigchainDB
------------------------------------------
Start the BigchainDB Kubernetes deployment for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`Start a Kubernetes Deployment for BigchainDB <start-kubernetes-deployment-bdb>`.
Start Kubernetes Deployment for OpenResty
------------------------------------------
Start the OpenResty Kubernetes deployment for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`Start a Kubernetes Deployment for OpenResty <start-kubernetes-deployment-openresty>`.
Verify and Test
---------------
Verify and test your setup by referring to the following instructions:
* :ref:`Verify the BigchainDB Node Setup <verify-and-test-bdb>`.

View File

@ -1,3 +1,5 @@
.. _how-to-set-up-a-self-signed-certificate-authority:
How to Set Up a Self-Signed Certificate Authority
=================================================
@ -18,7 +20,7 @@ First create a directory for the CA and cd into it:
cd bdb-cluster-ca
Then :ref:`install and configure Easy-RSA in that directory <How to Install & Configure Easy-RSA>`.
Then :ref:`install and configure Easy-RSA in that directory <how-to-install-and-configure-easyrsa>`.
Step 2: Create a Self-Signed CA

View File

@ -1,3 +1,5 @@
.. _how-to-generate-a-client-certificate-for-mongodb:
How to Generate a Client Certificate for MongoDB
================================================
@ -17,7 +19,7 @@ First create a directory for the client certificate and cd into it:
cd client-cert
Then :ref:`install and configure Easy-RSA in that directory <How to Install & Configure Easy-RSA>`.
Then :ref:`install and configure Easy-RSA in that directory <how-to-install-and-configure-easyrsa>`.
Step 2: Create the Client Private Key and CSR

View File

@ -1,8 +1,10 @@
Configure MongoDB Cloud Manager for Monitoring and Backup
=========================================================
.. _configure-mongodb-cloud-manager-for-monitoring:
Configure MongoDB Cloud Manager for Monitoring
==============================================
This document details the steps required to configure MongoDB Cloud Manager to
enable monitoring and backup of data in a MongoDB Replica Set.
enable monitoring of data in a MongoDB Replica Set.
Configure MongoDB Cloud Manager for Monitoring
@ -58,39 +60,3 @@ Configure MongoDB Cloud Manager for Monitoring
* Verify on the UI that data is being sent by the monitoring agent to the
Cloud Manager. It may take up to 5 minutes for data to appear on the UI.
Configure MongoDB Cloud Manager for Backup
------------------------------------------
* Once the Backup Agent is up and running, open
`MongoDB Cloud Manager <https://cloud.mongodb.com>`_.
* Click ``Login`` under ``MongoDB Cloud Manager`` and log in to the Cloud
Manager.
* Select the group from the dropdown box on the page.
* Click ``Backup`` tab.
* Hover over the ``Status`` column of your backup and click ``Start``
to start the backup.
* Select the replica set on the side pane.
* If you have authentication enabled, select the authentication mechanism as
per your deployment. The default BigchainDB production deployment currently
supports ``X.509 Client Certificate`` as the authentication mechanism.
* If you have TLS enabled, select the checkbox ``Replica set allows TLS/SSL
connections``. This should be selected by default in case you selected
``X.509 Client Certificate`` as the auth mechanism above.
* Choose the ``WiredTiger`` storage engine.
* Verify the details of your MongoDB instance and click on ``Start``.
* It may take up to 5 minutes for the backup process to start.
During this process, the UI will show the status of the backup process.
* Verify that data is being backed up on the UI.

View File

@ -1,3 +1,5 @@
.. _how-to-install-and-configure-easyrsa:
How to Install & Configure Easy-RSA
===================================

View File

@ -1,10 +1,10 @@
Production Deployment Template
==============================
This section outlines how *we* deploy production BigchainDB nodes and clusters
on Microsoft Azure
using Kubernetes.
We improve it constantly.
This section outlines how *we* deploy production BigchainDB clusters,
integrated with Tendermint (the backend for BFT consensus),
on Microsoft Azure using
Kubernetes. We improve it constantly.
You may choose to use it as a template or reference for your own deployment,
but *we make no claim that it is suitable for your purposes*.
Feel free to change things to suit your needs or preferences.
@ -25,8 +25,7 @@ Feel free change things to suit your needs or preferences.
cloud-manager
easy-rsa
upgrade-on-kubernetes
add-node-on-kubernetes
restore-from-mongodb-cloud-manager
bigchaindb-network-on-kubernetes
tectonic-azure
troubleshoot
architecture

View File

@ -1,3 +1,5 @@
.. _how-to-configure-a-bigchaindb-node:
How to Configure a BigchainDB Node
==================================
@ -9,7 +11,7 @@ and ``secret.yaml`` (a set of Secrets).
They are stored in the Kubernetes cluster's key-value store (etcd).
Make sure you did all the things listed in the section titled
:ref:`Things Each Node Operator Must Do`
:ref:`things-each-node-operator-must-do`
(including generation of all the SSL certificates needed
for MongoDB auth).
@ -33,7 +35,7 @@ vars.cluster-fqdn
~~~~~~~~~~~~~~~~~
The ``cluster-fqdn`` field specifies the domain you would have
:ref:`registered before <2. Register a Domain and Get an SSL Certificate for It>`.
:ref:`registered before <register-a-domain-and-get-an-ssl-certificate-for-it>`.
vars.cluster-frontend-port
@ -69,15 +71,8 @@ of naming instances, so the instances in your BigchainDB node
should conform to that standard (i.e. you can't just make up some names).
There are some things worth noting about the ``mdb-instance-name``:
* MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica
set to resolve the hostname provided to the ``rs.initiate()`` command.
It needs to ensure that the replica set is being initialized in the same
instance where the MongoDB instance is running.
* We use the value in the ``mdb-instance-name`` field to achieve this.
* This field will be the DNS name of your MongoDB instance, and Kubernetes
maps this name to its internal DNS.
* This field will also be used by other MongoDB instances when forming a
MongoDB replica set.
* We use ``mdb-instance-0``, ``mdb-instance-1`` and so on in our
documentation. Your BigchainDB cluster may use a different naming convention.
@ -139,31 +134,10 @@ listening for HTTP requests. Currently set to ``9984`` by default.
The ``bigchaindb-ws-port`` is the port number on which BigchainDB is
listening for Websocket requests. Currently set to ``9985`` by default.
There's another :ref:`page with a complete listing of all the BigchainDB Server
configuration settings <Configuration Settings>`.
There's another :doc:`page with a complete listing of all the BigchainDB Server
configuration settings <../server-reference/configuration>`.
bdb-config.bdb-keyring
~~~~~~~~~~~~~~~~~~~~~~~
This lists the BigchainDB public keys
of all *other* nodes in your BigchainDB cluster
(not including the public key of your BigchainDB node). Cases:
* If you're deploying the first node in the cluster,
the value should be ``""`` (an empty string).
* If you're deploying the second node in the cluster,
the value should be the BigchainDB public key of the first/original
node in the cluster.
For example,
``"EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"``
* If there are two or more other nodes already in the cluster,
the value should be a colon-separated list
of the BigchainDB public keys
of those other nodes.
For example,
``"DPjpKbmbPYPKVAuf6VSkqGCf5jzrEh69Ldef6TrLwsEQ:EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"``
bdb-config.bdb-user
~~~~~~~~~~~~~~~~~~~
@ -174,16 +148,16 @@ We need to specify the user name *as seen in the certificate* issued to
the BigchainDB instance in order to authenticate correctly. Use
the following ``openssl`` command to extract the user name from the
certificate:
.. code:: bash
$ openssl x509 -in <path to the bigchaindb certificate> \
-inform PEM -subject -nameopt RFC2253
You should see an output line that resembles:
.. code:: bash
subject= emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE
The ``subject`` line states the complete user name we need to use for this
@ -194,6 +168,137 @@ field (``bdb-config.bdb-user``), i.e.
emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE
tendermint-config.tm-instance-name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Your BigchainDB cluster organization should have a standard way
of naming instances, so the instances in your BigchainDB node
should conform to that standard. There are some things worth noting
about the ``tm-instance-name``:
* This field will be the DNS name of your Tendermint instance, and Kubernetes
maps this name to its internal DNS, so all peer-to-peer communication
depends on it in a network/multi-node deployment.
* This parameter is also used to access the public key of a particular node.
* We use ``tm-instance-0``, ``tm-instance-1`` and so on in our
documentation. Your BigchainDB cluster may use a different naming convention.
tendermint-config.ngx-tm-instance-name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NGINX needs the FQDN of the servers inside the cluster to be able to forward
traffic.
``ngx-tm-instance-name`` is the FQDN of the Tendermint
instance in this Kubernetes cluster.
In Kubernetes, this is usually the name of the module specified in the
corresponding ``tendermint-config.*-instance-name`` followed by the
``<namespace name>.svc.cluster.local``. For example, if you run Tendermint in
the default Kubernetes namespace, this will be
``<tendermint-config.tm-instance-name>.default.svc.cluster.local``
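If you want to double-check this FQDN from inside the cluster (for example from any running container, as in the verification steps later in this guide), a minimal check could be the following; the instance name ``tm-instance-0`` and the ``default`` namespace are just examples.

.. code:: bash

$ nslookup tm-instance-0.default.svc.cluster.local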
tendermint-config.tm-seeds
~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-seeds`` is the initial set of peers to connect to. It is a comma-separated
list of all the peers that are part of the cluster.
If you are deploying a stand-alone BigchainDB node, the value should be the same as
``<tm-instance-name>``. If you are deploying a network, this parameter will look
like this:
.. code::
<tm-instance-1>,<tm-instance-2>,<tm-instance-3>,<tm-instance-4>
tendermint-config.tm-validators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-validators`` is the initial set of validators in the network. It is a comma-separated list
of all the participant validator nodes.
If you are deploying a stand-alone BigchainDB node, the value should be the same as
``<tm-instance-name>``. If you are deploying a network, this parameter will look like
this:
.. code::
<tm-instance-1>,<tm-instance-2>,<tm-instance-3>,<tm-instance-4>
tendermint-config.tm-validator-power
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-validator-power`` represents the voting power of each validator. It is a comma-separated
list of all the participants in the network.
**Note**: The order of the validator power list should be the same as the ``tm-validators`` list.
.. code::
tm-validators: <tm-instance-1>,<tm-instance-2>,<tm-instance-3>,<tm-instance-4>
For the above list of validators the ``tm-validator-power`` list should look like this:
.. code::
tm-validator-power: <tm-instance-1-power>,<tm-instance-2-power>,<tm-instance-3-power>,<tm-instance-4-power>
tendermint-config.tm-genesis-time
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-genesis-time`` represents the official time of blockchain start. Details regarding how to generate
this parameter are covered :ref:`here <generate-the-blockchain-id-and-genesis-time>`.
tendermint-config.tm-chain-id
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-chain-id`` represents the ID of the blockchain. This must be unique for every blockchain.
Details regarding how to generate this parameter are covered
:ref:`here <generate-the-blockchain-id-and-genesis-time>`.
tendermint-config.tm-abci-port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-abci-port`` has a default value of ``46658``, which is used by Tendermint Core for
ABCI (Application BlockChain Interface) traffic. BigchainDB nodes use this port
internally to communicate with Tendermint Core.
tendermint-config.tm-p2p-port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-p2p-port`` has a default value of ``46656``, which is used by Tendermint Core for
peer-to-peer communication.
For a multi-node/zone deployment, this port needs to be available publicly for P2P
communication between Tendermint nodes.
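To confirm that a peer's P2P port is actually reachable from outside its cluster, you could run a simple TCP check from any machine with ``nc`` installed. The FQDN ``cluster2.example.com`` below is hypothetical; use the FQDN mapped to the other cluster's NGINX Public IP.

.. code:: bash

# Exits successfully only if a TCP connection to the P2P port can be opened
$ nc -zv cluster2.example.com 46656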
tendermint-config.tm-rpc-port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-rpc-port`` has a default value of ``46657``, which is used by Tendermint Core for RPC
traffic. BigchainDB nodes use this port as the RPC listen address.
tendermint-config.tm-pub-key-access
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``tm-pub-key-access`` has a default value of ``9986``, which is used to discover the public
key of a Tendermint node. Each Tendermint StatefulSet (a pod with Tendermint + NGINX) hosts its
public key.
.. code::
http://tendermint-instance-1:9986/pub_key.json
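For example, you could fetch the public key with ``curl`` from inside the cluster (the instance name below is illustrative):

.. code:: bash

$ curl -X GET http://tendermint-instance-1:9986/pub_key.json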
Edit secret.yaml
----------------

View File

@ -1,18 +1,16 @@
.. _kubernetes-template-deploy-a-single-bigchaindb-node:
Kubernetes Template: Deploy a Single BigchainDB Node
====================================================
This page describes how to deploy the first BigchainDB node
in a BigchainDB cluster, or a stand-alone BigchainDB node,
This page describes how to deploy a stand-alone BigchainDB + Tendermint node
using `Kubernetes <https://kubernetes.io/>`_.
It assumes you already have a running Kubernetes cluster.
If you want to add a new BigchainDB node to an existing BigchainDB cluster,
refer to :doc:`the page about that <add-node-on-kubernetes>`.
Below, we refer to many files by their directory and filename,
such as ``configuration/config-map.yaml``. Those files are files in the
`bigchaindb/bigchaindb repository on GitHub
<https://github.com/bigchaindb/bigchaindb/>`_ in the ``k8s/`` directory.
`bigchaindb/bigchaindb repository on GitHub <https://github.com/bigchaindb/bigchaindb/>`_
in the ``k8s/`` directory.
Make sure you're getting those files from the appropriate Git branch on
GitHub, i.e. the branch for the version of BigchainDB that your BigchainDB
cluster is using.
@ -30,7 +28,8 @@ The default location of the kubectl configuration file is ``~/.kube/config``.
If you don't have that file, then you need to get it.
**Azure.** If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0 (as per :doc:`our template <template-kubernetes-azure>`),
using the Azure CLI 2.0 (as per :doc:`our template
<../production-deployment-template/template-kubernetes-azure>`),
then you can get the ``~/.kube/config`` file using:
.. code:: bash
@ -105,9 +104,11 @@ That means you can visit the dashboard in your web browser at
Step 3: Configure Your BigchainDB Node
--------------------------------------
See the page titled :ref:`How to Configure a BigchainDB Node`.
See the page titled :ref:`how-to-configure-a-bigchaindb-node`.
.. _start-the-nginx-service:
Step 4: Start the NGINX Service
-------------------------------
@ -137,6 +138,16 @@ Step 4.1: Vanilla NGINX
``cluster-frontend-port`` in the ConfigMap above. This is the
``public-cluster-port`` in the file which is the ingress in to the cluster.
* Set ``ports[1].port`` and ``ports[1].targetPort`` to the value set in the
``tm-pub-access-port`` in the ConfigMap above. This is the
``tm-pub-key-access`` in the file which specifies where Public Key for
the Tendermint instance is available.
* Set ``ports[2].port`` and ``ports[2].targetPort`` to the value set in the
``tm-p2p-port`` in the ConfigMap above. This is the
``tm-p2p-port`` in the file which is used for P2P communication for Tendermint
nodes.
* Start the Kubernetes Service:
.. code:: bash
@ -172,6 +183,17 @@ Step 4.2: NGINX with HTTPS
``public-mdb-port`` in the file which specifies where MongoDB is
available.
* Set ``ports[2].port`` and ``ports[2].targetPort`` to the value set in the
``tm-pub-access-port`` in the ConfigMap above. This is the
``tm-pub-key-access`` in the file which specifies where Public Key for
the Tendermint instance is available.
* Set ``ports[3].port`` and ``ports[3].targetPort`` to the value set in the
``tm-p2p-port`` in the ConfigMap above. This is the
``tm-p2p-port`` in the file which is used for P2P communication between Tendermint
nodes.
* Start the Kubernetes Service:
.. code:: bash
@ -179,6 +201,8 @@ Step 4.2: NGINX with HTTPS
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-https/nginx-https-svc.yaml
.. _assign-dns-name-to-nginx-public-ip:
Step 5: Assign DNS Name to the NGINX Public IP
----------------------------------------------
@ -216,10 +240,12 @@ changes to be applied.
To verify the DNS setting is operational, you can run ``nslookup <DNS
name added in Azure configuration>`` from your local Linux shell.
This will ensure that when you scale the replica set later, other MongoDB
members in the replica set can reach this instance.
This will ensure that when you scale to different geographical zones, other Tendermint
nodes in the network can reach this instance.
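For example, if the DNS name you configured were ``test-cluster-0.westeurope.cloudapp.azure.com`` (a made-up name; use your own), the check would look like:

.. code:: bash

$ nslookup test-cluster-0.westeurope.cloudapp.azure.com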
.. _start-the-mongodb-kubernetes-service:
Step 6: Start the MongoDB Kubernetes Service
--------------------------------------------
@ -245,6 +271,8 @@ Step 6: Start the MongoDB Kubernetes Service
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-svc.yaml
.. _start-the-bigchaindb-kubernetes-service:
Step 7: Start the BigchainDB Kubernetes Service
-----------------------------------------------
@ -268,6 +296,11 @@ Step 7: Start the BigchainDB Kubernetes Service
This is the ``bdb-ws-port`` in the file which specifies where BigchainDB
listens for Websocket connections.
* Set ``ports[2].port`` and ``ports[2].targetPort`` to the value set in the
``tm-abci-port`` in the ConfigMap above.
This is the ``tm-abci-port`` in the file which specifies the port used
for ABCI communication.
* Start the Kubernetes Service:
.. code:: bash
@ -275,6 +308,8 @@ Step 7: Start the BigchainDB Kubernetes Service
$ kubectl --context k8s-bdb-test-cluster-0 apply -f bigchaindb/bigchaindb-svc.yaml
.. _start-the-openresty-kubernetes-service:
Step 8: Start the OpenResty Kubernetes Service
----------------------------------------------
@ -288,6 +323,9 @@ Step 8: Start the OpenResty Kubernetes Service
``openresty-instance-name`` is ``openresty-instance-0``, set the
``spec.selector.app`` to ``openresty-instance-0-dep``.
* Set ``ports[0].port`` and ``ports[0].targetPort`` to the value set in the
``openresty-backend-port`` in the ConfigMap.
* Start the Kubernetes Service:
.. code:: bash
@ -295,19 +333,56 @@ Step 8: Start the OpenResty Kubernetes Service
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-openresty/nginx-openresty-svc.yaml
Step 9: Start the NGINX Kubernetes Deployment
---------------------------------------------
.. _start-the-tendermint-kubernetes-service:
* NGINX is used as a proxy to OpenResty, BigchainDB and MongoDB instances in
Step 9: Start the Tendermint Kubernetes Service
-----------------------------------------------
* This configuration is located in the file ``tendermint/tendermint-svc.yaml``.
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``tm-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``tm-instance-name`` in
the ConfigMap followed by ``-ss``. For example, if the value set in the
``tm-instance-name`` is ``tm-instance-0``, set the
``spec.selector.app`` to ``tm-instance-0-ss``.
* Set ``ports[0].port`` and ``ports[0].targetPort`` to the value set in the
``tm-p2p-port`` in the ConfigMap above.
It specifies where Tendermint peers communicate.
* Set ``ports[1].port`` and ``ports[1].targetPort`` to the value set in the
``tm-rpc-port`` in the ConfigMap above.
It specifies the port used by Tendermint core for RPC traffic.
* Set ``ports[2].port`` and ``ports[2].targetPort`` to the value set in the
``tm-pub-key-access`` in the ConfigMap above.
It specifies the port to host/distribute the public key for the Tendermint node.
* Start the Kubernetes Service:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f tendermint/tendermint-svc.yaml
.. _start-the-nginx-deployment:
Step 10: Start the NGINX Kubernetes Deployment
----------------------------------------------
* NGINX is used as a proxy to OpenResty, BigchainDB, Tendermint and MongoDB instances in
the node. It proxies HTTP/HTTPS requests on the ``cluster-frontend-port``
to the corresponding OpenResty or BigchainDB backend, and TCP connections
on ``mongodb-frontend-port`` to the MongoDB backend.
to the corresponding OpenResty or BigchainDB backend, and TCP connections
on ``mongodb-frontend-port``, ``tm-p2p-port`` and ``tm-pub-key-access``
to MongoDB and Tendermint, respectively.
* As in step 4, you have the option to use vanilla NGINX without HTTPS or
NGINX with HTTPS support.
Step 9.1: Vanilla NGINX
^^^^^^^^^^^^^^^^^^^^^^^
Step 10.1: Vanilla NGINX
^^^^^^^^^^^^^^^^^^^^^^^^
* This configuration is located in the file ``nginx-http/nginx-http-dep.yaml``.
@ -317,9 +392,10 @@ Step 9.1: Vanilla NGINX
``ngx-http-instance-0``, set the fields to ``ngx-http-instance-0-dep``.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 3 ports -
``mongodb-frontend-port``, ``cluster-frontend-port`` and
``cluster-health-check-port``. Set them to the values specified in the
``spec.containers[0].ports`` section. We currently expose 5 ports -
``mongodb-frontend-port``, ``cluster-frontend-port``,
``cluster-health-check-port``, ``tm-pub-key-access`` and ``tm-p2p-port``.
Set them to the values specified in the
ConfigMap.
* The configuration uses the following values set in the ConfigMap:
@ -333,6 +409,9 @@ Step 9.1: Vanilla NGINX
- ``ngx-bdb-instance-name``
- ``bigchaindb-api-port``
- ``bigchaindb-ws-port``
- ``ngx-tm-instance-name``
- ``tm-pub-key-access``
- ``tm-p2p-port``
* Start the Kubernetes Deployment:
@ -341,8 +420,8 @@ Step 9.1: Vanilla NGINX
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-http/nginx-http-dep.yaml
Step 9.2: NGINX with HTTPS
^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 10.2: NGINX with HTTPS
^^^^^^^^^^^^^^^^^^^^^^^^^^^
* This configuration is located in the file
``nginx-https/nginx-https-dep.yaml``.
@ -353,9 +432,10 @@ Step 9.2: NGINX with HTTPS
``ngx-https-instance-0``, set the fields to ``ngx-https-instance-0-dep``.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 3 ports -
``mongodb-frontend-port``, ``cluster-frontend-port`` and
``cluster-health-check-port``. Set them to the values specified in the
``spec.containers[0].ports`` section. We currently expose the following ports -
``mongodb-frontend-port``, ``cluster-frontend-port``,
``cluster-health-check-port``, ``tm-pub-key-access`` and ``tm-p2p-port``.
Set them to the values specified in the
ConfigMap.
* The configuration uses the following values set in the ConfigMap:
@ -372,6 +452,9 @@ Step 9.2: NGINX with HTTPS
- ``ngx-bdb-instance-name``
- ``bigchaindb-api-port``
- ``bigchaindb-ws-port``
- ``ngx-tm-instance-name``
- ``tm-pub-key-access``
- ``tm-p2p-port``
* The configuration uses the following values set in the Secret:
@ -384,7 +467,9 @@ Step 9.2: NGINX with HTTPS
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-https/nginx-https-dep.yaml
Step 10: Create Kubernetes Storage Classes for MongoDB
.. _create-kubernetes-storage-class-mdb:
Step 11: Create Kubernetes Storage Classes for MongoDB
------------------------------------------------------
MongoDB needs somewhere to store its data persistently,
@ -394,10 +479,10 @@ Our MongoDB Docker container
exports two volume mounts with correct
permissions from inside the container:
* The directory where the mongod instance stores its data: ``/data/db``.
* The directory where the MongoDB instance stores its data: ``/data/db``.
There's more explanation in the MongoDB docs about `storage.dbpath <https://docs.mongodb.com/manual/reference/configuration-options/#storage.dbPath>`_.
* The directory where the mongodb instance stores the metadata for a sharded
* The directory where the MongoDB instance stores the metadata for a sharded
cluster: ``/data/configdb/``.
There's more explanation in the MongoDB docs about `sharding.configDB <https://docs.mongodb.com/manual/reference/configuration-options/#sharding.configDB>`_.
@ -413,7 +498,7 @@ The first thing to do is create the Kubernetes storage classes.
First, you need an Azure storage account.
If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0
(as per :doc:`our template <template-kubernetes-azure>`),
(as per :doc:`our template <../production-deployment-template/template-kubernetes-azure>`),
then the `az acs create` command already created a
storage account in the same location and resource group
as your Kubernetes cluster.
@ -425,7 +510,7 @@ in the same data center.
Premium storage is higher-cost and higher-performance.
It uses solid state drives (SSD).
You can create a `storage account <https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account>`_
for Premium storage and associate it with your Azure resource group.
for Premium storage and associate it with your Azure resource group.
For future reference, the command to create a storage account is
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
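A sketch of that command is shown below; the storage account name is made up, and the resource group and location should match those of your Kubernetes cluster.

.. code:: bash

$ az storage account create \
--name mybdbpremiumstorage \
--resource-group <resource group of your Kubernetes cluster> \
--location <location of your Kubernetes cluster> \
--sku Premium_LRS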
@ -433,7 +518,7 @@ For future reference, the command to create a storage account is
Please refer to `Azure documentation <https://docs.microsoft.com/en-us/azure/virtual-machines/windows/premium-storage>`_
for the list of VMs that are supported by Premium Storage.
The Kubernetes template for configuration of Storage Class is located in the
The Kubernetes template for configuration of the MongoDB Storage Class is located in the
file ``mongodb/mongo-sc.yaml``.
You may have to update the ``parameters.location`` field in the file to
@ -441,7 +526,7 @@ specify the location you are using in Azure.
If you want to use a custom storage account with the Storage Class, you
can also update `parameters.storageAccount` and provide the Azure storage
account name.
account name.
Create the required storage classes using:
@ -453,8 +538,10 @@ Create the required storage classes using:
You can check if it worked using ``kubectl get storageclasses``.
Step 11: Create Kubernetes Persistent Volume Claims
---------------------------------------------------
.. _create-kubernetes-persistent-volume-claim-mdb:
Step 12: Create Kubernetes Persistent Volume Claims for MongoDB
---------------------------------------------------------------
Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
``mongo-configdb-claim``.
@ -500,13 +587,15 @@ but it should become "Bound" fairly quickly.
* Run the following command to update a PV's reclaim policy to <Retain>
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
For notes on recreating a persistent volume from a released Azure disk resource, consult
:ref:`the page about cluster troubleshooting <Cluster Troubleshooting>`.
:doc:`the page about cluster troubleshooting <../production-deployment-template/troubleshoot>`.
Step 12: Start a Kubernetes StatefulSet for MongoDB
.. _start-kubernetes-stateful-set-mongodb:
Step 13: Start a Kubernetes StatefulSet for MongoDB
---------------------------------------------------
* This configuration is located in the file ``mongodb/mongo-ss.yaml``.
@ -551,9 +640,8 @@ Step 12: Start a Kubernetes StatefulSet for MongoDB
* The configuration uses the following values set in the ConfigMap:
- ``mdb-instance-name``
- ``mongodb-replicaset-name``
- ``mongodb-backend-port``
* The configuration uses the following values set in the Secret:
- ``mdb-certs``
@ -590,7 +678,9 @@ Step 12: Start a Kubernetes StatefulSet for MongoDB
$ kubectl --context k8s-bdb-test-cluster-0 get pods -w
Step 13: Configure Users and Access Control for MongoDB
.. _configure-users-and-access-control-mongodb:
Step 14: Configure Users and Access Control for MongoDB
-------------------------------------------------------
* In this step, you will create a user on MongoDB with authorization
@ -618,28 +708,6 @@ Step 13: Configure Users and Access Control for MongoDB
--sslCAFile /etc/mongod/ca/ca.pem \
--sslPEMKeyFile /etc/mongod/ssl/mdb-instance.pem
* Initialize the replica set using:
.. code:: bash
> rs.initiate( {
_id : "bigchain-rs",
members: [ {
_id : 0,
host :"<hostname>:27017"
} ]
} )
The ``hostname`` in this case will be the value set in
``mdb-instance-name`` in the ConfigMap.
For example, if the value set in the ``mdb-instance-name`` is
``mdb-instance-0``, set the ``hostname`` above to the value ``mdb-instance-0``.
* The instance should be voted as the ``PRIMARY`` in the replica set (since
this is the only instance in the replica set till now).
This can be observed from the mongo shell prompt,
which will read ``PRIMARY>``.
* Create a user ``adminUser`` on the ``admin`` database with the
authorization to create other users. This will only work the first time you
log in to the mongo shell. For further details, see `localhost
@ -697,8 +765,7 @@ Step 13: Configure Users and Access Control for MongoDB
]
} )
* You can similarly create users for MongoDB Monitoring Agent and MongoDB
Backup Agent. For example:
* You can similarly create a user for the MongoDB Monitoring Agent. For example:
.. code:: bash
@ -710,16 +777,127 @@ Step 13: Configure Users and Access Control for MongoDB
]
} )
PRIMARY> db.getSiblingDB("$external").runCommand( {
createUser: 'emailAddress=dev@bigchaindb.com,CN=test-mdb-bak-ssl,OU=MongoDB-Bak-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE',
writeConcern: { w: 'majority' , wtimeout: 5000 },
roles: [
{ role: 'backup', db: 'admin' }
]
} )
.. _create-kubernetes-storage-class:
Step 15: Create Kubernetes Storage Classes for Tendermint
----------------------------------------------------------
Tendermint needs somewhere to store its data persistently; it uses
LevelDB as the persistent storage layer.
The Kubernetes template for configuration of Storage Class is located in the
file ``tendermint/tendermint-sc.yaml``.
Details about how to create an Azure Storage account and how a Kubernetes Storage Class works
are already covered in this document: :ref:`create-kubernetes-storage-class-mdb`.
Create the required storage classes using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f tendermint/tendermint-sc.yaml
Step 14: Start a Kubernetes Deployment for MongoDB Monitoring Agent
You can check if it worked using ``kubectl get storageclasses``.
.. _create-kubernetes-persistent-volume-claim:
Step 16: Create Kubernetes Persistent Volume Claims for Tendermint
------------------------------------------------------------------
Next, you will create two PersistentVolumeClaim objects ``tendermint-db-claim`` and
``tendermint-config-db-claim``.
This configuration is located in the file ``tendermint/tendermint-pvc.yaml``.
Details about Kubernetes Persistent Volumes, Persistent Volume Claims
and how they work with Azure are already covered in this
document: :ref:`create-kubernetes-persistent-volume-claim-mdb`.
Create the required Persistent Volume Claims using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f tendermint/tendermint-pvc.yaml
You can check its status using:
.. code::
kubectl get pvc -w
.. _create-kubernetes-stateful-set:
Step 17: Start a Kubernetes StatefulSet for Tendermint
------------------------------------------------------
* This configuration is located in the file ``tendermint/tendermint-ss.yaml``.
* Set the ``spec.serviceName`` to the value set in ``tm-instance-name`` in
the ConfigMap.
For example, if the value set in the ``tm-instance-name``
is ``tm-instance-0``, set the field to ``tm-instance-0``.
* Set ``metadata.name``, ``spec.template.metadata.name`` and
``spec.template.metadata.labels.app`` to the value set in
``tm-instance-name`` in the ConfigMap, followed by
``-ss``.
For example, if the value set in the
``tm-instance-name`` is ``tm-instance-0``, set the fields to the value
``tm-instance-0-ss``.
* Note how the Tendermint container uses the ``tendermint-db-claim`` and the
``tendermint-config-db-claim`` PersistentVolumeClaims for its ``/tendermint`` and
``/tendermint_node_data`` directories (mount paths).
* As we gain more experience running Tendermint in testing and production, we
will tweak the ``resources.limits.cpu`` and ``resources.limits.memory``.
We deploy Tendermint as a pod (Tendermint + NGINX); Tendermint is used as the consensus
engine, while NGINX is used to serve the public key of the Tendermint instance.
* For the NGINX container, set the port to be exposed from the container in the
``spec.containers[0].ports[0]`` section. Set it to the value specified
for ``tm-pub-key-access`` in the ConfigMap.
* For the Tendermint container, set the ports to be exposed from the container in the
``spec.containers[1].ports`` section. We currently expose two Tendermint ports.
Set them to the values specified for ``tm-p2p-port`` and ``tm-rpc-port``
in the ConfigMap, respectively.
* The configuration uses the following values set in the ConfigMap:
- ``tm-pub-key-access``
- ``tm-seeds``
- ``tm-validator-power``
- ``tm-validators``
- ``tm-genesis-time``
- ``tm-chain-id``
- ``tm-abci-port``
- ``bdb-instance-name``
* Create the Tendermint StatefulSet using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f tendermint/tendermint-ss.yaml
* It might take up to 10 minutes for the disks, specified in the Persistent
Volume Claims above, to be created and attached to the pod.
The UI might show that the pod has errored with the message
"timeout expired waiting for volumes to attach/mount". Use the CLI below
to check the status of the pod in this case, instead of the UI.
This happens due to a bug in Azure ACS.
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 get pods -w
.. _start-kubernetes-deployment-for-mdb-mon-agent:
Step 18: Start a Kubernetes Deployment for MongoDB Monitoring Agent
-------------------------------------------------------------------
* This configuration is located in the file
@ -746,34 +924,9 @@ Step 14: Start a Kubernetes Deployment for MongoDB Monitoring Agent
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-monitoring-agent/mongo-mon-dep.yaml
Step 15: Start a Kubernetes Deployment for MongoDB Backup Agent
---------------------------------------------------------------
.. _start-kubernetes-deployment-bdb:
* This configuration is located in the file
``mongodb-backup-agent/mongo-backup-dep.yaml``.
* Set ``metadata.name``, ``spec.template.metadata.name`` and
``spec.template.metadata.labels.app`` to the value set in
``mdb-bak-instance-name`` in the ConfigMap, followed by
``-dep``.
For example, if the value set in the
``mdb-bak-instance-name`` is ``mdb-bak-instance-0``, set the fields to the
value ``mdb-bak-instance-0-dep``.
* The configuration uses the following values set in the Secret:
- ``mdb-bak-certs``
- ``ca-auth``
- ``cloud-manager-credentials``
* Start the Kubernetes Deployment using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-backup-agent/mongo-backup-dep.yaml
Step 16: Start a Kubernetes Deployment for BigchainDB
Step 19: Start a Kubernetes Deployment for BigchainDB
-----------------------------------------------------
* This configuration is located in the file
@ -786,21 +939,14 @@ Step 16: Start a Kubernetes Deployment for BigchainDB
``bdb-instance-name`` is ``bdb-instance-0``, set the fields to the
value ``bdb-instance-0-dep``.
* Set the value of ``BIGCHAINDB_KEYPAIR_PRIVATE`` (not base64-encoded).
(In the future, we'd like to pull the BigchainDB private key from
the Secret named ``bdb-private-key``,
but a Secret can only be mounted as a file,
so BigchainDB Server would have to be modified to look for it
in a file.)
* As we gain more experience running BigchainDB in testing and production,
we will tweak the ``resources.limits`` values for CPU and memory, and as
richer monitoring and probing becomes available in BigchainDB, we will
tweak the ``livenessProbe`` and ``readinessProbe`` parameters.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 2 ports -
``bigchaindb-api-port`` and ``bigchaindb-ws-port``. Set them to the
``spec.containers[0].ports`` section. We currently expose 3 ports -
``bigchaindb-api-port``, ``bigchaindb-ws-port`` and ``tm-abci-port``. Set them to the
values specified in the ConfigMap.
* The configuration uses the following values set in the ConfigMap:
@ -821,6 +967,8 @@ Step 16: Start a Kubernetes Deployment for BigchainDB
- ``bigchaindb-database-connection-timeout``
- ``bigchaindb-log-level``
- ``bdb-user``
- ``tm-instance-name``
- ``tm-rpc-port``
* The configuration uses the following values set in the Secret:
@ -837,7 +985,9 @@ Step 16: Start a Kubernetes Deployment for BigchainDB
* You can check its status using the command ``kubectl get deployments -w``
Step 17: Start a Kubernetes Deployment for OpenResty
.. _start-kubernetes-deployment-openresty:
Step 20: Start a Kubernetes Deployment for OpenResty
----------------------------------------------------
* This configuration is located in the file
@ -876,19 +1026,21 @@ Step 17: Start a Kubernetes Deployment for OpenResty
* You can check its status using the command ``kubectl get deployments -w``
Step 18: Configure the MongoDB Cloud Manager
Step 21: Configure the MongoDB Cloud Manager
--------------------------------------------
Refer to the
:ref:`documentation <Configure MongoDB Cloud Manager for Monitoring and Backup>`
:doc:`documentation <../production-deployment-template/cloud-manager>`
for details on how to configure the MongoDB Cloud Manager to enable
monitoring and backup.
Step 19: Verify the BigchainDB Node Setup
.. _verify-and-test-bdb:
Step 22: Verify the BigchainDB Node Setup
-----------------------------------------
Step 19.1: Testing Internally
Step 22.1: Testing Internally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To test the setup of your BigchainDB node, you could use a Docker container
@ -939,6 +1091,18 @@ To test the BigchainDB instance:
$ wsc -er ws://bdb-instance-0:9985/api/v1/streams/valid_transactions
To test the Tendermint instance:
.. code:: bash
$ nslookup tm-instance-0
$ dig +noall +answer _bdb-api-port._tcp.tm-instance-0.default.svc.cluster.local SRV
$ dig +noall +answer _bdb-ws-port._tcp.tm-instance-0.default.svc.cluster.local SRV
$ curl -X GET http://tm-instance-0:9986/pub_key.json
To test the OpenResty instance:
@ -992,10 +1156,10 @@ The above curl command should result in the response
``It looks like you are trying to access MongoDB over HTTP on the native driver port.``
Step 19.2: Testing Externally
Step 22.2: Testing Externally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Check the MongoDB monitoring and backup agent on the MongoDB Cloud Manager
Check the MongoDB monitoring agent on the MongoDB Cloud Manager
portal to verify it is working fine.
If you are using the NGINX with HTTP support, accessing the URL
@ -1007,3 +1171,7 @@ If you are using the NGINX with HTTPS support, use ``https`` instead of
Use the Python Driver to send some transactions to the BigchainDB node and
verify that your node or cluster works as expected.
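Before scripting transactions with the Python Driver, a quick sanity check of the HTTP API root is often enough to confirm that the node is reachable. The FQDN below is hypothetical; use your own cluster FQDN and frontend port.

.. code:: bash

$ curl -X GET https://test-cluster-0.example.com:443/api/v1/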
Next, you can set up log analytics and monitoring, by following our templates:
* :doc:`../production-deployment-template/log-analytics`.

View File

@ -1,146 +0,0 @@
How to Restore Data Backed On MongoDB Cloud Manager
===================================================
This page describes how to restore data backed up on
`MongoDB Cloud Manager <https://cloud.mongodb.com/>`_ by
the backup agent when using a single instance MongoDB replica set.
Prerequisites
-------------
- You can restore to either new hardware or existing hardware. We cover
restoring data to an existing MongoDB Kubernetes StatefulSet using a
Kubernetes Persistent Volume Claim below as described
:doc:`here <node-on-kubernetes>`.
- If the backup and destination database storage engines or settings do not
match, mongod cannot start once the backup is restored.
- If the backup and destination database do not belong to the same MongoDB
Cloud Manager group, then the database will start but never initialize
properly.
- The backup restore file includes a metadata file, restoreInfo.txt. This file
captures the options the database used when the snapshot was taken. The
database must be run with the listed options after it has been restored. It
contains:
1. Group name
2. Replica Set name
3. Cluster Id (if applicable)
4. Snapshot timestamp (as Timestamp at UTC)
5. Last Oplog applied (as a BSON Timestamp at UTC)
6. MongoDB version
7. Storage engine type
8. mongod startup options used on the database when the snapshot was taken
Step 1: Get the Backup/Archived Data from Cloud Manager
-------------------------------------------------------
- Log in to the Cloud Manager.
- Select the Group that you want to restore data from.
- Click Backup. Hover over the Status column, click on the
``Restore Or Download`` button.
- Select the appropriate SNAPSHOT, and click Next.
.. note::
We currently do not support restoring data using the ``POINT IN TIME`` and
``OPLOG TIMESTAMP`` method.
- Select 'Pull via Secure HTTP'. Select the number of times the link can be
used to download data in the dropdown box. We select ``Once``.
Select the link expiration time - the time till the download link is active.
We usually select ``1 hour``.
- Check for the email from MongoDB.
.. note::
This can take some time as the Cloud Manager needs to prepare an archive of
the backed up data.
- Once you receive the email, click on the link to open the
``restore jobs page``. Follow the instructions to download the backup data.
.. note::
You will be shown a link to download the back up archive. You can either
click on the ``Download`` button to download it using the browser.
Under rare circumstances, the download is interrupted and errors out; I have
no idea why.
An alternative is to copy the download link and use the ``wget`` tool on
Linux systems to download the data.
Step 2: Copy the archive to the MongoDB Instance
------------------------------------------------
- Once you have the archive, you can copy it to the MongoDB instance running
on a Kubernetes cluster using something similar to:
.. code:: bash
$ kubectl --context ctx-1 cp bigchain-rs-XXXX.tar.gz mdb-instance-name:/
where ``bigchain-rs-XXXX.tar.gz`` is the archive downloaded from Cloud
Manager, and ``mdb-instance-name`` is the name of your MongoDB instance.
Step 3: Prepare the MongoDB Instance for Restore
------------------------------------------------
- Log in to the MongoDB instance using something like:
.. code:: bash
$ kubectl --context ctx-1 exec -it mdb-instance-name bash
- Extract the archive that we have copied to the instance at the proper
location using:
.. code:: bash
$ mv /bigchain-rs-XXXX.tar.gz /data/db
$ cd /data/db
$ tar xzvf bigchain-rs-XXXX.tar.gz
- Rename the directories on the disk, so that MongoDB can find the correct
data after we restart it.
- The current database will be located in the ``/data/db/main`` directory.
We simply rename the old directory to ``/data/db/main.BAK`` and rename the
backup directory ``bigchain-rs-XXXX`` to ``main``.
.. code:: bash
$ mv main main.BAK
$ mv bigchain-rs-XXXX main
.. note::
Ensure that there are no connections to MongoDB from any client, in our
case, BigchainDB. This can be done in multiple ways - iptable rules,
shutting down BigchainDB, stop sending any transactions to BigchainDB, etc.
The simplest way to do it is to stop the MongoDB Kubernetes Service.
BigchainDB has a retry mechanism built in, and it will keep trying to
connect to MongoDB backend repeatedly till it succeeds.
Step 4: Restart the MongoDB Instance
------------------------------------
- This can be achieved using something like:
.. code:: bash
$ kubectl --context ctx-1 delete -f k8s/mongo/mongo-ss.yaml
$ kubectl --context ctx-1 apply -f k8s/mongo/mongo-ss.yaml

View File

@ -10,7 +10,7 @@ Step 1: Revoke a Certificate
----------------------------
Since we used Easy-RSA version 3 to
:ref:`set up the CA <How to Set Up a Self-Signed Certificate Authority>`,
:ref:`set up the CA <how-to-set-up-a-self-signed-certificate-authority>`,
we use it to revoke certificates too.
Go to the following directory (associated with the self-signed CA):

View File

@ -1,3 +1,5 @@
.. _how-to-generate-a-server-certificate-for-mongodb:
How to Generate a Server Certificate for MongoDB
================================================
@ -19,7 +21,7 @@ First create a directory for the server certificate (member cert) and cd into it
cd member-cert
Then :ref:`install and configure Easy-RSA in that directory <How to Install & Configure Easy-RSA>`.
Then :ref:`install and configure Easy-RSA in that directory <how-to-install-and-configure-easyrsa>`.
Step 2: Create the Server Private Key and CSR

View File

@ -14,10 +14,10 @@ Step 1: Prerequisites for Deploying Tectonic Cluster
----------------------------------------------------
Get an Azure account. Refer to
:ref:`this step in our docs <Step 1: Get a Pay-As-You-Go Azure Subscription>`.
:ref:`this step in our docs <get-a-pay-as-you-go-azure-subscription>`.
Create an SSH Key pair for the new Tectonic cluster. Refer to
:ref:`this step in our docs <Step 2: Create an SSH Key Pair>`.
:ref:`this step in our docs <create-an-ssh-key-pair>`.
Step 2: Get a Tectonic Subscription
@ -119,8 +119,9 @@ Step 4: Configure kubectl
$ export KUBECONFIG=/path/to/config/kubectl-config
Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.
Next, you can follow one of our following deployment templates:
* :doc:`node-on-kubernetes`.
Tectonic References
@ -128,5 +129,4 @@ Tectonic References
#. https://coreos.com/tectonic/docs/latest/tutorials/azure/install.html
#. https://coreos.com/tectonic/docs/latest/troubleshooting/installer-terraform.html
#. https://coreos.com/tectonic/docs/latest/tutorials/azure/first-app.html
#. https://coreos.com/tectonic/docs/latest/tutorials/azure/first-app.html

View File

@ -6,6 +6,8 @@ cluster.
This page describes one way to deploy a Kubernetes cluster on Azure.
.. _get-a-pay-as-you-go-azure-subscription:
Step 1: Get a Pay-As-You-Go Azure Subscription
----------------------------------------------
@ -18,6 +20,8 @@ You may find that you have to sign up for a Free Trial subscription first.
That's okay: you can have many subscriptions.
.. _create-an-ssh-key-pair:
Step 2: Create an SSH Key Pair
------------------------------
@ -28,7 +32,8 @@ but it's probably a good idea to make a new SSH key pair
for your Kubernetes VMs and nothing else.)
See the
:ref:`page about how to generate a key pair for SSH <Generate a Key Pair for SSH>`.
:doc:`page about how to generate a key pair for SSH
<../appendices/generate-key-pair-for-ssh>`.
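A minimal sketch of generating such a key pair is shown below; the key file name and comment are just examples.

.. code:: bash

$ ssh-keygen -t rsa -b 4096 -C "k8s-bdb-azure" -f ~/.ssh/<name>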
Step 3: Deploy an Azure Container Service (ACS)
@ -99,7 +104,7 @@ Finally, you can deploy an ACS using something like:
--master-count 3 \
--agent-count 2 \
--admin-username ubuntu \
--agent-vm-size Standard_D2_v2 \
--agent-vm-size Standard_L4s \
--dns-prefix <make up a name> \
--ssh-key-value ~/.ssh/<name>.pub \
--orchestrator-type kubernetes \
@ -135,6 +140,8 @@ and click on the one you created
to see all the resources in it.
.. _ssh-to-your-new-kubernetes-cluster-nodes:
Optional: SSH to Your New Kubernetes Cluster Nodes
--------------------------------------------------
@ -217,5 +224,5 @@ CAUTION: You might end up deleting resources other than the ACS cluster.
--name <name of resource group containing the cluster>
Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.
Next, you can :doc:`run a BigchainDB node/cluster (BFT) <node-on-kubernetes>`
on your new Kubernetes cluster.

View File

@ -1,3 +1,5 @@
.. _cluster-troubleshooting:
Cluster Troubleshooting
=======================

View File

@ -32,7 +32,7 @@ as the host (master and agent) operating system.
You can upgrade Ubuntu and Docker on Azure
by SSHing into each of the hosts,
as documented on
:ref:`another page <Optional: SSH to Your New Kubernetes Cluster Nodes>`.
:ref:`another page <ssh-to-your-new-kubernetes-cluster-nodes>`.
In general, you can SSH to each host in your Kubernetes Cluster
to update the OS and Docker.

View File

@ -6,27 +6,14 @@ to set up a production BigchainDB cluster.
We are constantly improving them.
You can modify them to suit your needs.
Things the Managing Organization Must Do First
----------------------------------------------
.. Note::
We use standalone MongoDB (without Replica Set), BFT replication is handled by Tendermint.
1. Set Up a Self-Signed Certificate Authority
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _register-a-domain-and-get-an-ssl-certificate-for-it:
We use SSL/TLS and self-signed certificates
for MongoDB authentication (and message encryption).
The certificates are signed by the organization managing the cluster.
If your organization already has a process
for signing certificates
(i.e. an internal self-signed certificate authority [CA]),
then you can skip this step.
Otherwise, your organization must
:ref:`set up its own self-signed certificate authority <How to Set Up a Self-Signed Certificate Authority>`.
2. Register a Domain and Get an SSL Certificate for It
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. Register a Domain and Get an SSL Certificate for It
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The BigchainDB APIs (HTTP API and WebSocket API) should be served using TLS,
so the organization running the cluster
@ -35,81 +22,148 @@ register the domain name,
and buy an SSL/TLS certificate for the FQDN.
.. _generate-the-blockchain-id-and-genesis-time:
2. Generate the Blockchain ID and Genesis Time
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tendermint nodes require two parameters that need to be common to, and shared among, all the
participants in the network.
* ``chain_id`` : ID of the blockchain. This must be unique for every blockchain.
* Example: ``test-chain-9gHylg``
* ``genesis_time`` : Official time of blockchain start.
* Example: ``0001-01-01T00:00:00Z``
The preceding parameters can be generated using the ``tendermint init`` command.
To `initialize <https://tendermint.readthedocs.io/en/master/using-tendermint.html#initialize>`_,
you will need to `install Tendermint <https://tendermint.readthedocs.io/en/master/install.html>`_
and verify that a ``genesis.json`` file is created under the `Root Directory
<https://tendermint.readthedocs.io/en/master/using-tendermint.html#directory-root>`_. You can use
the ``genesis_time`` and ``chain_id`` from this example ``genesis.json`` file:
.. code:: json
{
"genesis_time": "0001-01-01T00:00:00Z",
"chain_id": "test-chain-9gHylg",
"validators": [
{
"pub_key": {
"type": "ed25519",
"data": "D12279E746D3724329E5DE33A5AC44D5910623AA6FB8CDDC63617C959383A468"
},
"power": 10,
"name": ""
}
],
"app_hash": ""
}
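A minimal sketch of generating these values locally is shown below. It assumes Tendermint is already installed and uses the default ``$TMHOME`` of ``~/.tendermint``; depending on your Tendermint version, ``genesis.json`` may instead be created under a ``config/`` subdirectory.

.. code:: bash

$ tendermint init
# The generated file contains the genesis_time and chain_id to share with all participants
$ cat ~/.tendermint/genesis.json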
.. _things-each-node-operator-must-do:
Things Each Node Operator Must Do
---------------------------------
☐ Every MongoDB instance in the cluster must have a unique (one-of-a-kind) name.
Ask the organization managing your cluster if they have a standard
way of naming instances in the cluster.
For example, maybe they assign a unique number to each node,
so that if you're operating node 12, your MongoDB instance would be named
``mdb-instance-12``.
Similarly, other instances must also have unique names in the cluster.
☐ Set Up a Self-Signed Certificate Authority
#. Name of the MongoDB instance (``mdb-instance-*``)
#. Name of the BigchainDB instance (``bdb-instance-*``)
#. Name of the NGINX instance (``ngx-http-instance-*`` or ``ngx-https-instance-*``)
#. Name of the OpenResty instance (``openresty-instance-*``)
#. Name of the MongoDB monitoring agent instance (``mdb-mon-instance-*``)
#. Name of the MongoDB backup agent instance (``mdb-bak-instance-*``)
We use SSL/TLS and self-signed certificates
for MongoDB authentication (and message encryption).
The certificates are signed by the organization managing the :ref:`bigchaindb-node`.
If your organization already has a process
for signing certificates
(i.e. an internal self-signed certificate authority [CA]),
then you can skip this step.
Otherwise, your organization must
:ref:`set up its own self-signed certificate authority <how-to-set-up-a-self-signed-certificate-authority>`.
☐ Generate four keys and corresponding certificate signing requests (CSRs):
Follow Standard and Unique Naming Convention
#. Server Certificate (a.k.a. Member Certificate) for the MongoDB instance
☐ Name of the MongoDB instance (``mdb-instance-*``)
☐ Name of the BigchainDB instance (``bdb-instance-*``)
☐ Name of the NGINX instance (``ngx-http-instance-*`` or ``ngx-https-instance-*``)
☐ Name of the OpenResty instance (``openresty-instance-*``)
☐ Name of the MongoDB monitoring agent instance (``mdb-mon-instance-*``)
☐ Name of the Tendermint instance (``tm-instance-*``)
**Example**
.. code:: text
{
"MongoDB": [
"mdb-instance-1",
"mdb-instance-2",
"mdb-instance-3",
"mdb-instance-4"
],
"BigchainDB": [
"bdb-instance-1",
"bdb-instance-2",
"bdb-instance-3",
"bdb-instance-4"
],
"NGINX": [
"ngx-instance-1",
"ngx-instance-2",
"ngx-instance-3",
"ngx-instance-4"
],
"OpenResty": [
"openresty-instance-1",
"openresty-instance-2",
"openresty-instance-3",
"openresty-instance-4"
],
"MongoDB_Monitoring_Agent": [
"mdb-mon-instance-1",
"mdb-mon-instance-2",
"mdb-mon-instance-3",
"mdb-mon-instance-4"
],
"Tendermint": [
"tm-instance-1",
"tm-instance-2",
"tm-instance-3",
"tm-instance-4"
]
}
☐ Generate three keys and corresponding certificate signing requests (CSRs):
#. Server Certificate for the MongoDB instance
#. Client Certificate for BigchainDB Server to identify itself to MongoDB
#. Client Certificate for MongoDB Monitoring Agent to identify itself to MongoDB
#. Client Certificate for MongoDB Backup Agent to identify itself to MongoDB
Ask the managing organization to use its self-signed CA to sign those four CSRs.
They should send you:
* Four certificates (one for each CSR you sent them).
* One ``ca.crt`` file: their CA certificate.
* One ``crl.pem`` file: a certificate revocation list.
For help, see the pages:
* :ref:`How to Generate a Server Certificate for MongoDB`
* :ref:`How to Generate a Client Certificate for MongoDB`
☐ Every node in a BigchainDB cluster needs its own
BigchainDB keypair (i.e. a public key and corresponding private key).
You can generate a BigchainDB keypair for your node, for example,
using the `BigchainDB Python Driver <http://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>`_.
.. code:: python
from bigchaindb_driver.crypto import generate_keypair
print(generate_keypair())
☐ Share your BigchaindB *public* key with all the other nodes
in the BigchainDB cluster.
Don't share your private key.
☐ Get the BigchainDB public keys of all the other nodes in the cluster.
That list of public keys is known as the BigchainDB "keyring."
Use the self-signed CA to sign those three CSRs. For help, see the pages:
* :doc:`How to Generate a Server Certificate for MongoDB <../production-deployment-template/server-tls-certificate>`
* :doc:`How to Generate a Client Certificate for MongoDB <../production-deployment-template/client-tls-certificate>`
☐ Make up an FQDN for your BigchainDB node (e.g. ``mynode.mycorp.com``).
Make sure you've registered the associated domain name (e.g. ``mycorp.com``),
and have an SSL certificate for the FQDN.
(You can get an SSL certificate from any SSL certificate provider.)
☐ Ask the managing organization for the user name to use for authenticating to
☐ Ask the BigchainDB Node operator/owner for the username to use for authenticating to
MongoDB.
☐ If the cluster uses 3scale for API authentication, monitoring and billing,
you must ask the managing organization for all relevant 3scale credentials -
you must ask the BigchainDB node operator/owner for all relevant 3scale credentials -
secret token, service ID, version header and API service token.
☐ If the cluster uses MongoDB Cloud Manager for monitoring and backup,
☐ If the cluster uses MongoDB Cloud Manager for monitoring,
you must ask the managing organization for the ``Project ID`` and the
``Agent API Key``.
(Each Cloud Manager "Project" has its own ``Project ID``. A ``Project ID`` can
@ -119,11 +173,7 @@ allow easier periodic rotation of the ``Agent API Key`` with a constant
``Project ID``)
:doc:`Deploy a Kubernetes cluster on Azure <template-kubernetes-azure>`.
:doc:`Deploy a Kubernetes cluster on Azure <../production-deployment-template/template-kubernetes-azure>`.
☐ You can now proceed to set up your BigchainDB node based on whether it is the
:ref:`first node in a new cluster
<Kubernetes Template: Deploy a Single BigchainDB Node>` or a
:ref:`node that will be added to an existing cluster
<Kubernetes Template: Add a BigchainDB Node to an Existing BigchainDB Cluster>`.
☐ You can now proceed to set up your :ref:`BigchainDB node
<kubernetes-template-deploy-a-single-bigchaindb-node>`.

View File

@ -12,8 +12,8 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: bigchaindb
image: bigchaindb/bigchaindb:1.3.0
imagePullPolicy: IfNotPresent
image: bigchaindb/bigchaindb:unstable
imagePullPolicy: Always
args:
- start
env:
@ -27,13 +27,11 @@ spec:
configMapKeyRef:
name: vars
key: mongodb-backend-port
- name: BIGCHAINDB_DATABASE_REPLICASET
- name: BIGCHAINDB_DATABASE_BACKEND
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-replicaset-name
- name: BIGCHAINDB_DATABASE_BACKEND
value: mongodb
name: bdb-config
key: bdb-db-backend
- name: BIGCHAINDB_DATABASE_NAME
valueFrom:
configMapKeyRef:
@ -69,13 +67,6 @@ spec:
configMapKeyRef:
name: vars
key: bigchaindb-wsserver-advertised-scheme
- name: BIGCHAINDB_KEYPAIR_PUBLIC
valueFrom:
configMapKeyRef:
name: bdb-config
key: bdb-public-key
- name: BIGCHAINDB_KEYPAIR_PRIVATE
value: "<private key here>"
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
valueFrom:
configMapKeyRef:
@ -111,19 +102,33 @@ spec:
configMapKeyRef:
name: bdb-config
key: bdb-user
# The following env var is not required for the bootstrap/first node
#- name: BIGCHAINDB_KEYRING
# valueFrom:
# configMapKeyRef:
# name: bdb-config
# key: bdb-keyring
- name: BIGCHAINDB_START_TENDERMINT
value: "0"
- name: TENDERMINT_HOST
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-instance-name
- name: TENDERMINT_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-rpc-port
command:
- bash
- "-c"
- |
bigchaindb -l DEBUG start
ports:
- containerPort: "<bigchaindb-api-port from ConfigMap>"
- containerPort: 9984
protocol: TCP
name: bdb-port
- containerPort: "<bigchaindb-ws-port from ConfigMap>"
- containerPort: 9985
protocol: TCP
name: bdb-ws-port
- containerPort: 46658
protocol: TCP
name: tm-abci-port
volumeMounts:
- name: bdb-certs
mountPath: /etc/bigchaindb/ssl/

View File

@ -9,13 +9,17 @@ spec:
selector:
app: bdb-instance-0-dep
ports:
- port: "<bigchaindb-api-port from ConfigMap>"
targetPort: "<bigchaindb-api-port from ConfigMap>"
- port: 9984
targetPort: 9984
name: bdb-api-port
protocol: TCP
- port: "<bigchaindb-ws-port from ConfigMap>"
targetPort: "<bigchaindb-ws-port from ConfigMap>"
- port: 9985
targetPort: 9985
name: bdb-ws-port
protocol: TCP
- port: 46658
targetPort: 46658
name: tm-abci-port
protocol: TCP
type: ClusterIP
clusterIP: None

View File

@ -41,10 +41,6 @@ data:
# in this cluster.
mdb-mon-instance-name: "<name of the mdb monitoring agent instance>"
# mdb-bak-instance-name is the name of the MongoDB Backup Agent instance
# in this cluster.
mdb-bak-instance-name: "<name of the mdb backup agent instance>"
# ngx-mdb-instance-name is the FQDN of the MongoDB instance in this
# Kubernetes cluster.
ngx-mdb-instance-name: "<name of the mdb instance>.default.svc.cluster.local"
@ -57,11 +53,6 @@ data:
# Kubernetes cluster.
ngx-bdb-instance-name: "<name of the bdb instance>.default.svc.cluster.local"
# mongodb-frontend-port is the port number on which external clients can
# access MongoDB. This needs to be restricted to only other MongoDB instances
# by enabling an authentication mechanism on MongoDB.
mongodb-frontend-port: "27017"
# mongodb-backend-port is the port on which MongoDB is actually
# available/listening for requests.
mongodb-backend-port: "27017"
@ -88,9 +79,6 @@ data:
bigchaindb-ws-port: "9985"
bigchaindb-ws-interface: "0.0.0.0"
# mongodb-replicaset-name is the MongoDB replica set name
mongodb-replicaset-name: "bigchain-rs"
# bigchaindb-database-name is the database collection used by BigchainDB with
# the MongoDB backend.
bigchaindb-database-name: "bigchain"
@ -111,17 +99,9 @@ metadata:
name: bdb-config
namespace: default
data:
# Colon-separated list of all *other* nodes' BigchainDB public keys.
bdb-keyring: "<':' separated list of public keys>"
# BigchainDB instance authentication user name
bdb-user: "<user name>"
# BigchainDB public key of *this* node.
# Generated using bigchaindb_driver in the docs
# Example: "EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"
bdb-public-key: "<public key>"
# bigchaindb-backlog-reassign-delay is the number of seconds a transaction
# can remain in the backlog before being reassigned.
bigchaindb-backlog-reassign-delay: "120"
@ -138,3 +118,51 @@ data:
# bigchaindb-log-level is the log level used to log to the console.
bigchaindb-log-level: "debug"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: tendermint-config
namespace: default
data:
# tm-seeds is the list of all the peers in the network.
tm-seeds: "<',' separated list of all tendermint nodes in the network>"
# tm-validators is the list of all validators in the network.
tm-validators: "<',' separated list of all validators in the network>"
# tm-validator-power is the validators voting power, make sure the order and
# the number of nodes in tm-validator-power and tm-validators is the same.
tm-validator-power: "<',' separated list of validator power of each node in the network>"
# tm-genesis-time is the official time of blockchain start.
# example: 0001-01-01T00:00:00Z
tm-genesis-time: "<timestamp of blockchain start>"
# tm-chain-id is the ID of the blockchain. Must be unique for every blockchain.
# example: test-chain-KPI1Ud
tm-chain-id: "<ID of the blockchain>"
# tendermint-instance-name is the name of the Tendermint instance
# in the cluster
tm-instance-name: "<name of tendermint instance>"
# ngx-tm-instance-name is the FQDN of the tendermint instance in this cluster
ngx-tm-instance-name: "<name of tendermint instance>.default.svc.cluster.local"
# tm-abci-port is used by Tendermint Core for ABCI traffic. BigchainDB nodes
# use that internally.
tm-abci-port: "46658"
# tm-p2p-port is used by Tendermint Core to communicate with
# other peers in the network. This port is accessible publicly.
tm-p2p-port: "46656"
# tm-rpc-port is used by Tendermint Core for RPC traffic. BigchainDB nodes
# use this port internally.
tm-rpc-port: "46657"
# tm-pub-key-access is the port number used to host/publish the
# public key of the tendermint node in this cluster.
tm-pub-key-access: "9986"

View File

@ -22,17 +22,6 @@ data:
---
apiVersion: v1
kind: Secret
metadata:
name: bdb-private-key
namespace: default
type: Opaque
data:
# Base64-encoded BigchainDB private key of *this* node
# Generated using bigchaindb_driver in the docs
private.key: "<b64 encoded BigchainDB private key>"
---
apiVersion: v1
kind: Secret
metadata:
name: mdb-certs
namespace: default
@ -53,16 +42,6 @@ data:
---
apiVersion: v1
kind: Secret
metadata:
name: mdb-bak-certs
namespace: default
type: Opaque
data:
# Base64-encoded, concatenated certificate and private key
mdb-bak-instance.pem: "<b64 encoded, concatenated certificate and private key>"
---
apiVersion: v1
kind: Secret
metadata:
name: bdb-certs
namespace: default

View File

@ -1,24 +0,0 @@
FROM ubuntu:xenial
LABEL maintainer "dev@bigchaindb.com"
ARG DEBIAN_FRONTEND=noninteractive
ARG DEB_FILE=mongodb-mms-backup-agent_latest_amd64.ubuntu1604.deb
ARG FILE_URL="https://cloud.mongodb.com/download/agent/backup/"$DEB_FILE
WORKDIR /
RUN apt update \
&& apt -y upgrade \
&& apt -y install --no-install-recommends \
curl \
ca-certificates \
logrotate \
libsasl2-2 \
&& curl -OL $FILE_URL \
&& dpkg -i $DEB_FILE \
&& rm -f $DEB_FILE \
&& apt -y purge curl \
&& apt -y autoremove \
&& apt clean
COPY mongodb_backup_agent_entrypoint.bash /
RUN chown -R mongodb-mms-agent:mongodb-mms-agent /etc/mongodb-mms/
VOLUME /etc/mongod/ssl /etc/mongod/ca
USER mongodb-mms-agent
ENTRYPOINT ["/mongodb_backup_agent_entrypoint.bash"]

View File

@ -1,5 +0,0 @@
#!/bin/bash
docker build -t bigchaindb/mongodb-backup-agent:3.5 .
docker push bigchaindb/mongodb-backup-agent:3.5

View File

@ -1,44 +0,0 @@
#!/bin/bash
set -euo pipefail
MONGODB_BACKUP_CONF_FILE=/etc/mongodb-mms/backup-agent.config
mms_api_keyfile_path=`printenv MMS_API_KEYFILE_PATH`
mms_groupid_keyfile_path=`printenv MMS_GROUPID_KEYFILE_PATH`
ca_crt_path=`printenv CA_CRT_PATH`
backup_pem_path=`printenv BACKUP_PEM_PATH`
if [[ -z "${mms_api_keyfile_path:?MMS_API_KEYFILE_PATH not specified. Exiting!}" || \
-z "${ca_crt_path:?CA_CRT_PATH not specified. Exiting!}" || \
-z "${backup_pem_path:?BACKUP_PEM_PATH not specified. Exiting!}" || \
-z "${mms_groupid_keyfile_path:?MMS_GROUPID_KEYFILE_PATH not specified. Exiting!}" ]]; then
exit 1
else
echo MMS_API_KEYFILE_PATH="$mms_api_keyfile_path"
echo MMS_GROUPID_KEYFILE_PATH="$mms_groupid_keyfile_path"
echo CA_CRT_PATH="$ca_crt_path"
echo BACKUP_PEM_PATH="$backup_pem_path"
fi
sed -i '/mmsApiKey/d' ${MONGODB_BACKUP_CONF_FILE}
sed -i '/mmsGroupId/d' ${MONGODB_BACKUP_CONF_FILE}
sed -i '/mothership/d' ${MONGODB_BACKUP_CONF_FILE}
# Get the api key from file
mms_api_key=`cat ${mms_api_keyfile_path}`
mms_groupid_key=`cat ${mms_groupid_keyfile_path}`
echo "mmsApiKey="${mms_api_key} >> ${MONGODB_BACKUP_CONF_FILE}
echo "mmsGroupId="${mms_groupid_key} >> ${MONGODB_BACKUP_CONF_FILE}
echo "mothership=api-backup.eu-west-1.mongodb.com" >> ${MONGODB_BACKUP_CONF_FILE}
# Append SSL settings to the config file
echo "useSslForAllConnections=true" >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslRequireValidServerCertificates=true" >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslTrustedServerCertificates="${ca_crt_path} >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslClientCertificate="${backup_pem_path} >> ${MONGODB_BACKUP_CONF_FILE}
echo "#sslClientCertificatePassword=<password>" >> ${MONGODB_BACKUP_CONF_FILE}
echo "INFO: starting mdb backup..."
exec mongodb-mms-backup-agent -c $MONGODB_BACKUP_CONF_FILE

View File

@ -1,65 +0,0 @@
############################################################
# This config file defines a k8s Deployment for the #
# bigchaindb/mongodb-backup-agent Docker image #
# #
# It connects to a MongoDB instance in a separate pod, #
# all remote MongoDB instances in the cluster, #
# and also to MongoDB Cloud Manager (an external service). #
# Notes: #
# MongoDB agents connect to Cloud Manager on port 443. #
############################################################
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: mdb-bak-instance-0-dep
spec:
replicas: 1
template:
metadata:
name: mdb-bak-instance-0-dep
labels:
app: mdb-bak-instance-0-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mdb-backup
image: bigchaindb/mongodb-backup-agent:3.5
imagePullPolicy: IfNotPresent
env:
- name: MMS_API_KEYFILE_PATH
value: /etc/mongod/cloud/agent-api-key
- name: MMS_GROUPID_KEYFILE_PATH
value: /etc/mongod/cloud/group-id
- name: CA_CRT_PATH
value: /etc/mongod/ca/ca.pem
- name: BACKUP_PEM_PATH
value: /etc/mongod/ssl/mdb-bak-instance.pem
resources:
limits:
cpu: 200m
memory: 768Mi
volumeMounts:
- name: mdb-bak-certs
mountPath: /etc/mongod/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/mongod/ca/
readOnly: true
- name: cloud-manager-credentials
mountPath: /etc/mongod/cloud/
readOnly: true
restartPolicy: Always
volumes:
- name: mdb-bak-certs
secret:
secretName: mdb-bak-certs
defaultMode: 0404
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0404
- name: cloud-manager-credentials
secret:
secretName: cloud-manager-credentials
defaultMode: 0404

View File

@ -1,4 +1,4 @@
FROM mongo:3.4.10
FROM mongo:3.4.13
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \

View File

@ -1,14 +1,5 @@
## Custom MongoDB container for BigchainDB Backend
### Need
* MongoDB needs the hostname provided in the `rs.initiate()` command to be
resolvable through the hosts file locally.
* In the future, with the introduction of TLS for inter-cluster MongoDB
communications, we will need a way to specify detailed configuration.
* We also need a way to overwrite certain parameters to suit our use case.
### Step 1: Build and Push the Latest Container
Use the `docker_build_and_push.bash` script to build the latest docker image
and upload it to Docker Hub.
@ -27,84 +18,11 @@ docker run \
--volume=<host dir for mongodb data files>:/data/db \
--volume=<host dir for mongodb config data files>:/data/configdb \
--volume=<host dir with the required TLS certificates>:/mongo-ssl:ro \
bigchaindb/mongodb:3.0 \
bigchaindb/mongodb:<version of container> \
--mongodb-port <mongod port number for external connections> \
--mongodb-key-file-path /mongo-ssl/<private key file name>.pem \
--mongodb-key-file-password <password for the private key file> \
--mongodb-ca-file-path /mongo-ssl/<ca certificate file name>.crt \
--mongodb-crl-file-path /mongo-ssl/<crl certificate file name>.pem \
--replica-set-name <replica set name> \
--mongodb-fqdn <fully qualified domain name of this instance> \
--mongodb-ip <ip address of the mongodb container>
```
#### Step 3: Initialize the Replica Set
Login to one of the MongoDB containers, say mdb1:
`docker exec -it mongodb bash`
Since we need TLS certificates to use the mongo shell now, copy them using:
```
docker cp bdb-instance-0.pem mongodb:/
docker cp ca.crt mongodb:/
```
Start the `mongo` shell:
```
mongo --host mdb1-fqdn --port mdb1-port --verbose --ssl \
--sslCAFile /ca.crt \
--sslPEMKeyFile /bdb-instance-0.pem \
--sslPEMKeyPassword password
```
Run the rs.initiate() command:
```
rs.initiate({
_id : "<replica-set-name", members: [
{
_id : 0,
host : "<fqdn of this instance>:<port number>"
} ]
})
```
For example:
```
rs.initiate({ _id : "test-repl-set", members: [ { _id : 0, host :
"mdb-instance-0.westeurope.cloudapp.azure.com:27017" } ] })
```
You should also see changes in the mongo shell prompt from `>` to
`test-repl-set:OTHER>` to `test-repl-set:SECONDARY>` to finally
`test-repl-set:PRIMARY>`.
If this instance is not the primary, you can use the `rs.status()` command to
find out who is the primary.
#### Step 4: Add members to the Replica Set
We can only add members to a replica set from the PRIMARY instance.
Login to the PRIMARY and open a `mongo` shell.
Run the rs.add() command with the ip and port number of the other
containers/instances:
```
rs.add("<fqdn>:<port>")
```
For example:
Add mdb2 to replica set from mdb1:
```
rs.add("bdb-cluster-1.northeurope.cloudapp.azure.com:27017")
```
Add mdb3 to replica set from mdb1:
```
rs.add("bdb-cluster-2.northeurope.cloudapp.azure.com:27017")
```

View File

@ -1,5 +1,4 @@
#!/bin/bash
docker build -t bigchaindb/mongodb:3.3 .
docker push bigchaindb/mongodb:3.3
docker build -t bigchaindb/localmongodb:unstable .
docker push bigchaindb/localmongodb:unstable

View File

@ -25,8 +25,6 @@ systemLog:
verbosity: 0
query:
verbosity: 0
replication:
verbosity: 0
sharding:
verbosity: 0
storage:
@ -95,7 +93,3 @@ storage:
operationProfiling:
mode: slowOp
slowOpThresholdMs: 100
replication:
replSetName: REPLICA_SET_NAME
enableMajorityReadConcern: true

View File

@ -3,10 +3,8 @@ set -euo pipefail
MONGODB_PORT=""
MONGODB_KEY_FILE_PATH=""
#MONGODB_KEY_FILE_PASSWORD=""
MONGODB_CA_FILE_PATH=""
MONGODB_CRL_FILE_PATH=""
REPLICA_SET_NAME=""
MONGODB_FQDN=""
MONGODB_IP=""
@ -21,11 +19,6 @@ while [[ $# -gt 1 ]]; do
MONGODB_KEY_FILE_PATH="$2"
shift
;;
--mongodb-key-file-password)
# TODO(Krish) move this to a mapped file later
MONGODB_KEY_FILE_PASSWORD="$2"
shift
;;
--mongodb-ca-file-path)
MONGODB_CA_FILE_PATH="$2"
shift
@ -34,10 +27,6 @@ while [[ $# -gt 1 ]]; do
MONGODB_CRL_FILE_PATH="$2"
shift
;;
--replica-set-name)
REPLICA_SET_NAME="$2"
shift
;;
--mongodb-fqdn)
MONGODB_FQDN="$2"
shift
@ -59,20 +48,15 @@ while [[ $# -gt 1 ]]; do
done
# sanity checks
if [[ -z "${REPLICA_SET_NAME:?REPLICA_SET_NAME not specified. Exiting!}" || \
-z "${MONGODB_PORT:?MONGODB_PORT not specified. Exiting!}" || \
if [[ -z "${MONGODB_PORT:?MONGODB_PORT not specified. Exiting!}" || \
-z "${MONGODB_FQDN:?MONGODB_FQDN not specified. Exiting!}" || \
-z "${MONGODB_IP:?MONGODB_IP not specified. Exiting!}" || \
-z "${MONGODB_KEY_FILE_PATH:?MONGODB_KEY_FILE_PATH not specified. Exiting!}" || \
-z "${MONGODB_CA_FILE_PATH:?MONGODB_CA_FILE_PATH not specified. Exiting!}" || \
-z "${MONGODB_CRL_FILE_PATH:?MONGODB_CRL_FILE_PATH not specified. Exiting!}" ]] ; then
# Not handling the STORAGE_ENGINE_CACHE_SIZE because
# it is optional. If not specified the default cache
# size is: max((50% RAM - 1GB), 256MB)
echo "Missing required enviroment variable(s)."
-z "${MONGODB_CRL_FILE_PATH:?MONGODB_CRL_FILE_PATH not specified. Exiting!}" || \
-z "${STORAGE_ENGINE_CACHE_SIZE:=''}" ]] ; then
exit 1
else
echo REPLICA_SET_NAME="$REPLICA_SET_NAME"
echo MONGODB_PORT="$MONGODB_PORT"
echo MONGODB_FQDN="$MONGODB_FQDN"
echo MONGODB_IP="$MONGODB_IP"
@ -88,10 +72,8 @@ HOSTS_FILE_PATH=/etc/hosts
# configure the mongod.conf file
sed -i "s|MONGODB_PORT|${MONGODB_PORT}|g" ${MONGODB_CONF_FILE_PATH}
sed -i "s|MONGODB_KEY_FILE_PATH|${MONGODB_KEY_FILE_PATH}|g" ${MONGODB_CONF_FILE_PATH}
#sed -i "s|MONGODB_KEY_FILE_PASSWORD|${MONGODB_KEY_FILE_PASSWORD}|g" ${MONGODB_CONF_FILE_PATH}
sed -i "s|MONGODB_CA_FILE_PATH|${MONGODB_CA_FILE_PATH}|g" ${MONGODB_CONF_FILE_PATH}
sed -i "s|MONGODB_CRL_FILE_PATH|${MONGODB_CRL_FILE_PATH}|g" ${MONGODB_CONF_FILE_PATH}
sed -i "s|REPLICA_SET_NAME|${REPLICA_SET_NAME}|g" ${MONGODB_CONF_FILE_PATH}
if [ ! -z "$STORAGE_ENGINE_CACHE_SIZE" ]; then
if [[ "$STORAGE_ENGINE_CACHE_SIZE" =~ ^[0-9]+(G|M|T)B$ ]]; then
sed -i.bk "s|STORAGE_ENGINE_CACHE_SIZE|${STORAGE_ENGINE_CACHE_SIZE}|g" ${MONGODB_CONF_FILE_PATH}

View File

@ -1,5 +1,5 @@
####################################################################
# This YAML section describes a StorageClass for the mongodb dbPath #
####################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
@ -15,7 +15,7 @@ parameters:
#kind: Managed
---
######################################################################
# This YAML section describes a StorageClass for the mongodb configDB #
######################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1

View File

@ -21,8 +21,8 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb
image: bigchaindb/mongodb:3.2
imagePullPolicy: IfNotPresent
image: bigchaindb/localmongodb:unstable
imagePullPolicy: Always
env:
- name: MONGODB_FQDN
valueFrom:
@ -33,11 +33,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MONGODB_REPLICA_SET_NAME
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-replicaset-name
- name: MONGODB_PORT
valueFrom:
configMapKeyRef:
@ -57,8 +52,6 @@ spec:
- /etc/mongod/ca/ca.pem
- --mongodb-crl-file-path
- /etc/mongod/ca/crl.pem
- --replica-set-name
- $(MONGODB_REPLICA_SET_NAME)
- --mongodb-fqdn
- $(MONGODB_FQDN)
- --mongodb-ip
@ -70,7 +63,7 @@ spec:
add:
- FOWNER
ports:
- containerPort: "<mongodb-backend-port from ConfigMap>"
- containerPort: 27017
protocol: TCP
name: mdb-api-port
volumeMounts:

View File

@ -9,8 +9,8 @@ spec:
selector:
app: mdb-instance-0-ss
ports:
- port: "<mongodb-backend-port from ConfigMap>"
targetPort: "<mongodb-backend-port from ConfigMap>"
- port: 27017
targetPort: 27017
name: mdb-port
protocol: TCP
type: ClusterIP

View File

@ -7,5 +7,5 @@ RUN apt-get update \
&& apt-get clean
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 80 27017
EXPOSE 80 27017 9986 46656
ENTRYPOINT ["/nginx_entrypoint.bash"]

View File

@ -9,7 +9,7 @@ reflect any changes made to the container.
### Note about testing Websocket connections:
You can test the WebSocket server by using the
[wsc](https://www.npmjs.com/package/wsc) tool with a command like:
`wsc -er ws://localhost:9985/api/v1/streams/valid_transactions`.

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_http:1.1 .
docker build -t bigchaindb/nginx_http:unstable .
docker push bigchaindb/nginx_http:1.1
docker push bigchaindb/nginx_http:unstable

View File

@ -123,16 +123,15 @@ http {
}
}
# NGINX stream block for TCP and UDP proxies. Used to proxy MDB TCP
# connection.
# NGINX stream block for TCP and UDP proxies.
stream {
log_format mdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
log_format bdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
'$proxy_protocol_addr $proxy_protocol_port '
'$protocol $status $session_time $bytes_sent '
'$bytes_received "$upstream_addr" "$upstream_bytes_sent" '
'"$upstream_bytes_received" "$upstream_connect_time" ';
access_log /dev/stdout mdb_log buffer=16k flush=5s;
access_log /dev/stdout bdb_log buffer=16k flush=5s;
# Define a zone 'two' of size 10 megabytes to store the counters
# that hold number of TCP connections from a specific IP address.
@ -149,16 +148,23 @@ stream {
# The following map block enables lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $mdb_backend {
default MONGODB_BACKEND_HOST;
map $remote_addr $tm_backend {
default TM_BACKEND_HOST;
}
# Frontend server to forward connections to MDB instance.
# Server to forward connection to nginx instance hosting
# tendermint node public key.
server {
listen MONGODB_FRONTEND_PORT so_keepalive=10m:1m:5;
preread_timeout 30s;
listen TM_PUB_KEY_ACCESS_PORT;
proxy_pass $tm_backend:TM_PUB_KEY_ACCESS_PORT;
}
# Server to forward p2p connections to Tendermint instance.
server {
listen TM_P2P_PORT so_keepalive=3m:1m:5;
preread_timeout 60s;
tcp_nodelay on;
proxy_pass $mdb_backend:MONGODB_BACKEND_PORT;
proxy_pass $tm_backend:TM_P2P_PORT;
}
}

View File

@ -31,7 +31,10 @@ if [[ -z "${cluster_frontend_port:?CLUSTER_FRONTEND_PORT not specified. Exiting!
-z "${bdb_api_port:?BIGCHAINDB_API_PORT not specified. Exiting!}" || \
-z "${bdb_ws_port:?BIGCHAINDB_WS_PORT not specified. Exiting!}" || \
-z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \
-z "${health_check_port:?HEALTH_CHECK_PORT not specified.}" ]]; then
-z "${health_check_port:?HEALTH_CHECK_PORT not specified.}" || \
-z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting!}" || \
-z "${tm_backend_host:?TM_BACKEND_HOST not specified. Exiting!}" || \
-z "${tm_p2p_port:?TM_P2P_PORT not specified. Exiting!}" ]]; then
exit 1
else
echo CLUSTER_FRONTEND_PORT="$cluster_frontend_port"
@ -43,6 +46,9 @@ else
echo BIGCHAINDB_BACKEND_HOST="$bdb_backend_host"
echo BIGCHAINDB_API_PORT="$bdb_api_port"
echo BIGCHAINDB_WS_PORT="$bdb_ws_port"
echo TM_PUB_KEY_ACCESS_PORT="$tm_pub_key_access_port"
echo TM_BACKEND_HOST="$tm_backend_host"
echo TM_P2P_PORT="$tm_p2p_port"
fi
NGINX_CONF_FILE=/etc/nginx/nginx.conf
@ -57,6 +63,9 @@ sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_WS_PORT|${bdb_ws_port}|g" ${NGINX_CONF_FILE}
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|HEALTH_CHECK_PORT|${health_check_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_PUB_KEY_ACCESS_PORT|${tm_pub_key_access_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_BACKEND_HOST|${tm_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_P2P_PORT|${tm_p2p_port}|g" ${NGINX_CONF_FILE}
# start nginx
echo "INFO: starting nginx..."

View File

@ -12,8 +12,8 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: bigchaindb/nginx_http:1.1
imagePullPolicy: IfNotPresent
image: bigchaindb/nginx_http:unstable
imagePullPolicy: Always
env:
- name: CLUSTER_FRONTEND_PORT
valueFrom:
@ -30,11 +30,6 @@ spec:
configMapKeyRef:
name: vars
key: cluster-dns-server-ip
- name: MONGODB_FRONTEND_PORT
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-frontend-port
- name: MONGODB_BACKEND_HOST
valueFrom:
configMapKeyRef:
@ -60,14 +55,33 @@ spec:
configMapKeyRef:
name: vars
key: bigchaindb-ws-port
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
- name: TM_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: tendermint-config
key: ngx-tm-instance-name
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-p2p-port
ports:
- containerPort: "<mongodb-frontend-port from ConfigMap>"
protocol: TCP
- containerPort: "<cluster-health-check-port from ConfigMap>"
protocol: TCP
name: ngx-health
- containerPort: "<cluster-frontend-port from ConfigMap>"
protocol: TCP
- containerPort: "<tm-pub-key-access from ConfigMap>"
protocol: TCP
name: tm-pub-key
- containerPort: "<tm-p2p-port from ConfigMap>"
protocol: TCP
name: tm-p2p-port
livenessProbe:
httpGet:
path: /health

View File

@ -17,4 +17,12 @@ spec:
targetPort: "<cluster-frontend-port from ConfigMap>"
name: public-cluster-port
protocol: TCP
- port: "<tm-pub-key-access from ConfigMap>"
targetPort: "<tm-pub-key-access from ConfigMap>"
name: tm-pub-key-access
protocol: TCP
- port: "<tm-p2p-port from ConfigMap>"
targetPort: "<tm-p2p-port from ConfigMap>"
protocol: TCP
name: tm-p2p-port
type: LoadBalancer

View File

@ -7,5 +7,5 @@ RUN apt-get update \
&& apt-get clean
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 80 443 27017
EXPOSE 80 443 27017 9986 46656
ENTRYPOINT ["/nginx_entrypoint.bash"]

View File

@ -9,7 +9,7 @@ reflect any changes made to the container.
### Note about testing Websocket connections:
You can test the WebSocket server by using the
[wsc](https://www.npmjs.com/package/wsc) tool with a command like:
`wsc -er wss://localhost:9985/api/v1/streams/valid_transactions`.

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_https:1.1 .
docker build -t bigchaindb/nginx_https:unstable .
docker push bigchaindb/nginx_https:1.1
docker push bigchaindb/nginx_https:unstable

View File

@ -2,8 +2,7 @@
# 1. Acts as the HTTPS termination point.
# 2. Forwards BDB HTTP requests to OpenResty backend.
# 3. Forwards BDB WS requests to BDB backend.
# 4. Forwards MDB TCP connections to MDB backend.
# 5. Does health check with LB.
# 4. Does health check with LB.
worker_processes 2;
daemon off;
@ -152,16 +151,15 @@ http {
}
}
# NGINX stream block for TCP and UDP proxies. Used to proxy MDB TCP
# connection.
# NGINX stream block for TCP and UDP proxies.
stream {
log_format mdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
log_format bdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
'$proxy_protocol_addr $proxy_protocol_port '
'$protocol $status $session_time $bytes_sent '
'$bytes_received "$upstream_addr" "$upstream_bytes_sent" '
'"$upstream_bytes_received" "$upstream_connect_time" ';
access_log /dev/stdout mdb_log buffer=16k flush=5s;
access_log /dev/stdout bdb_log buffer=16k flush=5s;
# Define a zone 'two' of size 10 megabytes to store the counters
# that hold number of TCP connections from a specific IP address.
@ -170,10 +168,6 @@ stream {
# Enable logging when connections are being throttled.
limit_conn_log_level notice;
# For a multi node BigchainDB deployment we need around 2^5 connections
# (for inter-node communication)per node via NGINX, we can bump this up in case
# there is a requirement to scale up. But we should not remove this
# for security reasons.
# Allow 256 connections from the same IP address.
limit_conn two 256;
@ -182,16 +176,23 @@ stream {
# The following map block enables lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $mdb_backend {
default MONGODB_BACKEND_HOST;
map $remote_addr $tm_backend {
default TM_BACKEND_HOST;
}
# Frontend server to forward connections to MDB instance.
# Server to forward connection to nginx instance hosting
# tendermint node public key.
server {
listen MONGODB_FRONTEND_PORT so_keepalive=3m:1m:5;
preread_timeout 30s;
listen TM_PUB_KEY_ACCESS_PORT;
proxy_pass $tm_backend:TM_PUB_KEY_ACCESS_PORT;
}
# Server to forward p2p connections to Tendermint instance.
server {
listen TM_P2P_PORT so_keepalive=3m:1m:5;
preread_timeout 60s;
tcp_nodelay on;
proxy_pass $mdb_backend:MONGODB_BACKEND_PORT;
proxy_pass $tm_backend:TM_P2P_PORT;
}
}

View File

@ -27,6 +27,10 @@ bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_api_port=`printenv BIGCHAINDB_API_PORT`
bdb_ws_port=`printenv BIGCHAINDB_WS_PORT`
# Tendermint vars
tm_pub_key_access_port=`printenv TM_PUB_KEY_ACCESS_PORT`
tm_backend_host=`printenv TM_BACKEND_HOST`
tm_p2p_port=`printenv TM_P2P_PORT`
# sanity check
if [[ -z "${cluster_frontend_port:?CLUSTER_FRONTEND_PORT not specified. Exiting!}" || \
@ -40,7 +44,11 @@ if [[ -z "${cluster_frontend_port:?CLUSTER_FRONTEND_PORT not specified. Exiting!
-z "${bdb_ws_port:?BIGCHAINDB_WS_PORT not specified. Exiting!}" || \
-z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \
-z "${health_check_port:?HEALTH_CHECK_PORT not specified. Exiting!}" || \
-z "${cluster_fqdn:?CLUSTER_FQDN not specified. Exiting!}" ]]; then
-z "${cluster_fqdn:?CLUSTER_FQDN not specified. Exiting!}" || \
-z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting!}" || \
-z "${tm_backend_host:?TM_BACKEND_HOST not specified. Exiting!}" || \
-z "${tm_p2p_port:?TM_P2P_PORT not specified. Exiting!}" ]]; then
echo "Missing required environment variables. Exiting!"
exit 1
else
echo CLUSTER_FQDN="$cluster_fqdn"
@ -55,6 +63,9 @@ else
echo BIGCHAINDB_BACKEND_HOST="$bdb_backend_host"
echo BIGCHAINDB_API_PORT="$bdb_api_port"
echo BIGCHAINDB_WS_PORT="$bdb_ws_port"
echo TM_PUB_KEY_ACCESS_PORT="$tm_pub_key_access_port"
echo TM_BACKEND_HOST="$tm_backend_host"
echo TM_P2P_PORT="$tm_p2p_port"
fi
NGINX_CONF_FILE=/etc/nginx/nginx.conf
@ -72,8 +83,10 @@ sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_WS_PORT|${bdb_ws_port}|g" ${NGINX_CONF_FILE}
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|HEALTH_CHECK_PORT|${health_check_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_PUB_KEY_ACCESS_PORT|${tm_pub_key_access_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_BACKEND_HOST|${tm_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_P2P_PORT|${tm_p2p_port}|g" ${NGINX_CONF_FILE}
# start nginx
echo "INFO: starting nginx..."
exec nginx -c /etc/nginx/nginx.conf

View File

@ -12,8 +12,8 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: bigchaindb/nginx_https:1.1
imagePullPolicy: IfNotPresent
image: bigchaindb/nginx_https:unstable
imagePullPolicy: Always
env:
- name: CLUSTER_FRONTEND_PORT
valueFrom:
@ -35,11 +35,6 @@ spec:
configMapKeyRef:
name: vars
key: cluster-dns-server-ip
- name: MONGODB_FRONTEND_PORT
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-frontend-port
- name: MONGODB_BACKEND_HOST
valueFrom:
configMapKeyRef:
@ -75,18 +70,37 @@ spec:
configMapKeyRef:
name: vars
key: bigchaindb-ws-port
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
- name: TM_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: tendermint-config
key: ngx-tm-instance-name
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-p2p-port
ports:
# return a pretty error message on port 80, since we are expecting
# HTTPS traffic.
- containerPort: 80
protocol: TCP
- containerPort: "<mongodb-frontend-port from ConfigMap>"
- containerPort: 443
protocol: TCP
- containerPort: "<cluster-frontend-port from ConfigMap>"
protocol: TCP
- containerPort: "<cluster-health-check-port from ConfigMap>"
- containerPort: 8888
protocol: TCP
name: ngx-port
- containerPort: 9986
protocol: TCP
name: tm-pub-key
- containerPort: 46656
protocol: TCP
name: tm-p2p-port
livenessProbe:
httpGet:
path: /health

View File

@ -13,14 +13,22 @@ spec:
selector:
app: ngx-instance-0-dep
ports:
- port: "<cluster-frontend-port from ConfigMap>"
targetPort: "<cluster-frontend-port from ConfigMap>"
- port: 443
targetPort: 443
name: public-secure-cluster-port
protocol: TCP
- port: "<mongodb-frontend-port from ConfigMap>"
targetPort: "<mongodb-frontend-port from ConfigMap>"
- port: 27017
targetPort: 27017
name: public-mdb-port
protocol: TCP
- port: 9986
targetPort: 9986
name: tm-pub-key-access
protocol: TCP
- port: 46656
targetPort: 46656
protocol: TCP
name: tm-p2p-port
- port: 80
targetPort: 80
name: public-insecure-cluster-port

212
k8s/scripts/cert_gen.sh Executable file
View File

@ -0,0 +1,212 @@
#!/usr/bin/env bash
set -euo pipefail
# base directories for operations
BASE_DIR=$(pwd)
# base variables with default values
MDB_CN="mdb-instance"
BDB_CN="bdb-instance"
MDB_MON_CN="mdb-mon-instance"
INDEX=''
CONFIGURE_CA=''
CONFIGURE_MEMBER=''
CONFIGURE_CLIENT=''
function show_help(){
cat > /dev/stdout << END
${0} --index INDEX --mdb-cn MONGODB_MEMBER_COMMON_NAME
--bdb-cn BIGCHAINDB_INSTANCE_COMMON_NAME
--mdb-mon-cn MONGODB_MONITORING_INSTANCE_COMMON_NAME [--help]
OPTIONAL ARGS:
--mdb-cn - Common name of MongoDB instance:- default ${MDB_CN}
--bdb-cn - Common name of BigchainDB instance:- default ${BDB_CN}
--mdb-mon-cn - Common name of MongoDB monitoring agent:- default ${MDB_MON_CN}
--dir - Absolute path of base directory:- default ${pwd}
--help - show help
EXAMPLES
- "Generate Certificates for first node(index=1) in the cluster i.e. MongoDB instance: mdb-instance,"
"BigchainDB instance: bdb-instance, MongoDB monitoring agent: mdb-mon-instance"
./cert_gen.sh --index 1 --mdb-cn mdb-instance --bdb-cn bdb-instance \
--mdb-mon-cn mdb-mon-instance
END
}
function configure_root_ca(){
# $1:- Base directory for Root CA
echo "Generate Root CA"
echo 'set_var EASYRSA_DN "org"' >> $1/vars
echo 'set_var EASYRSA_KEY_SIZE 4096' >> $1/vars
#TODO: Parametrize the below configurations
echo 'set_var EASYRSA_REQ_COUNTRY "DE"' >> $1/vars
echo 'set_var EASYRSA_REQ_PROVINCE "Berlin"' >> $1/vars
echo 'set_var EASYRSA_REQ_CITY "Berlin"' >> $1/vars
echo 'set_var EASYRSA_REQ_ORG "BigchainDB GmbH"' >> $1/vars
echo 'set_var EASYRSA_REQ_OU "ROOT-CA"' >> $1/vars
echo 'set_var EASYRSA_REQ_EMAIL "dev@bigchaindb.com"' >> $1/vars
sed -i.bk '/^extendedKeyUsage/ s/$/,clientAuth/' $1/x509-types/server
echo "set_var EASYRSA_SSL_CONF \"$1/openssl-1.0.cnf\"" >> $1/vars
echo "set_var EASYRSA_PKI \"$1/pki\"" >> $1/vars
echo "set_var EASYRSA_EXT_DIR \"$1/x509-types\"" >> $1/vars
$1/easyrsa init-pki
$1/easyrsa build-ca
$1/easyrsa gen-crl
}
function configure_member_cert_gen(){
# $1:- Base directory for MongoDB Member Requests/Keys
echo "Generate MongoDB Member Requests/Certificate(s)"
echo 'set_var EASYRSA_DN "org"' >> $1/vars
echo 'set_var EASYRSA_KEY_SIZE 4096' >> $1/vars
#TODO: Parametrize the below configurations
echo 'set_var EASYRSA_REQ_COUNTRY "DE"' >> $1/vars
echo 'set_var EASYRSA_REQ_PROVINCE "Berlin"' >> $1/vars
echo 'set_var EASYRSA_REQ_CITY "Berlin"' >> $1/vars
echo 'set_var EASYRSA_REQ_ORG "BigchainDB GmbH"' >> $1/vars
echo 'set_var EASYRSA_REQ_OU "MONGO-MEMBER"' >> $1/vars
echo 'set_var EASYRSA_REQ_EMAIL "dev@bigchaindb.com"' >> $1/vars
echo "set_var EASYRSA_SSL_CONF \"$1/openssl-1.0.cnf\"" >> $1/vars
echo "set_var EASYRSA_PKI \"$1/pki\"" >> member-cert/easy-rsa-3.0.1/easyrsa3/vars
$1/easyrsa init-pki
$1/easyrsa --req-cn="$MDB_CN"-"$INDEX" --subject-alt-name=DNS:localhost,DNS:"$MDB_CN"-"$INDEX" gen-req "$MDB_CN"-"$INDEX" nopass
}
function configure_client_cert_gen(){
# $1:- Base directory for MongoDB Client Requests/Keys
echo "Generate MongoDB Client Requests/Certificate(s)"
echo 'set_var EASYRSA_DN "org"' >> $1/vars
echo 'set_var EASYRSA_KEY_SIZE 4096' >> $1/vars
#TODO: Parametrize the below configurations
echo 'set_var EASYRSA_REQ_COUNTRY "DE"' >> $1/vars
echo 'set_var EASYRSA_REQ_PROVINCE "Berlin"' >> $1/vars
echo 'set_var EASYRSA_REQ_CITY "Berlin"' >> $1/vars
echo 'set_var EASYRSA_REQ_ORG "BigchainDB GmbH"' >> $1/vars
echo 'set_var EASYRSA_REQ_OU "MONGO-CLIENT"' >> $1/vars
echo 'set_var EASYRSA_REQ_EMAIL "dev@bigchaindb.com"' >> $1/vars
echo "set_var EASYRSA_SSL_CONF \"$1/openssl-1.0.cnf\"" >> $1/vars
echo "set_var EASYRSA_PKI \"$1/pki\"" >> $1/vars
$1/easyrsa init-pki
$1/easyrsa gen-req "$BDB_CN"-"$INDEX" nopass
$1/easyrsa gen-req "$MDB_MON_CN"-"$INDEX" nopass
}
function import_requests(){
# $1:- Base directory for Root CA
$1/easyrsa import-req $BASE_MEMBER_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$MDB_CN"-"$INDEX".req "$MDB_CN"-"$INDEX"
$1/easyrsa import-req $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$BDB_CN"-"$INDEX".req "$BDB_CN"-"$INDEX"
$1/easyrsa import-req $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$MDB_MON_CN"-"$INDEX".req "$MDB_MON_CN"-"$INDEX"
}
function sign_requests(){
# $1:- Base directory for Root CA
$1/easyrsa --subject-alt-name=DNS:localhost,DNS:"$MDB_CN"-"$INDEX" sign-req server "$MDB_CN"-"$INDEX"
$1/easyrsa sign-req client "$BDB_CN"-"$INDEX"
$1/easyrsa sign-req client "$MDB_MON_CN"-"$INDEX"
}
function make_pem_files(){
# $1:- Base directory for Root CA
# $2:- Base directory for kubernetes related config for secret.yaml
mkdir $2
cat $1/pki/issued/"$MDB_CN"-"$INDEX".crt $BASE_MEMBER_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$MDB_CN"-"$INDEX".key > $2/"$MDB_CN"-"$INDEX".pem
cat $1/pki/issued/"$BDB_CN"-"$INDEX".crt $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$BDB_CN"-"$INDEX".key > $2/"$BDB_CN"-"$INDEX".pem
cat $1/pki/issued/"$MDB_MON_CN"-"$INDEX".crt $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$MDB_MON_CN"-"$INDEX".key > $2/"$MDB_MON_CN"-"$INDEX".pem
}
function convert_b64(){
# $1:- Base directory for kubernetes related config for secret.yaml
# $2:- Base directory for Root CA
# $3:- Base directory for client requests/keys
cat $1/"$MDB_CN"-"$INDEX".pem | base64 -w 0 > $1/"$MDB_CN"-"$INDEX".pem.b64
cat $1/"$BDB_CN"-"$INDEX".pem | base64 -w 0 > $1/"$BDB_CN"-"$INDEX".pem.b64
cat $1/"$MDB_MON_CN"-"$INDEX".pem | base64 -w 0 > $1/"$MDB_MON_CN"-"$INDEX".pem.b64
cat $3/pki/private/"$BDB_CN"-"$INDEX".key | base64 -w 0 > $1/"$BDB_CN"-"$INDEX".key.b64
cat $2/pki/ca.crt | base64 -w 0 > $1/ca.crt.b64
cat $2/pki/crl.pem | base64 -w 0 > $1/crl.pem.b64
}
function configure_common(){
sudo apt-get update -y
sudo apt-get install openssl -y
wget https://github.com/OpenVPN/easy-rsa/archive/3.0.1.tar.gz -P $1
tar xzvf $1/3.0.1.tar.gz -C $1/
rm $1/3.0.1.tar.gz
cp $1/$BASE_EASY_RSA_PATH/vars.example $1/$BASE_EASY_RSA_PATH/vars
}
while [[ $# -gt 0 ]]; do
arg="$1"
case $arg in
--index)
INDEX="$2"
shift
;;
--mdb-cn)
MDB_CN="$2"
shift
;;
--bdb-cn)
BDB_CN="$2"
shift
;;
--mdb-mon-cn)
MDB_MON_CN="$2"
shift
;;
--dir)
BASE_DIR="$2"
shift
;;
--help)
show_help
exit 0
;;
*)
echo "Unknown option: $1"
exit 1
;;
esac
shift
done
BASE_CA_DIR="${BASE_DIR}"/bdb-cluster-ca
BASE_MEMBER_CERT_DIR="${BASE_DIR}"/member-cert
BASE_CLIENT_CERT_DIR="${BASE_DIR}"/client-cert
BASE_EASY_RSA_PATH='easy-rsa-3.0.1/easyrsa3'
BASE_K8S_DIR="${BASE_DIR}"/k8s
# sanity checks
if [[ -z "${INDEX}" ]] ; then
echo "Missing required arguments"
exit 1
fi
# Configure Root CA
mkdir $BASE_CA_DIR
configure_common $BASE_CA_DIR
configure_root_ca $BASE_CA_DIR/$BASE_EASY_RSA_PATH
# Configure Member Request/Key generation
mkdir $BASE_MEMBER_CERT_DIR
configure_common $BASE_MEMBER_CERT_DIR
configure_member_cert_gen $BASE_MEMBER_CERT_DIR/$BASE_EASY_RSA_PATH
# Configure Client Request/Key generation
mkdir $BASE_CLIENT_CERT_DIR
configure_common $BASE_CLIENT_CERT_DIR
configure_client_cert_gen $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH
import_requests $BASE_CA_DIR/$BASE_EASY_RSA_PATH
sign_requests $BASE_CA_DIR/$BASE_EASY_RSA_PATH
make_pem_files $BASE_CA_DIR/$BASE_EASY_RSA_PATH $BASE_K8S_DIR
convert_b64 $BASE_K8S_DIR $BASE_CA_DIR/$BASE_EASY_RSA_PATH $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH

View File

@ -0,0 +1,12 @@
FROM nginx:stable
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get autoremove \
&& apt-get clean
COPY nginx.conf.template /etc/nginx/conf.d/access_pub_key.conf
COPY nginx_entrypoint.bash /
VOLUME /usr/share/nginx
EXPOSE 9986
ENTRYPOINT ["/nginx_entrypoint.bash"]

View File

@ -0,0 +1,19 @@
## Nginx container for hosting public key for a tendermint instance
### Step 1: Build and Push the Latest Container
Use the `docker_build_and_push.bash` script to build the latest docker image
and upload it to Docker Hub.
Ensure that the image tag is updated to a new version number to properly
reflect any changes made to the container.
### Step 2: Run the container
```
docker run \
--name=tendermint_instance_pub_key \
--env TM_PUB_KEY_ACCESS_PORT=<port at which the public key is served> \
--publish=<nginx port for external connections>:<corresponding host port> \
--volume=<host dir with public key>:/tendermint_node_data \
bigchaindb/nginx_pub_key_access:<version_number>
```
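Once the container is running, the node's Tendermint public key should be retrievable over plain HTTP on the access port (this is how peers fetch it in the Tendermint entrypoint script). A quick sanity check, assuming the default port from the ConfigMap:
```
curl http://<hostname or IP of this node>:9986/pub_key.json
```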

View File

@ -0,0 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_pub_key_access:unstable .
docker push bigchaindb/nginx_pub_key_access:unstable

View File

@ -0,0 +1,10 @@
# Serve the public key for a tendermint instance
server {
listen PUBLIC_KEY_ACCESS_PORT default_server;
listen [::]:PUBLIC_KEY_ACCESS_PORT default_server ipv6only=on;
location / {
root /usr/share/nginx/;
autoindex on;
}
}

View File

@ -0,0 +1,21 @@
#!/bin/bash
set -euo pipefail
# Tendermint public key access port
tm_pub_key_access_port=`printenv TM_PUB_KEY_ACCESS_PORT`
if [[ -z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting}" ]]; then
exit 1
else
echo TM_PUB_KEY_ACCESS_PORT="$tm_pub_key_access_port"
fi
NGINX_CONF_FILE=/etc/nginx/conf.d/access_pub_key.conf
# configure the access_pub_key file with env variable(s)
sed -i "s|PUBLIC_KEY_ACCESS_PORT|${tm_pub_key_access_port}|g" ${NGINX_CONF_FILE}
cat /etc/nginx/conf.d/access_pub_key.conf
# start nginx
echo "INFO: starting nginx..."
exec nginx -g "daemon off;"

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
# Name of tendermint instance you are trying to connect to
# e.g. tm-instance-0
name: "<remote-tendermint-host>"
namespace: default
spec:
ports:
- port: 46656
name: p2p
- port: 46657
name: pubkey
# FQDN of remote cluster/NGINX instance
externalName: "<dns-name-remote-nginx>"

View File

@ -0,0 +1,41 @@
#########################################################
# This YAML section describes a k8s PV for tendermint db #
#########################################################
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-tm-db
spec:
accessModes:
- ReadWriteOnce
azureDisk:
cachingMode: None
diskName: <Azure Disk Name>
diskURI: <Azure Disk URL>
fsType: ext4
readOnly: false
capacity:
storage: 20Gi
persistentVolumeReclaimPolicy: Retain
storageClassName: tendermint-db
---
##############################################################
# This YAML section describes a k8s PV for Tendermint config #
##############################################################
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-tm-configdb
spec:
accessModes:
- ReadWriteOnce
azureDisk:
cachingMode: None
diskName: <Azure Disk Name>
diskURI: <Azure Disk URL>
fsType: ext4
readOnly: false
capacity:
storage: 1Gi
persistentVolumeReclaimPolicy: Retain
storageClassName: tendermint-config-db

View File

@ -0,0 +1,32 @@
##########################################################
# This YAML section describes a k8s pvc for tendermint db #
##########################################################
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tendermint-db-claim
annotations:
volume.beta.kubernetes.io/storage-class: tendermint-db
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
---
################################################################
# This YAML section describes a k8s pvc for tendermint configdb #
################################################################
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tendermint-config-db-claim
annotations:
volume.beta.kubernetes.io/storage-class: tendermint-config-db
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -0,0 +1,31 @@
###################################################################
# This YAML section describes a StorageClass for the tendermint db #
###################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: tendermint-db
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS #[Premium_LRS, Standard_LRS]
location: westeurope
# If you have created a different storage account e.g. for Premium Storage
#storageAccount: <Storage account name>
# Use Managed Disk(s) with VMs using Managed Disks(Only used for Tectonic deployment)
#kind: Managed
---
#########################################################################
# This YAML section describes a StorageClass for the tendermint configdb #
#########################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: tendermint-config-db
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS #[Premium_LRS, Standard_LRS]
location: westeurope
# If you have created a different storage account e.g. for Premium Storage
#storageAccount: <Storage account name>
# Use Managed Disk(s) with VMs using Managed Disks(Only used for Tectonic deployment)
#kind: Managed

View File

@ -0,0 +1,115 @@
#################################################################################
# This YAML file describes a StatefulSet with a service for running and exposing #
# a Tendermint instance. It depends on the tendermint-config-db-claim #
# and tendermint-db-claim k8s pvc. #
#################################################################################
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: tm-instance-0-ss
namespace: default
spec:
serviceName: tm-instance-0
replicas: 1
template:
metadata:
name: tm-instance-0-ss
labels:
app: tm-instance-0-ss
spec:
restartPolicy: Always
volumes:
- name: tm-data
persistentVolumeClaim:
claimName: tendermint-db-claim
- name: tm-config-data
persistentVolumeClaim:
claimName: tendermint-config-db-claim
containers:
# Treating nginx + tendermint as a POD because they should not
# exist without each other
# Nginx container for hosting public key of this node
- name: nginx
imagePullPolicy: Always
image: bigchaindb/nginx_pub_key_access:unstable
env:
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
ports:
- containerPort: 9986
name: tm-pk-access
volumeMounts:
- name: tm-config-data
mountPath: /usr/share/nginx
readOnly: true
#Tendermint container
- name: tendermint
imagePullPolicy: Always
image: bigchaindb/tendermint:unstable
env:
- name: TM_SEEDS
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-seeds
- name: TM_VALIDATOR_POWER
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-validator-power
- name: TM_VALIDATORS
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-validators
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
- name: TM_GENESIS_TIME
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-genesis-time
- name: TM_CHAIN_ID
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-chain-id
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-p2p-port
- name: TMHOME
value: /tendermint
- name: TM_PROXY_APP
valueFrom:
configMapKeyRef:
name: vars
key: bdb-instance-name
- name: TM_ABCI_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-abci-port
# Resource constraint on the pod, can be changed
resources:
limits:
cpu: 200m
memory: 5G
volumeMounts:
- name: tm-data
mountPath: /tendermint
- name: tm-config-data
mountPath: /tendermint_node_data
ports:
- containerPort: 46656
name: p2p
- containerPort: 46657
name: rpc

View File

@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: tm-instance-1
namespace: default
labels:
name: tm-instance-1
spec:
selector:
app: tm-instance-1-ss
ports:
- port: 46656
targetPort: 46656
name: p2p
protocol: TCP
- port: 46657
targetPort: 46657
name: rpc
protocol: TCP
- port: 9986
targetPort: 9986
name: pub-key-access
protocol: TCP
clusterIP: None

View File

@ -0,0 +1,8 @@
FROM tendermint/tendermint:0.13
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
COPY genesis.json.template /etc/tendermint/genesis.json
COPY tendermint_entrypoint.bash /
VOLUME /tendermint /tendermint_node_data
EXPOSE 46656 46657
ENTRYPOINT ["/tendermint_entrypoint.bash"]

View File

@ -0,0 +1,29 @@
## Tendermint container used for BFT replication and consensus
### Step 1: Build and Push the Latest Container
Use the `docker_build_and_push.bash` script to build the latest docker image
and upload it to Docker Hub.
Ensure that the image tag is updated to a new version number to properly
reflect any changes made to the container.
### Step 2: Run the container
```
docker run \
--name=tendermint \
--env TM_PUB_KEY_ACCESS_PORT=<port to access public keys hosted by nginx> \
--env TM_SEEDS=<comma separated list of all nodes IP addresses/Hostnames> \
--env TM_VALIDATOR_POWER=<voting power of node> \
--env TM_VALIDATORS=<list of all validators> \
--env TM_GENESIS_TIME=<genesis time> \
--env TM_CHAIN_ID=<chain id> \
--env TM_P2P_PORT=<Port used by all peers to communicate> \
--env TMHOME=<Tendermint home directory containing all config files> \
--env TM_PROXY_APP=<Hostname/IP address of app> \
--publish=<rpc port on host>:<rpc port> \
--publish=<p2p port on host>:<p2p port> \
--volume <host dir for tendermint data>:/tendermint \
--volume=<host dir for public key>:/tendermint_node_data \
bigchaindb/tendermint:<version_number>
```
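To sanity-check a running node, you can query Tendermint's standard RPC `/status` endpoint on the published RPC port (a stock Tendermint endpoint, not something added by this image):
```
curl http://localhost:<rpc port on host>/status
```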

View File

@ -0,0 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/tendermint:unstable .
docker push bigchaindb/tendermint:unstable

View File

@ -0,0 +1,6 @@
{
"genesis_time": TM_GENESIS_TIME,
"chain_id": TM_CHAIN_ID,
"validators": [],
"app_hash": ""
}

View File

@ -0,0 +1,98 @@
#!/bin/bash
set -euo pipefail
# Cluster vars
tm_seeds=`printenv TM_SEEDS`
tm_validators=`printenv TM_VALIDATORS`
tm_validator_power=`printenv TM_VALIDATOR_POWER`
tm_pub_key_access_port=`printenv TM_PUB_KEY_ACCESS_PORT`
tm_genesis_time=`printenv TM_GENESIS_TIME`
tm_chain_id=`printenv TM_CHAIN_ID`
tm_p2p_port=`printenv TM_P2P_PORT`
# tendermint node vars
tmhome=`printenv TMHOME`
tm_proxy_app=`printenv TM_PROXY_APP`
tm_abci_port=`printenv TM_ABCI_PORT`
# sanity check
if [[ -z "${tm_seeds:?TM_SEEDS not specified. Exiting!}" || \
-z "${tm_validators:?TM_VALIDATORS not specified. Exiting!}" || \
-z "${tm_validator_power:?TM_VALIDATOR_POWER not specified. Exiting!}" || \
-z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting!}" || \
-z "${tm_genesis_time:?TM_GENESIS_TIME not specified. Exiting!}" || \
-z "${tm_chain_id:?TM_CHAIN_ID not specified. Exiting!}" || \
-z "${tmhome:?TMHOME not specified. Exiting!}" || \
-z "${tm_p2p_port:?TM_P2P_PORT not specified. Exiting!}" || \
-z "${tm_abci_port:?TM_ABCI_PORT not specified. Exiting! }" ]]; then
echo "Missing required enviroment variables."
exit 1
else
echo tm_seeds="$TM_SEEDS"
echo tm_validators="$TM_VALIDATORS"
echo tm_validator_power="$TM_VALIDATOR_POWER"
echo tm_pub_key_access_port="$TM_PUB_KEY_ACCESS_PORT"
echo tm_genesis_time="$TM_GENESIS_TIME"
echo tm_chain_id="$TM_CHAIN_ID"
echo tmhome="$TMHOME"
echo tm_p2p_port="$TM_P2P_PORT"
echo tm_abci_port="$TM_ABCI_PORT"
fi
# copy template
cp /etc/tendermint/genesis.json /tendermint/genesis.json
TM_GENESIS_FILE=/tendermint/genesis.json
TM_PUB_KEY_DIR=/tendermint_node_data
# configure the genesis file with env variables
sed -i "s|TM_GENESIS_TIME|\"${tm_genesis_time}\"|g" ${TM_GENESIS_FILE}
sed -i "s|TM_CHAIN_ID|\"${tm_chain_id}\"|g" ${TM_GENESIS_FILE}
if [ ! -f /tendermint/priv_validator.json ]; then
tendermint gen_validator > /tendermint/priv_validator.json
# pub_key.json will be served by the nginx container
cat /tendermint/priv_validator.json
cat /tendermint/priv_validator.json | jq ".pub_key" > "$TM_PUB_KEY_DIR"/pub_key.json
fi
# fill genesis file with validators
IFS=',' read -ra VALS_ARR <<< "$TM_VALIDATORS"
IFS=',' read -ra VAL_POWERS_ARR <<< "$TM_VALIDATOR_POWER"
if [ ${#VALS_ARR[@]} -ne ${#VAL_POWERS_ARR[@]} ]; then
echo "Invalid configuration of Validator(s) and Validator Power(s)"
exit 1
fi
for i in "${!VALS_ARR[@]}"; do
# wait until validator generates priv/pub key pair
set +e
echo Validator: "${VALS_ARR[$i]}"
echo Validator Power: "${VAL_POWERS_ARR[$i]}"
echo "http://${VALS_ARR[$i]}:$tm_pub_key_access_port/pub_key.json"
curl -s --fail "http://${VALS_ARR[$i]}:$tm_pub_key_access_port/pub_key.json" > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 5
curl -s --fail "http://${VALS_ARR[$i]}:$tm_pub_key_access_port/pub_key.json" > /dev/null
ERR=$?
done
set -e
# add validator to genesis file along with its pub_key
curl -s "http://${VALS_ARR[$i]}:$tm_pub_key_access_port/pub_key.json" | jq ". as \$k | {pub_key: \$k, power: ${VAL_POWERS_ARR[$i]}, name: \"${VALS_ARR[$i]}\"}" > pub_validator.json
cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json
rm pub_validator.json
done
# construct seeds
IFS=',' read -ra SEEDS_ARR <<< "$tm_seeds"
seeds=()
for s in "${SEEDS_ARR[@]}"; do
seeds+=("$s:$tm_p2p_port")
done
seeds=$(IFS=','; echo "${seeds[*]}")
# start tendermint
echo "INFO: starting tendermint..."
exec tendermint node --p2p.seeds="$seeds" --moniker="`hostname`" --proxy_app="tcp://$tm_proxy_app:$tm_abci_port"

View File

@ -41,6 +41,7 @@ docs_require = [
'sphinx-rtd-theme>=0.1.9',
'sphinxcontrib-httpdomain>=1.5.0',
'sphinxcontrib-napoleon>=0.4.4',
'aafigure>=0.6',
]
tests_require = [
@ -84,6 +85,7 @@ install_requires = [
'aiohttp~=2.3',
'python-rapidjson-schema==0.1.1',
'abci~=0.3.0',
'setproctitle~=1.1.0',
]
setup(

View File

@ -199,6 +199,48 @@ def test_get_block():
assert block['height'] == 3
def test_delete_zombie_transactions(signed_create_tx, signed_transfer_tx):
from bigchaindb.backend import connect, query
from bigchaindb.tendermint.lib import Block
conn = connect()
conn.db.transactions.insert_one(signed_create_tx.to_dict())
query.store_asset(conn, {'id': signed_create_tx.id})
block = Block(app_hash='random_utxo',
height=3,
transactions=[signed_create_tx.id])
query.store_block(conn, block._asdict())
conn.db.transactions.insert_one(signed_transfer_tx.to_dict())
query.store_metadatas(conn, [{'id': signed_transfer_tx.id}])
query.delete_zombie_transactions(conn)
assert query.get_transaction(conn, signed_transfer_tx.id) is None
assert query.get_asset(conn, signed_transfer_tx.id) is None
assert list(query.get_metadata(conn, [signed_transfer_tx.id])) == []
assert query.get_transaction(conn, signed_create_tx.id) is not None
assert query.get_asset(conn, signed_create_tx.id) is not None
def test_delete_latest_block(signed_create_tx, signed_transfer_tx):
from bigchaindb.backend import connect, query
from bigchaindb.tendermint.lib import Block
conn = connect()
conn.db.transactions.insert_one(signed_create_tx.to_dict())
query.store_asset(conn, {'id': signed_create_tx.id})
block = Block(app_hash='random_utxo',
height=51,
transactions=[signed_create_tx.id])
query.store_block(conn, block._asdict())
query.delete_latest_block(conn)
assert query.get_transaction(conn, signed_create_tx.id) is None
assert query.get_asset(conn, signed_create_tx.id) is None
assert query.get_block(conn, 51) is None
def test_delete_unspent_outputs(db_context, utxoset):
from bigchaindb.backend import query
unspent_outputs, utxo_collection = utxoset

View File

@ -6,14 +6,15 @@ import copy
import pytest
@pytest.mark.tendermint
def test_make_sure_we_dont_remove_any_command():
# thanks to: http://stackoverflow.com/a/18161115/597097
from bigchaindb.commands.bigchaindb import create_parser
parser = create_parser()
assert parser.parse_args(['configure', 'rethinkdb']).command
assert parser.parse_args(['configure', 'mongodb']).command
assert parser.parse_args(['configure', 'localmongodb']).command
assert parser.parse_args(['show-config']).command
assert parser.parse_args(['export-my-pubkey']).command
assert parser.parse_args(['init']).command
@ -517,3 +518,99 @@ def test_run_remove_replicas(mock_remove_replicas):
assert exc.value.args == ('err',)
assert mock_remove_replicas.call_count == 1
mock_remove_replicas.reset_mock()
@pytest.mark.tendermint
@pytest.mark.bdb
def test_recover_db_from_zombie_txn(b, monkeypatch):
from bigchaindb.commands.bigchaindb import run_recover
from bigchaindb.models import Transaction
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.tendermint.lib import Block
from bigchaindb import backend
alice = generate_key_pair()
tx = Transaction.create([alice.public_key],
[([alice.public_key], 1)],
asset={'cycle': 'hero'},
metadata={'name': 'hohenheim'}) \
.sign([alice.private_key])
b.store_bulk_transactions([tx])
block = Block(app_hash='random_app_hash', height=10,
transactions=[])._asdict()
b.store_block(block)
def mock_get(uri):
return MockResponse(10)
monkeypatch.setattr('requests.get', mock_get)
run_recover(b)
assert list(backend.query.get_metadata(b.connection, [tx.id])) == []
assert not backend.query.get_asset(b.connection, tx.id)
assert not b.get_transaction(tx.id)
@pytest.mark.tendermint
@pytest.mark.bdb
def test_recover_db_from_zombie_block(b, monkeypatch):
from bigchaindb.commands.bigchaindb import run_recover
from bigchaindb.models import Transaction
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.tendermint.lib import Block
from bigchaindb import backend
alice = generate_key_pair()
tx = Transaction.create([alice.public_key],
[([alice.public_key], 1)],
asset={'cycle': 'hero'},
metadata={'name': 'hohenheim'}) \
.sign([alice.private_key])
b.store_bulk_transactions([tx])
block9 = Block(app_hash='random_app_hash', height=9,
transactions=[])._asdict()
b.store_block(block9)
block10 = Block(app_hash='random_app_hash', height=10,
transactions=[tx.id])._asdict()
b.store_block(block10)
def mock_get(uri):
return MockResponse(9)
monkeypatch.setattr('requests.get', mock_get)
run_recover(b)
assert list(backend.query.get_metadata(b.connection, [tx.id])) == []
assert not backend.query.get_asset(b.connection, tx.id)
assert not b.get_transaction(tx.id)
block = b.get_latest_block()
assert block['height'] == 9
@pytest.mark.tendermint
@patch('bigchaindb.config_utils.autoconfigure')
@patch('bigchaindb.commands.bigchaindb.run_recover')
@patch('bigchaindb.tendermint.commands.start')
# mock arguments are injected bottom-up: the last @patch above is the first parameter
def test_recover_db_on_start(mock_start,
                             mock_run_recover,
                             mock_autoconfigure,
                             mocked_setup_logging):
from bigchaindb.commands.bigchaindb import run_start
args = Namespace(start_rethinkdb=False, allow_temp_keypair=False, config=None, yes=True,
skip_initialize_database=False)
run_start(args)
assert mock_run_recover.called
assert mock_start.called
# Helper
class MockResponse():
def __init__(self, height):
self.height = height
def json(self):
return {'result': {'latest_block_height': self.height}}
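Taken together, the two zombie-recovery tests above suggest roughly the following run_recover flow. This is a hedged reconstruction, not the shipped implementation: the Tendermint RPC URL is an assumption, and only the response shape ({'result': {'latest_block_height': ...}}) and the clean-up steps are dictated by the tests.

import requests
from bigchaindb import backend

def run_recover(b):
    # Ask Tendermint how far its chain actually got.
    tm_height = requests.get('http://localhost:46657/status').json()[
        'result']['latest_block_height']
    latest = b.get_latest_block()
    # If BigchainDB stored a block Tendermint never finalized
    # (e.g. a crash between the two commits), roll that block back ...
    if latest and latest['height'] > tm_height:
        backend.query.delete_latest_block(b.connection)
    # ... then purge transactions that are no longer part of any block.
    backend.query.delete_zombie_transactions(b.connection)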

View File

@ -328,8 +328,8 @@ def merlin_pubkey(merlin):
@pytest.fixture
def b():
from bigchaindb import Bigchain
return Bigchain()
from bigchaindb.tendermint import BigchainDB
return BigchainDB()
@pytest.fixture

View File

@ -3,6 +3,8 @@ from unittest.mock import patch, call
import pytest
pytestmark = pytest.mark.tendermint
@pytest.fixture
def mock_queue(monkeypatch):
@ -157,3 +159,18 @@ def test_lazy_execution():
lz.name.upper()
result = lz.run(cat)
assert result == 'SHMUI'
def test_process_set_title():
from uuid import uuid4
from multiprocessing import Queue
from setproctitle import getproctitle
from bigchaindb.utils import Process
queue = Queue()
uuid = str(uuid4())
process = Process(target=lambda: queue.put(getproctitle()),
name=uuid)
process.start()
assert queue.get() == uuid
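This test relies on bigchaindb.utils.Process renaming its child process, which is why setproctitle was added to install_requires above. A plausible sketch of such a wrapper, assuming it simply sets the title before delegating to the normal run():

import multiprocessing
from setproctitle import setproctitle

class Process(multiprocessing.Process):
    """multiprocessing.Process that shows up under its ``name``
    in ps/top output."""

    def run(self):
        # Rename the child so operators can tell BigchainDB's
        # worker processes apart.
        setproctitle(self.name)
        super().run()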

View File

@ -1,10 +1,33 @@
import pytest
from bigchaindb.models import Transaction
# from bigchaindb.models import Transaction
VOTES_ENDPOINT = '/api/v1/votes'
@pytest.mark.tendermint
def test_get_votes_endpoint(client):
gone = 'The votes endpoint is gone now, but it might return in the future.'
response = {'message': gone}
res = client.get(VOTES_ENDPOINT)
assert response == res.json
assert res.status_code == 404
res = client.get(VOTES_ENDPOINT + '?block_id=')
assert response == res.json
assert res.status_code == 404
res = client.get(VOTES_ENDPOINT + '?block_id=123')
assert response == res.json
assert res.status_code == 404
"""
# Old tests are below. We're keeping their code in a long comment block for now,
# because we might bring back a votes endpoint in the future.
# https://github.com/bigchaindb/bigchaindb/issues/2037
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_votes_endpoint(b, client):
@ -73,3 +96,4 @@ def test_get_votes_endpoint_returns_400_bad_query_params(client):
res = client.get(VOTES_ENDPOINT + '?tx_id=123&block_id=123')
assert res.status_code == 400
"""

1
tmdata/genesis.json Normal file
View File

@ -0,0 +1 @@
{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-JCYeEN","validators":[{"pub_key":{"type":"ed25519","data":"0C988282C02CFF72E5E296DB78CE26D922178549B327C375D992548C9AFCCE6D"},"power":10,"name":""}],"app_hash":""}

View File

@ -0,0 +1 @@
{"address":"E6CB05DA326F70BB4CC0A4AF83FC3BBF70B9A4D5","pub_key":{"type":"ed25519","data":"0C988282C02CFF72E5E296DB78CE26D922178549B327C375D992548C9AFCCE6D"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"D4488996BDF92CE1D80670C66923D4996AE1B772FE0F76DAE33EDC410DC1D58F0C988282C02CFF72E5E296DB78CE26D922178549B327C375D992548C9AFCCE6D"}}