mirror of https://github.com/bigchaindb/bigchaindb.git synced 2024-06-17 18:13:22 +02:00

Merge remote-tracking branch 'origin/master' into kyber-master

Commit c119bb5b67 by diminator, 2017-09-15 18:13:11 +02:00
137 changed files with 3747 additions and 1808 deletions

View File

@ -2,33 +2,33 @@
set -e -x
if [[ "${TOXENV}" == *-rdb ]]; then
rethinkdb --daemon
if [[ "${BIGCHAINDB_DATABASE_BACKEND}" == rethinkdb ]]; then
docker pull rethinkdb:2.3.5
docker run -d --publish=28015:28015 --name rdb rethinkdb:2.3.5
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
-z "${BIGCHAINDB_DATABASE_SSL}" ]]; then
# Connect to MongoDB on port 27017 via a normal, insecure connection if
# BIGCHAINDB_DATABASE_SSL is unset.
# It is unset in this case in .travis.yml.
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1404-3.4.4.tgz -O /tmp/mongodb.tgz
tar -xvf /tmp/mongodb.tgz
mkdir /tmp/mongodb-data
${PWD}/mongodb-linux-x86_64-ubuntu1404-3.4.4/bin/mongod \
--dbpath=/tmp/mongodb-data --replSet=bigchain-rs &> /dev/null &
docker pull mongo:3.4.4
docker run -d --publish=27017:27017 --name mdb-without-ssl mongo:3.4.4 \
--replSet=bigchain-rs
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
"${BIGCHAINDB_DATABASE_SSL}" == true ]]; then
# Connect to MongoDB on port 27017 via a TLS/SSL connection if
# BIGCHAINDB_DATABASE_SSL is set.
# It is set to 'true' here in .travis.yml. Dummy certificates for testing
# are stored under bigchaindb/tests/backend/mongodb-ssl/certs/ directory.
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1404-3.4.4.tgz -O /tmp/mongodb-ssl.tgz
tar -xvf /tmp/mongodb-ssl.tgz
mkdir /tmp/mongodb-ssl-data
${PWD}/mongodb-linux-x86_64-ubuntu1404-3.4.4/bin/mongod \
--dbpath=/tmp/mongodb-ssl-data \
docker pull mongo:3.4.4
docker run -d \
--name mdb-with-ssl \
--publish=27017:27017 \
--volume=${TRAVIS_BUILD_DIR}/tests/backend/mongodb-ssl/certs:/certs \
mongo:3.4.4 \
--replSet=bigchain-rs \
--sslAllowInvalidHostnames \
--sslMode=requireSSL \
--sslCAFile=$TRAVIS_BUILD_DIR/tests/backend/mongodb-ssl/certs/ca.crt \
--sslCRLFile=$TRAVIS_BUILD_DIR/tests/backend/mongodb-ssl/certs/crl.pem \
--sslPEMKeyFile=$TRAVIS_BUILD_DIR/tests/backend/mongodb-ssl/certs/test_mdb_ssl_cert_and_key.pem &> /dev/null &
--sslCAFile=/certs/ca.crt \
--sslCRLFile=/certs/crl.pem \
--sslPEMKeyFile=/certs/test_mdb_ssl_cert_and_key.pem
fi

View File

@ -1,3 +1,10 @@
sudo: required
dist: trusty
services:
- docker
language: python
cache: pip
@ -21,16 +28,12 @@ matrix:
env: TOXENV=docsserver
include:
- python: 3.5
addons:
rethinkdb: '2.3.5'
env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
- python: 3.5
env:
- BIGCHAINDB_DATABASE_BACKEND=mongodb
- BIGCHAINDB_DATABASE_SSL=
- python: 3.6
addons:
rethinkdb: '2.3.5'
env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
- python: 3.6
env:

View File

@ -1,7 +1,24 @@
# Change Log (Release Notes)
All _notable_ changes to this project will be documented in this file (`CHANGELOG.md`).
This project adheres to [the Python form of Semantic Versioning](https://packaging.python.org/tutorials/distributing-packages/#choosing-a-versioning-scheme) (or at least we try).
The BigchainDB **public API**
* includes:
* This definition of **public API**.
* The configuration defaults, schemas, and paths where configuration files are stored.
* The Command Line Interface.
* The transaction schema.
* All the endpoints exposed by the HTTP API (including payloads sent to and received from them).
* The WebSocket Event Stream API.
* The docker setup.
* excludes:
* **Everything** marked as `experimental` (even if the feature is part of the **public API**).
* The logs generated by the server.
* The internal API, such as classes, methods, functions.
Contributors to this file, please follow the guidelines on [keepachangelog.com](http://keepachangelog.com/).
Note that each version (or "release") is the name of a [Git _tag_](https://git-scm.com/book/en/v2/Git-Basics-Tagging) of a particular commit, so the associated date and time are the date and time of that commit (as reported by GitHub), _not_ the "Uploaded on" date listed on PyPI (which may differ).
For reference, the possible headings are:
@ -15,6 +32,22 @@ For reference, the possible headings are:
* **External Contributors** to list contributors outside of BigchainDB GmbH.
* **Notes**
## [1.0.1] - 2017-07-13
Tag name: v1.0.1
### Fixed
* Various issues in the Quickstart page. Pull requests
[#1641](https://github.com/bigchaindb/bigchaindb/pull/1641) and
[#1648](https://github.com/bigchaindb/bigchaindb/pull/1648).
* Changefeed hanging when MongoDB primary node is turned off.
[Pull request #1638](https://github.com/bigchaindb/bigchaindb/pull/1638).
* Missing `assets` tables for RethinkDB backend.
[Pull request #1646](https://github.com/bigchaindb/bigchaindb/pull/1646).
* Cryptoconditions version mismatch.
[Pull request #1659](https://github.com/bigchaindb/bigchaindb/pull/1659).
## [1.0.0] - 2017-07-05
Tag name: v1.0.0
@ -42,7 +75,7 @@ Tag name: v1.0.0rc1
[#1536](https://github.com/bigchaindb/bigchaindb/pull/1536),
[#1551](https://github.com/bigchaindb/bigchaindb/pull/1551) and
[#1552](https://github.com/bigchaindb/bigchaindb/pull/1552).
* Text search support (only if using MongoDB). Pull Requests [#1469](https://github.com/bigchaindb/bigchaindb/pull/1469) and [#1471](https://github.com/bigchaindb/bigchaindb/pull/1471)
* The `database.connection_timeout` configuration setting now works with RethinkDB too. [#1512](https://github.com/bigchaindb/bigchaindb/pull/1512)
* New code and tools for benchmarking CREATE transactions. [Pull Request #1511](https://github.com/bigchaindb/bigchaindb/pull/1511)
@ -58,7 +91,7 @@ Tag name: v1.0.0rc1
* If a node comes back from being down for a while, it will resume voting on blocks in the order determined by the MongoDB oplog, in the case of MongoDB. (In the case of RethinkDB, blocks missed in the changefeed will not be voted on.) [Pull Request #1389](https://github.com/bigchaindb/bigchaindb/pull/1389)
* Parallelized transaction schema validation in the vote pipeline. [Pull Request #1492](https://github.com/bigchaindb/bigchaindb/pull/1492)
* `asset.data` or `asset.id` are now *required* in a CREATE or TRANSFER transaction, respectively. [Pull Request #1518](https://github.com/bigchaindb/bigchaindb/pull/1518)
* The HTTP response body, in the response to the `GET /` and the `GET /api/v1` endpoints, was changed substantially. [Pull Request #1529](https://github.com/bigchaindb/bigchaindb/pull/1529)
* Changed the HTTP `GET /api/v1/transactions/{transaction_id}` endpoint. It now only returns the transaction if it's in a valid block. It also returns a new header with a relative link to a status monitor. [Pull Request #1543](https://github.com/bigchaindb/bigchaindb/pull/1543)
* All instances of `txid` and `tx_id` were replaced with `transaction_id`, in the transaction model and the HTTP API. [Pull Request #1532](https://github.com/bigchaindb/bigchaindb/pull/1532)
* The hostname and port were removed from all URLs in all HTTP API responses. [Pull Request #1538](https://github.com/bigchaindb/bigchaindb/pull/1538)
@ -98,7 +131,7 @@ Tag name: v0.10.3
Tag name: v0.10.2
### Added
* Add Cross Origin Resource Sharing (CORS) support for the HTTP API.
[Commit 6cb7596](https://github.com/bigchaindb/bigchaindb/commit/6cb75960b05403c77bdae0fd327612482589efcb)
### Fixed
@ -269,7 +302,7 @@ Tag name: v0.8.1
### Fixed
- Workaround for rapidjson problem with package metadata extraction
(https://github.com/kenrobbins/python-rapidjson/pull/52).
## [0.8.0] - 2016-11-29
@ -294,7 +327,7 @@ Tag name: v0.8.0
- Renamed "verifying key" to "public key". Renamed "signing key" to "private key". Renamed "vk" to "pk". [Pull Request #807](https://github.com/bigchaindb/bigchaindb/pull/807)
- `get_transaction_by_asset_id` now ignores invalid transactions. [Pull Request #810](https://github.com/bigchaindb/bigchaindb/pull/810)
- `get_transaction_by_metadata_id` now ignores invalid transactions. [Pull Request #811](https://github.com/bigchaindb/bigchaindb/pull/811)
- Updates to the configs and scripts for deploying a test network on AWS. The example config file deploys virtual machines running Ubuntu 16.04 now. Pull Requests
[#771](https://github.com/bigchaindb/bigchaindb/pull/771),
[#813](https://github.com/bigchaindb/bigchaindb/pull/813)
- Changed logging of transactions on block creation so now it just says the length of the list of transactions, rather than listing all the transactions. [Pull Request #861](https://github.com/bigchaindb/bigchaindb/pull/861)
@ -362,7 +395,7 @@ committed: Oct 28, 2016, 4:00 PM GMT+2
2. an `assignee`: the public key of the node it was assigned to.
- The `assignment_timestamp` wasn't removed before writing the transaction to a block. That was fixed in [Pull Request #627](https://github.com/bigchaindb/bigchaindb/pull/627)
- The `assignment_timestamp` and `assignee` weren't removed in the response to an HTTP API request sent to the `/api/v1/transactions/<txid>` endpoint. That was fixed in [Pull Request #646](https://github.com/bigchaindb/bigchaindb/pull/646)
- When validating a TRANSFER transaction, if any fulfillment refers to a transaction that's _not_ in a valid block, then the transaction isn't valid. This wasn't checked before but it is now. [Pull Request #629](https://github.com/bigchaindb/bigchaindb/pull/629)
- When validating a TRANSFER transaction, if any fulfillment refers to a transaction that's _not_ in a valid block, then the transaction isn't valid. This wasn't checked before but it is now. [Pull Request #629](https://github.com/bigchaindb/bigchaindb/pull/629)
### External Contributors
- @MinchinWeb - [Pull Request #696](https://github.com/bigchaindb/bigchaindb/pull/696)
@ -390,11 +423,11 @@ committed: Oct 28, 2016, 4:00 PM GMT+2
[#684](https://github.com/bigchaindb/bigchaindb/pull/684),
[#688](https://github.com/bigchaindb/bigchaindb/pull/688),
[#699](https://github.com/bigchaindb/bigchaindb/pull/699),
[#705](https://github.com/bigchaindb/bigchaindb/pull/705),
[#737](https://github.com/bigchaindb/bigchaindb/pull/737),
[#748](https://github.com/bigchaindb/bigchaindb/pull/748),
[#753](https://github.com/bigchaindb/bigchaindb/pull/753),
[#757](https://github.com/bigchaindb/bigchaindb/pull/757),
[#759](https://github.com/bigchaindb/bigchaindb/pull/759), and more
@ -519,7 +552,7 @@ committed: June 15, 2016, 1:42 PM GMT+2
### Changed
- Round timestamps to a precision of one second, and replace payload hash with payload UUID in transactions: [Pull Request #384](https://github.com/bigchaindb/bigchaindb/pull/384)
- Updated cryptoconditions API usage: [Pull Request #373](https://github.com/bigchaindb/bigchaindb/pull/373)
## [0.4.1] - 2016-06-13
@ -564,7 +597,7 @@ committed: May 27, 2016, 1:42 PM GMT+2
### Changed
- The block processes now use GroupProcess: [Pull Request #267](https://github.com/bigchaindb/bigchaindb/pull/267)
- Replaced the `json` Python package with `rapidjson` (a Python wrapper for a fast JSON parser/generator written in C++), to speed up JSON serialization and deserialization: [Pull Request #318](https://github.com/bigchaindb/bigchaindb/pull/318)
- Overhauled `ROADMAP.md` and moved it to [the bigchaindb/org repository](https://github.com/bigchaindb/org): Pull Requests
[#282](https://github.com/bigchaindb/bigchaindb/pull/282),
[#306](https://github.com/bigchaindb/bigchaindb/pull/306),
[#308](https://github.com/bigchaindb/bigchaindb/pull/308),
@ -612,18 +645,18 @@ committed: April 26, 2016, 11:09 AM GMT+2
- Ability to use environment variables to set (or partially set) configuration settings: [Pull Request #153](https://github.com/bigchaindb/bigchaindb/pull/153)
- `bigchaindb --export-my-pubkey`: [Pull Request #186](https://github.com/bigchaindb/bigchaindb/pull/186)
- `bigchaindb --version`, and one central source for the current version (`version.py`): [Pull Request #208](https://github.com/bigchaindb/bigchaindb/pull/208)
- AWS deployment scripts: Pull Requests
[#160](https://github.com/bigchaindb/bigchaindb/pull/160),
[#166](https://github.com/bigchaindb/bigchaindb/pull/166),
[#172](https://github.com/bigchaindb/bigchaindb/pull/172),
[#203](https://github.com/bigchaindb/bigchaindb/pull/203)
- `codecov.yml`: [Pull Request #161](https://github.com/bigchaindb/bigchaindb/pull/161)
- `CHANGELOG.md` (this file): [Pull Request #117](https://github.com/bigchaindb/bigchaindb/pull/117)
- Signatures using Ed25519: Pull Requests
[#138](https://github.com/bigchaindb/bigchaindb/pull/138),
[#152](https://github.com/bigchaindb/bigchaindb/pull/152)
- Multisig support: [Pull Request #107](https://github.com/bigchaindb/bigchaindb/pull/107)
- HTTP Server & Web API: Pull Requests
[#102](https://github.com/bigchaindb/bigchaindb/pull/102),
[#150](https://github.com/bigchaindb/bigchaindb/pull/150),
[#155](https://github.com/bigchaindb/bigchaindb/pull/155),
@ -642,7 +675,7 @@ committed: April 26, 2016, 11:09 AM GMT+2
- Bug related to running the `bigchaindb-benchmark load` on docker [Pull Request #225](https://github.com/bigchaindb/bigchaindb/pull/225)
## External Contributors
- [@thedoctor](https://github.com/thedoctor): Pull Requests
[#99](https://github.com/bigchaindb/bigchaindb/pull/99),
[#136](https://github.com/bigchaindb/bigchaindb/pull/136)
- [@roderik](https://github.com/roderik): [Pull Request #162](https://github.com/bigchaindb/bigchaindb/pull/162)
@ -658,8 +691,8 @@ committed: April 20, 2016, 3:31 PM GMT+2
## [0.1.4] - 2016-02-22
Tag name: v0.1.4
= commit: c4c850f480bc9ae72df2a54f81c0825b6fb4ed62
committed: Feb 22, 2016, 11:51 AM GMT+1
### Added
@ -680,8 +713,8 @@ committed Feb 16, 2016, 11:37 AM GMT+1
## [0.1.2] - 2016-02-15
Tag name: v0.1.2
= commit d2ff24166d69dda68dd7b4a24a88279b1d37e222
committed Feb 15, 2016, 2:23 PM GMT+1
### Added
@ -691,8 +724,8 @@ committed Feb 15, 2016, 2:23 PM GMT+1
- Fix exception when running `start`: [Pull Request #32](https://github.com/bigchaindb/bigchaindb/pull/32) resolved [Issue #35]
## [0.1.1] - 2016-02-15
Tag name: v0.1.1
= commit 2a025448b29fe7056760de1039c73bbcfe992461
committed Feb 15, 2016, 10:48 AM GMT+1
### Added

View File

@ -14,5 +14,9 @@ ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_PORT 9985
ENTRYPOINT ["bigchaindb"]
CMD ["start"]

View File

@ -12,6 +12,9 @@ ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ARG backend
RUN mkdir -p /usr/src/app

View File

@ -18,12 +18,14 @@ A minor release is preceded by a feature freeze and created from the 'master' b
1. Update the `CHANGELOG.md` file in master
1. In `k8s/bigchaindb/bigchaindb-dep.yaml`, find the line of the form `image: bigchaindb/bigchaindb:0.8.1` and change the version number to the new version number, e.g. `0.9.0`. (This is the Docker image that Kubernetes should pull from Docker Hub.) Commit that change to master
1. Create and checkout a new branch for the minor release, named after the minor version, without a preceding 'v', e.g. `git checkout -b 0.9` (*not* 0.9.0; this new branch will be for e.g. 0.9.0, 0.9.1, 0.9.2, etc., each of which will be identified by a tagged commit)
1. Push the new branch to GitHub, e.g. `git push origin 0.9`
1. Create and checkout a new branch off of the 0.9 branch. Let's call it branch T for now
1. In `bigchaindb/version.py`, update `__version__` and `__short_version__`, e.g. to `0.9.0` and `0.9` (with no `.dev` on the end)
1. Commit that change, and push the new branch to GitHub
1. On GitHub, use the new branch to create a new pull request and wait for all the tests to pass
1. Commit those changes, push the new branch T to GitHub, and use the pushed branch T to create a new pull request merging the T branch into the 0.9 branch.
1. Wait for all the tests to pass!
1. Follow steps outlined in [Common Steps](#common-steps)
1. In the 'master' branch, edit `bigchaindb/version.py`: increment the minor version to the next planned release, e.g. `0.10.0.dev`. (Exception: if you just released `X.Y.Zrc1`, then increment the minor version to `X.Y.Zrc2`.) This step is so people reading the latest docs will know that they're for the latest (master branch) version of BigchainDB Server, not the docs at the time of the most recent release (which are also available).
1. Go to [Docker Hub](https://hub.docker.com/), sign in, go to Settings - Build Settings, and under the build with Docker Tag Name equal to `latest`, change the Name to the number of the new release, e.g. `0.9`
1. Go to [Docker Hub](https://hub.docker.com/), sign in, go to bigchaindb/bigchaindb, go to Settings - Build Settings, and under the build with Docker Tag Name equal to `latest`, change the Name to the number of the new release, e.g. `0.9`
Congratulations, you have released BigchainDB!

View File

@ -72,6 +72,9 @@ config = {
'scheme': os.environ.get('BIGCHAINDB_WSSERVER_SCHEME') or 'ws',
'host': os.environ.get('BIGCHAINDB_WSSERVER_HOST') or 'localhost',
'port': int(os.environ.get('BIGCHAINDB_WSSERVER_PORT', 9985)),
'advertised_scheme': os.environ.get('BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME') or 'ws',
'advertised_host': os.environ.get('BIGCHAINDB_WSSERVER_ADVERTISED_HOST') or 'localhost',
'advertised_port': int(os.environ.get('BIGCHAINDB_WSSERVER_ADVERTISED_PORT', 9985)),
},
'database': _database_map[
os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'rethinkdb')

View File

@ -111,7 +111,8 @@ class MongoDBConnection(Connection):
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl)
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
@ -126,7 +127,8 @@ class MongoDBConnection(Connection):
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED)
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
@ -143,6 +145,11 @@ class MongoDBConnection(Connection):
raise ConfigurationError from exc
MONGO_OPTS = {
'socketTimeoutMS': 20000,
}
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
password, ca_cert, certfile, keyfile,
keyfile_passphrase, crlfile):
@ -160,7 +167,8 @@ def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl)
ssl=ssl,
**MONGO_OPTS)
if login is not None and password is not None:
conn[dbname].authenticate(login, password)
else:
@ -174,7 +182,8 @@ def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
ssl_keyfile=keyfile,
ssl_pem_passphrase=keyfile_passphrase,
ssl_crlfile=crlfile,
ssl_cert_reqs=CERT_REQUIRED)
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if login is not None:
logger.info('Authenticating to the database...')
conn[dbname].authenticate(login, mechanism='MONGODB-X509')
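A minimal sketch (separate from the diff, with illustrative connection values) of the pattern introduced above: shared driver options live in one `MONGO_OPTS` dict and are unpacked into every `MongoClient` call, so a single place controls the socket timeout.

```python
import pymongo

# Shared options for every MongoClient call. pymongo accepts option
# names case-insensitively, so this sets socketTimeoutMS.
MONGO_OPTS = {
    'socketTimeoutMS': 20000,  # abort reads/writes on unresponsive sockets after 20 s
}

client = pymongo.MongoClient('localhost', 27017,
                             serverselectiontimeoutms=5000,
                             **MONGO_OPTS)
```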

View File

@ -19,7 +19,7 @@ from bigchaindb.backend.connection import connect
logger = logging.getLogger(__name__)
TABLES = ('bigchain', 'backlog', 'votes')
TABLES = ('bigchain', 'backlog', 'votes', 'assets')
@singledispatch

View File

@ -155,26 +155,26 @@ def _fulfillment_to_details(fulfillment):
raise UnsupportedTypeError(fulfillment.type_name)
def _fulfillment_from_details(data):
def _fulfillment_from_details(data, _depth=0):
"""
Load a fulfillment for a signing spec dictionary
Args:
data: tx.output[].condition.details dictionary
"""
if _depth == 100:
raise ThresholdTooDeep()
if data['type'] == 'ed25519-sha-256':
public_key = base58.b58decode(data['public_key'])
return Ed25519Sha256(public_key=public_key)
if data['type'] == 'threshold-sha-256':
try:
threshold = ThresholdSha256(data['threshold'])
for cond in data['subconditions']:
cond = _fulfillment_from_details(cond)
threshold.add_subfulfillment(cond)
return threshold
except RecursionError:
raise ThresholdTooDeep()
threshold = ThresholdSha256(data['threshold'])
for cond in data['subconditions']:
cond = _fulfillment_from_details(cond, _depth+1)
threshold.add_subfulfillment(cond)
return threshold
raise UnsupportedTypeError(data.get('type'))
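The rewrite above replaces catching `RecursionError` with an explicit `_depth` counter that raises `ThresholdTooDeep` as soon as 100 nested thresholds are seen, independent of the interpreter's recursion limit. A generic sketch of the same pattern (hypothetical names, not BigchainDB code):

```python
def parse_tree(node, _depth=0, _max_depth=100):
    """Recursively parse a nested list structure, rejecting pathological depth."""
    if _depth == _max_depth:
        # Fail fast at a deterministic bound instead of waiting for the
        # interpreter to raise RecursionError at an unpredictable depth.
        raise ValueError('tree too deep')
    if isinstance(node, list):
        return [parse_tree(child, _depth + 1) for child in node]
    return node
```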
@ -638,6 +638,7 @@ class Transaction(object):
for recipient in recipients:
if not isinstance(recipient, tuple) or len(recipient) != 2:
raise ValueError(('Each `recipient` in the list must be a'
' tuple of `([<list of public keys>],'
' <amount>)`'))
pub_keys, amount = recipient
outputs.append(Output.generate(pub_keys, amount))
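For reference, a `recipients` list that passes the check above has the shape `([<list of public keys>], <amount>)` per entry; the key strings below are hypothetical placeholders:

```python
recipients = [
    (['pubkey_A'], 5),               # one owner, amount 5
    (['pubkey_B', 'pubkey_C'], 10),  # two co-owners, amount 10
]
```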

View File

@ -307,3 +307,16 @@ def load_consensus_plugin(name=None):
'consensus.BaseConsensusRules`'.format(type(plugin)))
return plugin
def load_events_plugins(names=None):
plugins = []
if names is None:
return plugins
for name in names:
for entry_point in iter_entry_points('bigchaindb.events', name):
plugins.append((name, entry_point.load()))
return plugins
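A sketch of what a minimal events plugin might look like. The module name and printed message are assumptions; the `event_types` attribute and the `run(queue)` entry point follow the contract used by `start_events_plugins` later in this diff, and the plugin would be registered under the `bigchaindb.events` entry-point group in its package's `setup.py`.

```python
# my_plugin.py -- hypothetical BigchainDB events plugin
from bigchaindb.events import EventTypes

# Optional: restrict which events this plugin's queue receives.
event_types = EventTypes.BLOCK_VALID

def run(queue):
    """Consume events forever; BigchainDB runs this in its own process."""
    while True:
        event = queue.get()
        print('saw a valid block:', event.data['id'])
```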

View File

@ -231,14 +231,16 @@ class Bigchain(object):
response, tx_status = None, None
validity = self.get_blocks_status_containing_tx(txid)
blocks_validity_status = self.get_blocks_status_containing_tx(txid)
check_backlog = True
if validity:
if blocks_validity_status:
# Disregard invalid blocks, and return if there are no valid or undecided blocks
validity = {_id: status for _id, status in validity.items()
if status != Bigchain.BLOCK_INVALID}
if validity:
blocks_validity_status = {
_id: status for _id, status in blocks_validity_status.items()
if status != Bigchain.BLOCK_INVALID
}
if blocks_validity_status:
# The transaction _was_ found in an undecided or valid block,
# so there's no need to look in the backlog table
@ -248,8 +250,8 @@ class Bigchain(object):
# If the transaction is in a valid or any undecided block, return it. Does not check
# if transactions in undecided blocks are consistent, but selects the valid block
# before undecided ones
for target_block_id in validity:
if validity[target_block_id] == Bigchain.BLOCK_VALID:
for target_block_id in blocks_validity_status:
if blocks_validity_status[target_block_id] == Bigchain.BLOCK_VALID:
tx_status = self.TX_VALID
break
@ -307,20 +309,24 @@ class Bigchain(object):
blocks = backend.query.get_blocks_status_from_transaction(self.connection, txid)
if blocks:
# Determine the election status of each block
validity = {block['id']: self.block_election_status(block)
for block in blocks}
blocks_validity_status = {
block['id']: self.block_election_status(block)
for block in blocks
}
# NOTE: If there are multiple valid blocks with this transaction,
# something has gone wrong
if list(validity.values()).count(Bigchain.BLOCK_VALID) > 1:
block_ids = str([block for block in validity
if validity[block] == Bigchain.BLOCK_VALID])
if list(blocks_validity_status.values()).count(Bigchain.BLOCK_VALID) > 1:
block_ids = str([
block for block in blocks_validity_status
if blocks_validity_status[block] == Bigchain.BLOCK_VALID
])
raise core_exceptions.CriticalDoubleInclusion(
'Transaction {tx} is present in '
'multiple valid blocks: {block_ids}'
.format(tx=txid, block_ids=block_ids))
return validity
return blocks_validity_status
else:
return None
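To make the filtering step concrete, here is a self-contained illustration with hypothetical block IDs and plain strings standing in for the `Bigchain.BLOCK_*` constants:

```python
BLOCK_INVALID, BLOCK_UNDECIDED = 'invalid', 'undecided'

blocks_validity_status = {'block-a': BLOCK_INVALID, 'block-b': BLOCK_UNDECIDED}

# Disregard invalid blocks, exactly as in the code above.
blocks_validity_status = {
    _id: status for _id, status in blocks_validity_status.items()
    if status != BLOCK_INVALID
}
assert blocks_validity_status == {'block-b': BLOCK_UNDECIDED}
```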

View File

@ -1,33 +1,91 @@
from enum import Enum
from collections import defaultdict
from multiprocessing import Queue
class EventTypes(Enum):
POISON_PILL = 'POISON_PILL'
class EventTypes:
"""Container class that holds all the possible
events BigchainDB manages."""
# If you add a new Event Type, make sure to add it
# to the docs in docs/server/source/event-plugin-api.rst
ALL = ~0
BLOCK_VALID = 1
BLOCK_INVALID = 2
# NEW_EVENT = 4
# NEW_EVENT = 8
# NEW_EVENT = 16...
class Event:
"""An Event."""
def __init__(self, event_type, event_data):
"""Creates a new event.
Args:
event_type (int): the type of the event, see
:class:`~bigchaindb.events.EventTypes`
event_data (obj): the data of the event.
"""
self.type = event_type
self.data = event_data
class EventHandler:
class Exchange:
"""Dispatch events to subscribers."""
def __init__(self, events_queue):
self.events_queue = events_queue
def __init__(self):
self.publisher_queue = Queue()
def put_event(self, event, timeout=None):
# TODO: handle timeouts
self.events_queue.put(event, timeout=None)
# Map <event_types -> queues>
self.queues = defaultdict(list)
def get_event(self, timeout=None):
# TODO: handle timeouts
return self.events_queue.get(timeout=None)
def get_publisher_queue(self):
"""Get the queue used by the publisher.
Returns:
a :class:`multiprocessing.Queue`.
"""
def setup_events_queue():
# TODO: set bounds to the queue
return Queue()
return self.publisher_queue
def get_subscriber_queue(self, event_types=None):
"""Create a new queue for a specific combination of event types
and return it.
Returns:
a :class:`multiprocessing.Queue`.
"""
if event_types is None:
event_types = EventTypes.ALL
queue = Queue()
self.queues[event_types].append(queue)
return queue
def dispatch(self, event):
"""Given an event, send it to all the subscribers.
Args:
event (:class:`~bigchaindb.events.Event`): the event to
dispatch to all the subscribers.
"""
for event_types, queues in self.queues.items():
if event.type & event_types:
for queue in queues:
queue.put(event)
def run(self):
"""Start the exchange"""
while True:
event = self.publisher_queue.get()
if event == POISON_PILL:
return
else:
self.dispatch(event)
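A minimal usage sketch (not part of the diff) of the new `Exchange`. Event types are powers of two, so a subscription mask is a bitwise OR of types and `EventTypes.ALL` (`~0`) matches every type; `dispatch` delivers an event to a queue whenever `event.type & event_types` is nonzero.

```python
from bigchaindb.events import Exchange, Event, EventTypes

exchange = Exchange()
valid_blocks = exchange.get_subscriber_queue(EventTypes.BLOCK_VALID)
everything = exchange.get_subscriber_queue(EventTypes.ALL)

exchange.dispatch(Event(EventTypes.BLOCK_VALID, {'id': 'some-block-id'}))

# Both queues receive a copy: BLOCK_VALID matches its own mask and ALL.
assert valid_blocks.get().data == everything.get().data
```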

View File

@ -13,7 +13,7 @@ from bigchaindb import backend
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.models import Block
from bigchaindb import Bigchain
from bigchaindb.events import EventHandler, Event, EventTypes
from bigchaindb.events import EventTypes, Event
logger = logging.getLogger(__name__)
@ -25,9 +25,7 @@ class Election:
def __init__(self, events_queue=None):
self.bigchain = Bigchain()
self.event_handler = None
if events_queue:
self.event_handler = EventHandler(events_queue)
self.events_queue = events_queue
def check_for_quorum(self, next_vote):
"""
@ -73,7 +71,7 @@ class Election:
return invalid_block
def handle_block_events(self, result, block_id):
if self.event_handler:
if self.events_queue:
if result['status'] == self.bigchain.BLOCK_UNDECIDED:
return
elif result['status'] == self.bigchain.BLOCK_INVALID:
@ -82,7 +80,7 @@ class Election:
event_type = EventTypes.BLOCK_VALID
event = Event(event_type, self.bigchain.get_block(block_id))
self.event_handler.put_event(event)
self.events_queue.put(event)
def create_pipeline(events_queue=None):

View File

@ -39,7 +39,7 @@ class Vote:
self.last_voted_id = Bigchain().get_last_voted_block().id
self.counters = Counter()
self.validity = {}
self.blocks_validity_status = {}
dummy_tx = Transaction.create([self.bigchain.me],
[([self.bigchain.me], 1)]).to_dict()
@ -127,16 +127,16 @@ class Vote:
"""
self.counters[block_id] += 1
self.validity[block_id] = tx_validity and self.validity.get(block_id,
True)
self.blocks_validity_status[block_id] = tx_validity and self.blocks_validity_status.get(block_id,
True)
if self.counters[block_id] == num_tx:
vote = self.bigchain.vote(block_id,
self.last_voted_id,
self.validity[block_id])
self.blocks_validity_status[block_id])
self.last_voted_id = block_id
del self.counters[block_id]
del self.validity[block_id]
del self.blocks_validity_status[block_id]
return vote, num_tx
def write_vote(self, vote, num_tx):

View File

@ -2,8 +2,9 @@ import logging
import multiprocessing as mp
import bigchaindb
from bigchaindb import config_utils
from bigchaindb.pipelines import vote, block, election, stale
from bigchaindb.events import setup_events_queue
from bigchaindb.events import Exchange, EventTypes
from bigchaindb.web import server, websocket_server
@ -23,15 +24,30 @@ BANNER = """
"""
def start_events_plugins(exchange):
plugins = config_utils.load_events_plugins(
bigchaindb.config.get('events_plugins'))
for name, plugin in plugins:
logger.info('Loading events plugin %s', name)
event_types = getattr(plugin, 'event_types', None)
queue = exchange.get_subscriber_queue(event_types)
mp.Process(name='events_plugin_{}'.format(name),
target=plugin.run,
args=(queue, )).start()
def start():
logger.info('Initializing BigchainDB...')
# Create the events queue
# Create an Exchange object.
# The events queue needs to be initialized once and shared between
# processes. This seems the best way to do it
# At this point only the election process and the event consumer require
# this queue.
events_queue = setup_events_queue()
exchange = Exchange()
# start the processes
logger.info('Starting block')
@ -44,7 +60,7 @@ def start():
stale.start()
logger.info('Starting election')
election.start(events_queue=events_queue)
election.start(events_queue=exchange.get_publisher_queue())
# start the web api
app_server = server.create_server(bigchaindb.config['server'])
@ -54,8 +70,12 @@ def start():
logger.info('WebSocket server started')
p_websocket_server = mp.Process(name='ws',
target=websocket_server.start,
args=(events_queue,))
args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
p_websocket_server.start()
# start message
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
start_events_plugins(exchange)
exchange.run()

View File

@ -1,2 +1,2 @@
__version__ = '1.0.0'
__short_version__ = '1.0'
__version__ = '1.1.0.dev'
__short_version__ = '1.1.dev'

View File

@ -3,7 +3,7 @@ Common classes and methods for API handlers
"""
import logging
from flask import jsonify
from flask import jsonify, request
from bigchaindb import config
@ -14,16 +14,26 @@ logger = logging.getLogger(__name__)
def make_error(status_code, message=None):
if status_code == 404 and message is None:
message = 'Not found'
response_content = {'status': status_code, 'message': message}
logger.error('HTTP API error: %(status)s - %(message)s', response_content)
request_info = {'method': request.method, 'path': request.path}
request_info.update(response_content)
logger.error('HTTP API error: %(status)s - %(method)s:%(path)s - %(message)s', request_info)
response = jsonify(response_content)
response.status_code = status_code
return response
def base_ws_uri():
"""Base websocket uri."""
scheme = config['wsserver']['scheme']
host = config['wsserver']['host']
port = config['wsserver']['port']
"""Base websocket URL that is advertised to external clients.
Useful when the websocket URL advertised to the clients needs to be
customized (typically when running behind NAT, firewall, etc.)
"""
scheme = config['wsserver']['advertised_scheme']
host = config['wsserver']['advertised_host']
port = config['wsserver']['advertised_port']
return '{}://{}:{}'.format(scheme, host, port)
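A sketch of the intended effect, with hypothetical values: a node behind a reverse proxy or NAT binds the WebSocket server locally but advertises the public endpoint, and `base_ws_uri()` now builds the advertised URL.

```python
import os

# Set before BigchainDB reads its configuration; values are illustrative.
os.environ['BIGCHAINDB_WSSERVER_HOST'] = '0.0.0.0'             # where the server binds
os.environ['BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME'] = 'wss'    # what clients are told
os.environ['BIGCHAINDB_WSSERVER_ADVERTISED_HOST'] = 'bdb.example.com'
os.environ['BIGCHAINDB_WSSERVER_ADVERTISED_PORT'] = '443'

# base_ws_uri() would then return 'wss://bdb.example.com:443'.
```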

View File

@ -6,10 +6,10 @@ BigchainDB can store data of any kind (within reason), but it's designed to be p
* The fundamental thing that one sends to a BigchainDB cluster, to be checked and stored (if valid), is a *transaction*, and there are two kinds: CREATE transactions and TRANSFER transactions.
* A CREATE transaction can be used to register any kind of asset (divisible or indivisible), along with arbitrary metadata.
* An asset can have zero, one, or several owners.
* The owners of an asset can specify (crypto-)conditions which must be satisified by anyone wishing transfer the asset to new owners. For example, a condition might be that at least 3 of the 5 current owners must cryptographically sign a transfer transaction.
* BigchainDB verifies that the conditions have been satisified as part of checking the validity of transfer transactions. (Moreover, anyone can check that they were satisfied.)
* The owners of an asset can specify (crypto-)conditions which must be satisfied by anyone wishing to transfer the asset to new owners. For example, a condition might be that at least 3 of the 5 current owners must cryptographically sign a transfer transaction.
* BigchainDB verifies that the conditions have been satisfied as part of checking the validity of transfer transactions. (Moreover, anyone can check that they were satisfied.)
* BigchainDB prevents double-spending of an asset.
* Validated transactions are strongly tamper-resistant; see :doc:`the page about immutability / tamper-resistance <immutable>`.
* Validated transactions are strongly tamper-resistant; see :doc:`the page about immutability <immutable>`.
BigchainDB Integration with Other Blockchains

View File

@ -1,19 +1,6 @@
# BigchainDB and Byzantine Fault Tolerance
We have Byzantine fault tolerance (BFT) in our roadmap, as a switch that people can turn on. We anticipate that turning it on will cause a severe dropoff in performance (to gain some extra security). See [Issue #293](https://github.com/bigchaindb/bigchaindb/issues/293).
While BigchainDB is not currently [Byzantine fault tolerant (BFT)](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance), we plan to offer it as an option.
We anticipate that turning it on will cause a severe dropoff in performance. See [Issue #293](https://github.com/bigchaindb/bigchaindb/issues/293).
Among the big, industry-used distributed databases in production today (e.g. DynamoDB, Bigtable, MongoDB, Cassandra, Elasticsearch), none of them are BFT. Indeed, almost all wide-area distributed systems in production are not BFT, including military, banking, healthcare, and other security-sensitive systems.
There are many more practical things that nodes can do to increase security (e.g. firewalls, key management, access controls).
From a [recent essay by Ken Birman](http://sigops.org/sosp/sosp15/history/05-birman.pdf) (of Cornell):
> Oh, and with respect to the BFT point: Jim [Gray] felt that real systems fail by crashing [54]. Others have since done studies reinforcing this view, or finding that even crash-failure solutions can sometimes defend against application corruption. One interesting study, reported during a SOSP WIPS session by Ben Reed (one of the co-developers of Zookeeper), found that at Yahoo, Zookeeper itself had never experienced Byzantine faults in a one-year period that they studied closely.
> [54] Jim Gray. Why Do Computers Stop and What Can Be Done About It? SOSP, 1985.
Ben Reed never published those results, but Birman wrote more about them in the book *Guide to Reliable Distributed Systems: Building High-Assurance Applications*. From page 358 of that book:
> But the cloud community, led by Ben Reed and Flavio Junqueira at Yahoo, sees things differently (these are the two inventors [sic] of Yahoo's ZooKeeper service). **They have described informal studies of how applications and machines at Yahoo failed, concluding that the frequency of Byzantine failures was extremely small relative to the frequency of crash failures** [emphasis added]. Sometimes they did see data corruption, but then they often saw it occur in a correlated way that impacted many replicas all at once. And very often they saw failures occur in the client layer, then propagate into the service. BFT techniques tend to be used only within a service, not in the client layer that talks to that service, hence offer no protection against malfunctioning clients. **All of this, Reed and Junqueira conclude, lead to the realization that BFT just does not match the real needs of a cloud computing company like Yahoo, even if the data being managed by a service really is of very high importance** [emphasis added]. Unfortunately, they have not published this study; it was reported at an “outrageous opinions” session at the ACM Symposium on Operating Systems Principles, in 2009.
> The practical use of the Byzantine protocol raises another concern: The timing assumptions built into the model [i.e. synchronous or partially-synchronous nodes] are not realizable in most computing environments…
In the meantime, there are practical things that one can do to increase security (e.g. firewalls, key management, and access controls).

View File

@ -14,8 +14,8 @@ A consortium can increase its decentralization (and its resilience) by increasin
There's no node that has a long-term special position in the cluster. All nodes run the same software and perform the same duties.
RethinkDB and MongoDB have an “admin” user which can't be deleted and which can make big changes to the database, such as dropping a table. Right now, that's a big security vulnerability, but we have plans to mitigate it by:
MongoDB and RethinkDB have an “admin” user which can't be deleted and which can make big changes to the database, such as dropping a table. Right now, that's a big security vulnerability, but we have plans to mitigate it by:
1. Locking down the admin user as much as possible.
2. Having all nodes inspect admin-type requests before acting on them. Requests can be checked against an evolving whitelist of allowed actions. Nodes requesing non-allowed requests can be removed from the list of cluster nodes.
2. Having all nodes inspect admin-type requests before acting on them. Requests can be checked against an evolving whitelist of allowed actions. Nodes requesting non-allowed requests can be removed from the list of cluster nodes.
It's worth noting that the RethinkDB admin user can't transfer assets, even today. The only way to create a valid transfer transaction is to fulfill the current (crypto) conditions on the asset, and the admin user can't do that because the admin user doesn't have the necessary private keys (or preimages, in the case of hashlock conditions). They're not stored in the database.
It's worth noting that the admin user can't transfer assets, even today. The only way to create a valid transfer transaction is to fulfill the current (crypto) conditions on the asset, and the admin user can't do that because the admin user doesn't have the necessary private keys (or preimages, in the case of hashlock conditions). They're not stored in the database.

View File

@ -1,19 +1,21 @@
# How BigchainDB is Immutable / Tamper-Resistant
# How BigchainDB is Immutable
The word _immutable_ means "unchanging over time or unable to be changed." For example, the decimal digits of π are immutable (3.14159…).
The blockchain community often describes blockchains as “immutable.” If we interpret that word literally, it means that blockchain data is unchangeable or permanent, which is absurd. The data _can_ be changed. For example, a plague might drive humanity extinct; the data would then get corrupted over time due to water damage, thermal noise, and the general increase of entropy. In the case of Bitcoin, nothing so drastic is required: a 51% attack will suffice.
It's true that blockchain data is more difficult to change than usual: it's more tamper-resistant than a typical file system or database. Therefore, in the context of blockchains, we interpret the word “immutable” to mean tamper-resistant. (Linguists would say that the word “immutable” is a _term of art_ in the blockchain community.)
It's true that blockchain data is more difficult to change (or delete) than usual. It's more than just "tamper-resistant" (which implies intent): blockchain data also resists random changes that can happen without any intent, such as data corruption on a hard drive. Therefore, in the context of blockchains, we interpret the word “immutable” to mean *practically* immutable, for all intents and purposes. (Linguists would say that the word “immutable” is a _term of art_ in the blockchain community.)
BigchainDB achieves strong tamper-resistance in the following ways:
Blockchain data can achieve immutability in several ways:
1. **Replication.** All data is sharded and shards are replicated in several (different) places. The replication factor can be set by the consortium. The higher the replication factor, the more difficult it becomes to change or delete all replicas.
2. **Internal watchdogs.** All nodes monitor all changes and if some unallowed change happens, then appropriate action is taken. For example, if a valid block is deleted, then it is put back.
3. **External watchdogs.** A consortium may opt to have trusted third-parties to monitor and audit their data, looking for irregularities. For a consortium with publicly-readable data, the public can act as an auditor.
4. **Cryptographic signatures** are used throughout BigchainDB as a way to check if messages (transactions, blocks and votes) have been tampered with enroute, and as a way to verify who signed the messages. Each block is signed by the node that created it. Each vote is signed by the node that cast it. A creation transaction is signed by the node that created it, although there are plans to improve that by adding signatures from the sending client and multiple nodes; see [Issue #347](https://github.com/bigchaindb/bigchaindb/issues/347). Transfer transactions can contain multiple inputs (fulfillments, one per asset transferred). Each fulfillment will typically contain one or more signatures from the owners (i.e. the owners before the transfer). Hashlock fulfillments are an exception; theres an open issue ([#339](https://github.com/bigchaindb/bigchaindb/issues/339)) to address that.
5. **Full or partial backups** of the database may be recorded from time to time, possibly on magnetic tape storage, other blockchains, printouts, etc.
6. **Strong security.** Node owners can adopt and enforce strong security policies.
7. **Node diversity.** Diversity makes it so that no one thing (e.g. natural disaster or operating system bug) can compromise enough of the nodes. See [the section on the kinds of node diversity](diversity.html).
1. **Replication.** All data is replicated (copied) to several different places. The replication factor can be set by the consortium. The higher the replication factor, the more difficult it becomes to change or delete all replicas.
1. **Internal watchdogs.** All nodes monitor all changes and if some unallowed change happens, then appropriate action can be taken.
1. **External watchdogs.** A consortium may opt to have trusted third-parties to monitor and audit their data, looking for irregularities. For a consortium with publicly-readable data, the public can act as an auditor.
1. **Economic incentives.** Some blockchain systems make it very expensive to change old stored data. Examples include proof-of-work and proof-of-stake systems. BigchainDB doesn't use explicit incentives like those.
1. Data can be stored using fancy techniques, such as error-correction codes, to make some kinds of changes easier to undo.
1. **Cryptographic signatures** are often used as a way to check if messages (e.g. transactions, blocks or votes) have been tampered with enroute, and as a way to verify who signed the messages. In BigchainDB, each transaction must be signed (by one or more parties), each block is signed by the node that created it, and each vote is signed by the node that cast it.
1. **Full or partial backups** may be recorded from time to time, possibly on magnetic tape storage, other blockchains, printouts, etc.
1. **Strong security.** Node owners can adopt and enforce strong security policies.
1. **Node diversity.** Diversity makes it so that no one thing (e.g. natural disaster or operating system bug) can compromise enough of the nodes. See [the section on the kinds of node diversity](diversity.html).
Some of these things come "for free" as part of the BigchainDB software, and others require some extra effort from the consortium and node owners.

View File

@ -3,7 +3,7 @@
BigchainDB is not production-ready. You can use it to build a prototype or proof-of-concept (POC); many people are already doing that.
Once BigchainDB is production-ready, we'll make an announcement.
BigchainDB version numbers follow the conventions of *Semantic Versioning* as documented at [semver.org](http://semver.org/). This means, among other things:
BigchainDB version numbers follow the conventions of *Semantic Versioning* as documented at [semver.org](http://semver.org/). (For Python stuff, we use [Python's version of Semantic Versioning](https://packaging.python.org/tutorials/distributing-packages/#choosing-a-versioning-scheme).) This means, among other things:
* Before version 1.0, breaking API changes could happen in any new version, even in a change from version 0.Y.4 to 0.Y.5.

View File

@ -3,7 +3,7 @@ BigchainDB and Smart Contracts
One can store the source code of any smart contract (i.e. a computer program) in BigchainDB, but BigchainDB won't run arbitrary smart contracts.
BigchainDB will run the subset of smart contracts expressible using "crypto-conditions," a subset we like to call "simple contracts." Crypto-conditions are part of the `Interledger Protocol <https://interledger.org/>`_.
BigchainDB will run the subset of smart contracts expressible using `Crypto-Conditions <https://tools.ietf.org/html/draft-thomas-crypto-conditions-03>`_. Crypto-conditions are part of the `Interledger Protocol <https://interledger.org/>`_.
The owners of an asset can impose conditions on it that must be met for the asset to be transferred to new owners. Examples of possible conditions (crypto-conditions) include:
@ -11,7 +11,7 @@ The owners of an asset can impose conditions on it that must be met for the asse
- Three out of five current owners must sign the transfer transaction.
- (Shannon and Kelly) or Morgan must sign the transfer transaction.
Crypto-conditions can be quite complex if-this-then-that type conditions, where the "this" can be a long boolean expression. Crypto-conditions can't include loops or recursion and are therefore will always run/check in finite time.
Crypto-conditions can be quite complex. They can't include loops or recursion and therefore will always run/check in finite time.
.. note::

View File

@ -5,12 +5,12 @@ There is some specialized terminology associated with BigchainDB. To get started
## BigchainDB Node
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. Each node is controlled by one person or organization.
A **BigchainDB node** is a machine or set of closely-linked machines running MongoDB Server (or RethinkDB Server), BigchainDB Server, and related software. Each node is controlled by one person or organization.
## BigchainDB Cluster
A set of BigchainDB nodes can connect to each other to form a **BigchainDB cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB/MongoDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
A set of BigchainDB nodes can connect to each other to form a **BigchainDB cluster**. Each node in the cluster runs the same software. A cluster contains one logical MongoDB/RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
## BigchainDB Consortium

View File

@ -44,7 +44,7 @@ You can look at many timestamps to get a statistical sense of when something hap
## How BigchainDB Uses Timestamps
BigchainDB _doesn't_ use timestamps to determine the order of transactions or blocks. In particular, the order of blocks is determined by RethinkDB's changefeed on the bigchain table.
BigchainDB _doesn't_ use timestamps to determine the order of transactions or blocks. In particular, the order of blocks is determined by MongoDB's oplog (or RethinkDB's changefeed) on the bigchain table.
BigchainDB does use timestamps for some things. When a Transaction is written to the backlog, a timestamp called the `assignment_timestamp` is assigned, to determine if it has been waiting in the backlog for too long (i.e. because the node assigned to it hasn't handled it yet).

Binary file not shown (new image, 5.6 KiB).

View File

@ -15,7 +15,7 @@ algorithm provided by the
which is a wrapper around the optimized reference implementation
from [http://keccak.noekeon.org](http://keccak.noekeon.org).
**Important**: Since selecting the Keccak hashing algorithm for SHA-3 in 2012, NIST [released a new version](https://en.wikipedia.org/wiki/SHA-3#cite_note-14) of the hash using the same algorithm but slightly different parameters. As of version 0.9, BigchainDB is using the latest version, supported by pysha3 1.0b1. See below for an example output of the hash function.
**Important**: Since selecting the Keccak hashing algorithm for SHA-3 in 2012, NIST released a new version of the hash using the same algorithm but slightly different parameters. As of version 0.9, BigchainDB is using the latest version, supported by pysha3 1.0b1. See below for an example output of the hash function.
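As an illustration (assuming the pysha3 package is installed), the NIST-standard SHA3-256 digest of the empty byte string is:

```python
import sha3  # provided by the pysha3 package

print(sha3.sha3_256(b'').hexdigest())
# a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a
```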
Here's the relevant code from `bigchaindb/bigchaindb/common/crypto.py`:

View File

@ -19,8 +19,6 @@ Appendices
commands
aws-setup
aws-testing-cluster
template-terraform-aws
template-ansible
azure-quickstart-template
generate-key-pair-for-ssh
firewall-notes

View File

@ -5,7 +5,7 @@ BigchainDB Server has some OS-level dependencies that must be installed.
On Ubuntu 16.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install g++ python3-dev libffi-dev
sudo apt-get install libffi-dev libssl-dev
```
On Fedora 23–25, we found that the following was enough:

View File

@ -104,8 +104,8 @@ docker run \
--name=mongodb \
--publish=172.17.0.1:27017:27017 \
--restart=always \
--volume=/tmp/mongodb_docker/db:/data/db \
--volume=/tmp/mongodb_docker/configdb:/data/configdb \
--volume=$HOME/mongodb_docker/db:/data/db \
--volume=$HOME/mongodb_docker/configdb:/data/configdb \
mongo:3.4.1 --replSet=bigchain-rs
```

View File

@ -1,84 +0,0 @@
# Template: Ansible Playbook to Run a BigchainDB Node on an Ubuntu Machine
This page explains how to use [Ansible](https://www.ansible.com/) to install, configure and run all the software needed to run a one-machine BigchainDB node on a server running Ubuntu 16.04.
**Note: We're not actively maintaining the associated Ansible files (e.g. playbooks). They are RethinkDB-specific, even though we now recommend using MongoDB. You may find the old Ansible stuff useful nevertheless, which is why we moved this page to the Appendices rather than deleting it.**
## Install Ansible
The Ansible documentation has [installation instructions](https://docs.ansible.com/ansible/intro_installation.html). Note the control machine requirements: at the time of writing, Ansible required Python 2.6 or 2.7. ([Python 3 support is coming](https://docs.ansible.com/ansible/python_3_support.html): "Ansible 2.2 features a tech preview of Python 3 support." and the latest version, as of January 31, 2017, was 2.2.1.0. For now, it's probably best to use it with Python 2.)
For example, you could create a special Python 2.x virtualenv named `ansenv` and then install Ansible in it:
```text
cd repos/bigchaindb/ntools
virtualenv -p /usr/local/lib/python2.7.11/bin/python ansenv
source ansenv/bin/activate
pip install ansible
```
## About Our Example Ansible Playbook
Our example Ansible playbook installs, configures and runs a basic BigchainDB node on an Ubuntu 16.04 machine. That playbook is in `.../bigchaindb/ntools/one-m/ansible/one-m-node.yml`.
When you run the playbook (as per the instructions below), it ensures all the necessary software is installed, configured and running. It can be used to get a BigchainDB node set up on a bare Ubuntu 16.04 machine, but it can also be used to ensure that everything is okay on a running BigchainDB node. (If you run the playbook against a host where everything is okay, then it won't change anything on that host.)
## Create an Ansible Inventory File
An Ansible "inventory" file is a file which lists all the hosts (machines) you want to manage using Ansible. (Ansible will communicate with them via SSH.) Right now, we only want to manage one host.
First, determine the public IP address of the host (i.e. something like `192.0.2.128`).
Then create a one-line text file named `hosts` by doing this:
```text
# cd to the directory .../bigchaindb/ntools/one-m/ansible
echo "192.0.2.128" > hosts
```
but replace `192.0.2.128` with the IP address of the host.
## Run the Ansible Playbook(s)
The latest Ubuntu 16.04 AMIs from Canonical don't include Python 2 (which is required by Ansible), so the first step is to run a small Ansible playbook to install Python 2 on the managed node:
```text
# cd to the directory .../bigchaindb/ntools/one-m/ansible
ansible-playbook -i hosts --private-key ~/.ssh/<key-name> install-python2.yml
```
where `<key-name>` should be replaced by the name of the SSH private key you created earlier (for SSHing to the host machine at your cloud hosting provider).
The next step is to run the Ansible playbook named `one-m-node.yml`:
```text
# cd to the directory .../bigchaindb/ntools/one-m/ansible
ansible-playbook -i hosts --private-key ~/.ssh/<key-name> one-m-node.yml
```
What did you just do? Running that playbook ensures all the software necessary for a one-machine BigchainDB node is installed, configured, and running properly. You can run that playbook on a regular schedule to ensure that the system stays properly configured. If something is okay, it does nothing; it only takes action when something is not as-desired.
## Some Notes on the One-Machine Node You Just Got Running
* It ensures that the installed version of RethinkDB is the latest. You can change that by changing the installation task.
* It uses a very basic RethinkDB configuration file based on `bigchaindb/ntools/one-m/ansible/roles/rethinkdb/templates/rethinkdb.conf.j2`.
* If you edit the RethinkDB configuration file, then running the Ansible playbook will **not** restart RethinkDB for you. You must do that manually. (You can stop RethinkDB using `sudo /etc/init.d/rethinkdb stop`; run the playbook to get RethinkDB started again. This assumes you're using init.d, which is what the Ansible playbook assumes. If you want to use systemd, you'll have to edit the playbook accordingly, and stop RethinkDB using `sudo systemctl stop rethinkdb@<name_instance>`.)
* It generates and uses a default BigchainDB configuration file, which it stores in `~/.bigchaindb` (the default location).
* If you edit the BigchainDB configuration file, then running the Ansible playbook will **not** restart BigchainDB for you. You must do that manually. (You could stop it using `sudo killall -9 bigchaindb`. Run the playbook to get it started again.)
## Optional: Create an Ansible Config File
The above command (`ansible-playbook -i ...`) is fairly long. You can omit the optional arguments if you put their values in an [Ansible configuration file](https://docs.ansible.com/ansible/intro_configuration.html) (config file) instead. There are many places where you can put a config file, but to make one specifically for the "one-m" case, you should put it in `.../bigchaindb/ntools/one-m/ansible/`. In that directory, create a file named `ansible.cfg` with the following contents:
```text
[defaults]
private_key_file = $HOME/.ssh/<key-name>
inventory = hosts
```
where, as before, `<key-name>` must be replaced.
## Next Steps
You could make changes to the Ansible playbook (and the resources it uses) to make the node more production-worthy. See [the section on production node assumptions, components and requirements](../production-nodes/index.html).

View File

@ -1,84 +0,0 @@
# Template: Using Terraform to Provision an Ubuntu Machine on AWS
This page explains a way to use [Terraform](https://www.terraform.io/) to provision an Ubuntu machine (i.e. an EC2 instance with Ubuntu 16.04) and other resources on [AWS](https://aws.amazon.com/). That machine can then be used to host a one-machine BigchainDB node, for example.
**Note: We're not actively maintaining the associated Terraform files. You may find them useful nevertheless, which is why we moved this page to the Appendices rather than deleting it.**
## Install Terraform
The [Terraform documentation has installation instructions](https://www.terraform.io/intro/getting-started/install.html) for all common operating systems.
If you don't want to run Terraform on your local machine, you can install it on a cloud machine under your control (e.g. on AWS).
Note: Hashicorp has an enterprise version of Terraform called "Terraform Enterprise." You can license it by itself or get it as part of Atlas. If you decide to license Terraform Enterprise or Atlas, be sure to install it on your own hosting (i.e. "on premise"), not on the hosting provided by Hashicorp. The reason is that BigchainDB clusters are supposed to be decentralized. If everyone used Hashicorp's hosted Atlas, then that would be a point of centralization.
**Ubuntu Installation Tips**
If you want to install Terraform on Ubuntu, first [download the .zip file](https://www.terraform.io/downloads.html). Then install it in `/opt`:
```text
sudo mkdir -p /opt/terraform
sudo unzip path/to/zip-file.zip -d /opt/terraform
```
Why install it in `/opt`? See [the answers at Ask Ubuntu](https://askubuntu.com/questions/1148/what-is-the-best-place-to-install-user-apps).
Next, add `/opt/terraform` to your path. If you use bash for your shell, then you could add this line to `~/.bashrc`:
```text
export PATH="/opt/terraform:$PATH"
```
After doing that, relaunch your shell or force it to read `~/.bashrc` again, e.g. by doing `source ~/.bashrc`. You can verify that terraform is installed and in your path by doing:
```text
terraform --version
```
It should say the current version of Terraform.
## Get Set Up to Use Terraform
First, do the [basic AWS setup steps outlined in the Appendices](../appendices/aws-setup.html).
Then go to the `.../bigchaindb/ntools/one-m/aws/` directory and open the file `variables.tf`. Most of the variables have sensible default values, but you can change them if you like. In particular, you may want to change `aws_region`. (Terraform looks in `~/.aws/credentials` to get your AWS credentials, so you don't have to enter those anywhere.)
The `ssh_key_name` has no default value, so Terraform will prompt you every time it needs it.
To see what Terraform will do, run:
```text
terraform plan
```
It should ask you the value of `ssh_key_name`.
It figures out the plan by reading all the `.tf` Terraform files in the directory.
If you don't want to be asked for the `ssh_key_name`, you can change the default value of `ssh_key_name` (in the file `variables.tf`) or [you can set an environment variable](https://www.terraform.io/docs/configuration/variables.html) named `TF_VAR_ssh_key_name`.
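For example (the key name here is hypothetical):
```text
export TF_VAR_ssh_key_name="aws-key-1"
```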
## Use Terraform to Provision Resources
To provision all the resources specified in the plan, do the following. **Note: This will provision actual resources on AWS, and those cost money. Be sure to shut down the resources you don't want to keep running later, otherwise the cost will keep growing.**
```text
terraform apply
```
Terraform will report its progress as it provisions all the resources. Once it's done, you can go to the Amazon EC2 web console and see the instance, its security group, its elastic IP, and its attached storage volumes (one for the root directory and one for RethinkDB storage).
At this point, there is no software installed on the instance except for Ubuntu 16.04 and whatever else came with the Amazon Machine Image (AMI) specified in the Terraform configuration (files).
The next step is to install, configure and run all the necessary software for a BigchainDB node. You could use [our example Ansible playbook](template-ansible.html) to do that.
## Optional: "Destroy" the Resources
If you want to shut down all the resources just provisioned, you must first disable termination protection on the instance:
1. Go to the EC2 console and select the instance you just launched. It should be named `BigchainDB_node`.
2. Click **Actions** > **Instance Settings** > **Change Termination Protection** > **Yes, Disable**
3. Back in your terminal, do `terraform destroy`
Terraform should "destroy" (i.e. terminate or delete) all the AWS resources you provisioned above.
If it fails (e.g. because of an attached and mounted EBS volume), then you can terminate the instance using the EC2 console: **Actions** > **Instance State** > **Terminate** > **Yes, Terminate**. Once the instance is terminated, you should still do `terraform destroy` to make sure that all the other resources are destroyed.

View File

@ -0,0 +1,102 @@
Conditions
==========
At a high level, a condition is like a lock on an output.
If you can satisfy the condition, you can unlock the output and transfer/spend it.
BigchainDB Server supports a subset of the ILP Crypto-Conditions
(`version 02 of Crypto-Conditions <https://tools.ietf.org/html/draft-thomas-crypto-conditions-02>`_).
A condition object can be quite elaborate,
with many nested levels,
but the simplest case is actually quite simple.
Here's an example signature condition:
.. code-block:: json
{
"details": {
"type": "ed25519-sha-256",
"public_key": "HFp773FH21sPFrn4y8wX3Ddrkzhqy4La4cQLfePT2vz7"
},
"uri": "ni:///sha-256;at0MY6Ye8yvidsgL9FrnKmsVzX0XrNNXFmuAPF4bQeU?fpt=ed25519-sha-256&cost=131072"
}
If someone wants to spend the output where this condition is found, then they must create a TRANSFER transaction with an input that fulfills this condition. Because it's an ed25519-sha-256 signature condition, that means they must sign the TRANSFER transaction with the private key corresponding to the public key HFp773…
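As a rough sketch of what that signing involves (this is *not* the BigchainDB driver API, which handles serialization and key encoding for you), an Ed25519 signature over a message can be produced with a library such as PyNaCl:

.. code-block:: python

    from nacl.signing import SigningKey

    signing_key = SigningKey.generate()  # in practice: the private key behind HFp773...
    message = b'<serialized transaction>'  # placeholder, not the real serialization
    signature = signing_key.sign(message).signature  # 64 bytes, Ed25519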
Supported Crypto-Conditions
---------------------------
BigchainDB Server v1.0 supports two of the Crypto-Conditions:
1. ED25519-SHA-256 signature conditions
2. THRESHOLD-SHA-256 threshold conditions
We saw an example signature condition above.
For more information about how BigchainDB handles keys and signatures,
see the page titled :ref:`Signature Algorithm and Keys`.
A more complex condition can be composed by using n signature conditions as inputs to an m-of-n threshold condition: a logic gate which outputs TRUE if and only if m or more inputs are TRUE. If there are n inputs to a threshold condition:
* 1-of-n is the same as a logical OR of all the inputs
* n-of-n is the same as a logical AND of all the inputs
For example, you could create a condition requiring m (of n) signatures.
Here's an example 2-of-2 condition:
.. code-block:: json
{
"details": {
"type": "threshold-sha-256",
"threshold": 2,
"subconditions": [
{
"public_key": "5ycPMinRx7D7e6wYXLNLa3TCtQrMQfjkap4ih7JVJy3h",
"type": "ed25519-sha-256"
},
{
"public_key": "9RSas2uCxR5sx1rJoUgcd2PB3tBK7KXuCHbUMbnH3X1M",
"type": "ed25519-sha-256"
}
]
},
"uri": "ni:///sha-256;zr5oThl2kk6613WKGFDg-JGu00Fv88nXcDcp6Cyr0Vw?fpt=threshold-sha-256&cost=264192&subtypes=ed25519-sha-256"
}
The (single) output of a threshold condition can be used as one of the inputs to another threshold condition. That means you can combine threshold conditions to build complex expressions such as ``(x OR y) AND (2 of {a, b, c})``.
.. image:: /_static/Conditions_Circuit_Diagram.png
When you create a condition, you can calculate its
`cost <https://tools.ietf.org/html/draft-thomas-crypto-conditions-02#section-7.2.2>`_,
an estimate of the resources that would be required to validate the fulfillment.
For example, the cost of one signature condition is 131072.
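For instance, the ``cost=264192`` advertised in the 2-of-2 threshold example above is consistent with the spec's formula for threshold conditions (the sum of the ``m`` most expensive subcondition costs, plus 1024 times the number of subconditions). A quick sanity check in Python:

.. code-block:: python

    # Cost of the 2-of-2 threshold condition shown above
    ed25519_cost = 131072                    # fixed cost of an ED25519-SHA-256 condition
    subcondition_costs = [ed25519_cost] * 2  # two signature subconditions
    m, n = 2, 2                              # 2-of-2 threshold
    cost = sum(sorted(subcondition_costs)[-m:]) + 1024 * n
    assert cost == 264192                    # matches the cost in the URI above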
A BigchainDB federation can put an upper limit on the complexity of each
condition, either directly by setting a maximum allowed cost,
or
`indirectly <https://github.com/bigchaindb/bigchaindb/issues/356#issuecomment-288085251>`_
by :ref:`setting a maximum allowed transaction size <Enforcing a Max Transaction Size>`
which would limit
the overall complexity across all inputs and outputs of a transaction.
Note: At the time of writing, there was no configuration setting
to set a maximum allowed cost,
so the only real option was to
:ref:`set a maximum allowed transaction size <Enforcing a Max Transaction Size>`.
Constructing a Condition
------------------------
The above examples should make it clear how to construct
a condition object, but they didn't say how to generate the ``uri``.
If you want to generate a correct condition URI,
then you should consult the Crypto-Conditions spec
or use one of the existing Crypto-Conditions packages/libraries
(which are used by the BigchainDB Drivers).
* `Crypto-Conditions Spec (Version 02) <https://tools.ietf.org/html/draft-thomas-crypto-conditions-02>`_
* BigchainDB :ref:`Drivers & Tools`
The `Handcrafting Transactions <https://docs.bigchaindb.com/projects/py-driver/en/latest/handcraft.html>`_
page may also be of interest.

View File

@ -15,5 +15,6 @@ This section unpacks each one in turn.
transaction-model
asset-model
inputs-outputs
conditions
block-model
vote-model

View File

@ -17,7 +17,7 @@ An input has the following structure:
{
"owners_before": ["<The public_keys list in the output being spent>"],
"fulfillment": "<Fulfillment URI fulfilling the condition of the output being spent>",
"fulfillment": "<String that fulfills the condition in the output being spent>",
"fulfills": {
"output_index": "<Index of the output being spent (an integer)>",
"transaction_id": "<ID of the transaction containing the output being spent>"
@ -28,10 +28,22 @@ You can think of the ``fulfills`` object as a pointer to an output on another tr
A CREATE transaction should have exactly one input. That input can contain one or more ``owners_before``, a ``fulfillment`` (with one signature from each of the owners-before), and the value of ``fulfills`` should be ``null``. A TRANSFER transaction should have at least one input, and the value of ``fulfills`` should not be ``null``.
See the reference on :ref:`inputs <Input>` for more description about the meaning of each field.
To calculate a fulfillment URI, you can use one of the
The ``fulfillment`` string fulfills the condition in the output that is being spent (transferred).
To calculate it:
1. Determine the fulfillment as per the `Crypto-Conditions spec (version 02) <https://tools.ietf.org/html/draft-thomas-crypto-conditions-02>`_.
2. Encode the fulfillment using the `ASN.1 Distinguished Encoding Rules (DER) <http://www.itu.int/ITU-T/recommendations/rec.aspx?rec=12483&lang=en>`_.
3. Encode the resulting bytes using "base64url" (*not* typical base64) as per `RFC 4648, Section 5 <https://tools.ietf.org/html/rfc4648#section-5>`_.
To do those calculations, you can use one of the
:ref:`BigchainDB drivers or transaction-builders <Drivers & Tools>`,
or use a low-level crypto-conditions library as illustrated
in the page about `Handcrafting Transactions <https://docs.bigchaindb.com/projects/py-driver/en/latest/handcraft.html>`_.
A ``fulfillment`` string should look something like:
.. code::
"pGSAIDgbT-nnN57wgI4Cx17gFHv3UB_pIeAzwZCk10rAjs9bgUDxyNnXMl-5PFgSIOrN7br2Tz59MiWe2XY0zlC7LcN52PKhpmdRtcr7GR1PXuTfQ9dE3vGhv7LHn6QqDD6qYHYM"
Outputs
@ -47,55 +59,13 @@ An output has the following structure:
"amount": "<Number of shares of the asset (an integer in a string)>"
}
The :ref:`page about conditions <Conditions>` explains the contents of a ``condition``.
The list of ``public_keys`` is always the "owners" of the asset at the time the transaction completed, but before the next transaction started.
See the reference on :ref:`outputs <Output>` for more description about the meaning of each field.
Below is a high-level description of what goes into building a ``condition`` object.
To construct an actual ``condition`` object, you can use one of the
:ref:`BigchainDB drivers or transaction-builders <Drivers & Tools>`,
or use a low-level crypto-conditions library as illustrated
in the page about `Handcrafting Transactions <https://docs.bigchaindb.com/projects/py-driver/en/latest/handcraft.html>`_.
Conditions
----------
At a high level, a condition is like a lock on an output.
If you can satisfy the condition, you can unlock the output and transfer/spend it.
BigchainDB Server v1.0 supports a subset of the ILP Crypto-Conditions
(`version 02 of Crypto-Conditions <https://tools.ietf.org/html/draft-thomas-crypto-conditions-02>`_).
The simplest supported condition is a simple signature condition.
Such a condition could be stated as,
"You can satisfy this condition
if you send me a message and a cryptographic signature of that message,
produced using the private key corresponding to this public key."
The public key is put in the output.
BigchainDB currently only supports ED25519 signatures.
A more complex condition can be composed by using n simple signature conditions as inputs to an m-of-n threshold condition (a logic gate which outputs TRUE if and only if m or more inputs are TRUE). If there are n inputs to a threshold condition:
* 1-of-n is the same as a logical OR of all the inputs
* n-of-n is the same as a logical AND of all the inputs
For example, one could create a condition requiring m (of n) signatures before their asset can be transferred.
The (single) output of a threshold condition can be used as one of the inputs of other threshold conditions. This means that one can combine threshold conditions to build complex logical expressions, e.g. (x OR y) AND (u OR v).
When one creates a condition, one can calculate its
`cost <https://tools.ietf.org/html/draft-thomas-crypto-conditions-02#section-7.2.2>`_,
an estimate of the resources that would be required to validate the fulfillment.
A BigchainDB federation can put an upper limit on the complexity of each
condition, either directly by setting a maximum allowed cost,
or
`indirectly <https://github.com/bigchaindb/bigchaindb/issues/356#issuecomment-288085251>`_
by :ref:`setting a maximum allowed transaction size <Enforcing a Max Transaction Size>`
which would limit
the overall complexity across all inputs and outputs of a transaction.
Note: At the time of writing, there was no configuration setting
to set a maximum allowed cost,
so the only real option was to
:ref:`set a maximum allowed transaction size <Enforcing a Max Transaction Size>`.
Note that ``amount`` must be a string (e.g. ``"7"``).
In a TRANSFER transaction, the sum of the output amounts must be the same as the sum of the outputs that it transfers (i.e. the sum of the input amounts). For example, if a TRANSFER transaction has two outputs, one with ``"amount": "2"`` and one with ``"amount": "3"``, then the sum of the outputs is 5 and so the sum of the outputs-being-transferred must also be 5.
.. note::

View File

@ -86,7 +86,7 @@ to rebuild them after the upgrade to install any new dependencies.
Start RethinkDB:
```bash
docker-compose up -d rdb
docker-compose -f docker-compose.rdb.yml up -d rdb
```
The RethinkDB web interface should be accessible at http://localhost:58080/.
@ -98,19 +98,19 @@ web interface at: http://0.0.0.0:58080/.
Start a BigchainDB node:
```bash
docker-compose up -d bdb-rdb
docker-compose -f docker-compose.rdb.yml up -d bdb-rdb
```
You can monitor the logs:
```bash
docker-compose logs -f bdb-rdb
docker-compose -f docker-compose.rdb.yml logs -f bdb-rdb
```
If you wish to run the tests:
```bash
docker-compose run --rm bdb-rdb py.test -v -n auto
docker-compose -f docker-compose.rdb.yml run --rm bdb-rdb pytest -v -n auto
```
### Docker with MongoDB
@ -147,22 +147,16 @@ docker-compose run --rm bdb py.test -v --database-backend=mongodb
### Accessing the HTTP API
A quick check to make sure that the BigchainDB server API is operational:
You can do a quick check to make sure that the BigchainDB server API is operational:
```bash
curl $(docker-compose port bdb 9984)
```
should give you something like:
The result should be a JSON object (inside braces like { })
containing the name of the software ("BigchainDB"),
the version of BigchainDB, the node's public key, and other information.
```bash
{
"keyring": [],
"public_key": "Brx8g4DdtEhccsENzNNV6yvQHR8s9ebhKyXPFkWUXh5e",
"software": "BigchainDB",
"version": "0.6.0"
}
```
How does the above curl command work? Inside the Docker container, BigchainDB
exposes the HTTP API on port `9984`. First we get the public port where that
port is bound:

View File

@ -25,3 +25,4 @@ Community-Driven Libraries and Tools
* `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_
* `Java driver <https://github.com/mgrand/bigchaindb-java-driver>`_
* `Ruby driver <https://github.com/LicenseRocks/bigchaindb_ruby>`_
* `Ruby library for preparing/signing transactions and submitting them or querying a BigchainDB/IPDB node (MIT licensed) <https://rubygems.org/gems/bigchaindb>`_

View File

@ -0,0 +1,67 @@
The Event Plugin API [experimental]
===================================
.. danger::
The Event Plugin API is **experimental** and might change in the future.
BigchainDB implements an internal event system that allows different software
components to receive updates on specific topics. The WebSocket API, for example,
is a subscriber to a stream of events called ``BLOCK_VALID``. Every time a block is
voted valid, the WebSocket API is notified, and it sends updates to all the
clients connected.
We decided to make this internal event system public, to allow developers to
integrate BigchainDB with other applications, such as AMQP systems.
Available Events
----------------
The event types are listed in the source file ``bigchaindb/events.py``.
.. list-table:: Event Types
:widths: 15 10 30
:header-rows: 1
* - event name
- event id
- description
* - BLOCK_VALID
- 1
- a block has been voted valid by the network.
* - BLOCK_INVALID
- 2
- a block has been voted invalid by the network.
Plugin Example
----------------
We developed a minimal plugin that listens to new valid blocks and prints them
to the console:
https://github.com/bigchaindb/events-plugin-example
Architecture of an Event Plugin
-------------------------------
Creating your own plugin is really easy, and can be summarized in a few steps:
1. Create a new Python package that defines the entry point ``bigchaindb.events`` in its ``setup.py``.
2. In your entry point, define two properties:
- ``event_types``: a variable to tell BigchainDB which events your plugin is interested in.
A plugin can subscribe to more than one event type by combining them using the
**bitwise OR** operator, e.g. if you want to subscribe to both valid and
invalid blocks, your ``event_types`` can be ``1 | 2``.
- ``run``: a function that will process the events coming from BigchainDB.
3. Install the newly created Python package in the current environment.
4. Add the plugin name to your BigchainDB configuration.
5. (Re)start BigchainDB.
If the installation was successful, the plugin will be run in a different
process. Your plugin will receive events through a ``multiprocessing.Queue``
object.
.. note::
It's your plugin's responsibility to consume its queue.
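As a sketch of what such a plugin module might look like (all names here are hypothetical; see the example plugin linked above for a real, working one):

.. code-block:: python

    # my_plugin.py -- a hypothetical plugin module.
    # Registered in setup.py with something like:
    #     entry_points={'bigchaindb.events': ['my_plugin=my_plugin']}

    BLOCK_VALID = 1
    BLOCK_INVALID = 2

    # Subscribe to both event types by combining them with bitwise OR.
    event_types = BLOCK_VALID | BLOCK_INVALID

    def run(events_queue):
        # BigchainDB hands events over through a multiprocessing.Queue;
        # it's the plugin's job to keep consuming it.
        while True:
            event = events_queue.get()
            print(event)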

View File

@ -0,0 +1,8 @@
The Events API
==============
.. toctree::
:maxdepth: 1
websocket-event-stream-api
event-plugin-api

View File

@ -13,7 +13,7 @@ BigchainDB Server Documentation
dev-and-test/index
server-reference/index
http-client-server-api
websocket-event-stream-api
events/index
drivers-clients/index
data-models/index
schema/transaction

View File

@ -35,11 +35,19 @@ cluster.
``existing BigchainDB instance`` will refer to the BigchainDB instance in the
existing cluster.
Below, we refer to multiple files by their directory and filename,
such as ``mongodb/mongo-ext-conn-svc.yaml``. Those files are files in the
`bigchaindb/bigchaindb repository on GitHub
<https://github.com/bigchaindb/bigchaindb/>`_ in the ``k8s/`` directory.
Make sure you're getting those files from the appropriate Git branch on
GitHub, i.e. the branch for the version of BigchainDB that your BigchainDB
cluster is using.
Step 1: Prerequisites
---------------------
* A public/private key pair for the new BigchainDB instance.
* :ref:`List of all the things to be done by each node operator <Things Each Node Operator Must Do>`.
* The public key should be shared offline with the other existing BigchainDB
nodes in the existing BigchainDB cluster.
@ -65,20 +73,126 @@ example:
$ kubectl --context ctx-2 proxy --port 8002
Step 2: Prepare the New Kubernetes Cluster
------------------------------------------
Step 2: Configure the BigchainDB Node
-------------------------------------
Follow the steps in the sections to set up Storage Classes and Persistent Volume
Claims, and to run MongoDB in the new cluster:
1. :ref:`Add Storage Classes <Step 9: Create Kubernetes Storage Classes for MongoDB>`.
2. :ref:`Add Persistent Volume Claims <Step 10: Create Kubernetes Persistent Volume Claims>`.
3. :ref:`Create the Config Map <Step 3: Configure Your BigchainDB Node>`.
4. :ref:`Run MongoDB instance <Step 11: Start a Kubernetes StatefulSet for MongoDB>`.
See the section on how to :ref:`configure your BigchainDB node <How to Configure a BigchainDB Node>`.
Step 3: Add the New MongoDB Instance to the Existing Replica Set
----------------------------------------------------------------
Step 3: Start the NGINX Service
--------------------------------
Please see the following section:
* :ref:`Start NGINX service <Step 4: Start the NGINX Service>`.
Step 4: Assign DNS Name to the NGINX Public IP
----------------------------------------------
Please see the following section:
* :ref:`Assign DNS to NGINX Public IP <Step 5: Assign DNS Name to the NGINX Public IP>`.
Step 5: Start the MongoDB Kubernetes Service
--------------------------------------------
Please see the following section:
* :ref:`Start the MongoDB Kubernetes Service <Step 6: Start the MongoDB Kubernetes Service>`.
Step 6: Start the BigchainDB Kubernetes Service
-----------------------------------------------
Please see the following section:
* :ref:`Start the BigchainDB Kubernetes Service <Step 7: Start the BigchainDB Kubernetes Service>`.
Step 7: Start the OpenResty Kubernetes Service
----------------------------------------------
Please see the following section:
* :ref:`Start the OpenResty Kubernetes Service <Step 8: Start the OpenResty Kubernetes Service>`.
Step 8: Start the NGINX Kubernetes Deployment
---------------------------------------------
Please see the following section:
* :ref:`Run NGINX deployment <Step 9: Start the NGINX Kubernetes Deployment>`.
Step 9: Create Kubernetes Storage Classes for MongoDB
-----------------------------------------------------
Please see the following section:
* :ref:`Step 10: Create Kubernetes Storage Classes for MongoDB`.
Step 10: Create Kubernetes Persistent Volume Claims
---------------------------------------------------
Please see the following section:
* :ref:`Step 11: Create Kubernetes Persistent Volume Claims`.
Step 11: Start a Kubernetes StatefulSet for MongoDB
---------------------------------------------------
Please see the following section:
* :ref:`Step 12: Start a Kubernetes StatefulSet for MongoDB`.
Step 12: Verify network connectivity between the MongoDB instances
------------------------------------------------------------------
Make sure your MongoDB instances can access each other over the network. *If* you are deploying
the new MongoDB node in a different cluster or geographical location using Azure Kubernetes Container
Service, you will have to set up networking between the two clusters using `Kubernetes
Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
Suppose we have an existing MongoDB instance ``mdb-instance-0`` residing in the Azure data center location ``westeurope``, and we
want to add a new MongoDB instance ``mdb-instance-1``, located in the Azure data center location ``eastus``, to the existing MongoDB
replica set. Unless you have already explicitly set up networking so that ``mdb-instance-0`` and ``mdb-instance-1`` can
communicate with each other, we will have to add a Kubernetes Service in each cluster to accomplish this.
It is similar to ensuring that there is a ``CNAME`` record in the DNS
infrastructure to resolve ``mdb-instance-X`` to the host where it is actually available.
We can do this in Kubernetes using a Kubernetes Service of ``type``
``ExternalName``.
* This configuration is located in the file ``mongodb/mongo-ext-conn-svc.yaml``.
* Set ``metadata.name`` to the host name of the MongoDB instance you are trying to connect to.
  For instance, if you are configuring this Service on the cluster with ``mdb-instance-0``, then ``metadata.name`` will
  be ``mdb-instance-1``, and vice versa.
* Set ``spec.ports.port[0]`` to the ``mongodb-backend-port`` from the ConfigMap for the other cluster.
* Set ``spec.externalName`` to the FQDN mapped to NGINX Public IP of the cluster you are trying to connect to.
For more information about the FQDN please refer to: :ref:`Assign DNS Name to the NGINX Public
IP <Step 5: Assign DNS Name to the NGINX Public IP>`
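Putting those settings together, the Service on the ``westeurope`` cluster (the one running ``mdb-instance-0``) might look roughly like the sketch below; the FQDN and port are illustrative values, not prescriptions:

.. code-block:: yaml

    apiVersion: v1
    kind: Service
    metadata:
      name: mdb-instance-1            # the *other* instance we want to reach
      namespace: default
    spec:
      type: ExternalName
      # FQDN mapped to the NGINX public IP of the other cluster:
      externalName: bdb-test-cluster-1.eastus.cloudapp.azure.com
      ports:
      - port: 27017                   # the other cluster's mongodb-backend-port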
.. note::
This operation needs to be repeated ``n-1`` times per node for an ``n``-node cluster, using the respective FQDNs
of the clusters we need to communicate with.
If you are not the system administrator of the cluster, you have to get in
touch with the system administrator/s of the other ``n-1`` clusters and
share with them your instance name (``mdb-instance-name`` in the ConfigMap)
and the FQDN for your node (``cluster-fqdn`` in the ConfigMap).
Step 13: Add the New MongoDB Instance to the Existing Replica Set
-----------------------------------------------------------------
Note that by ``replica set``, we are referring to the MongoDB replica set,
not a Kubernetes' ``ReplicaSet``.
@ -88,12 +202,18 @@ will have to coordinate offline with an existing administrator so that they can
add the new MongoDB instance to the replica set.
Add the new instance of MongoDB from an existing instance by accessing the
``mongo`` shell.
``mongo`` shell and authenticate as the ``adminUser`` we created for the existing MongoDB instance, OR
contact the admin of the PRIMARY MongoDB node:
.. code:: bash
$ kubectl --context ctx-1 exec -it mdb-0 -c mongodb -- /bin/bash
root@mdb-0# mongo --port 27017
$ kubectl --context ctx-1 exec -it <existing mongodb-instance-name> bash
$ mongo --host <existing mongodb-instance-name> --port 27017 --verbose --ssl \
--sslCAFile /etc/mongod/ssl/ca.pem \
--sslPEMKeyFile /etc/mongod/ssl/mdb-instance.pem
PRIMARY> use admin
PRIMARY> db.auth("adminUser", "superstrongpassword")
One can only add members to a replica set from the ``PRIMARY`` instance.
The ``mongo`` shell prompt should state that this is the primary member in the
@ -105,11 +225,11 @@ Run the ``rs.add()`` command with the FQDN and port number of the other instance
.. code:: bash
PRIMARY> rs.add("<fqdn>:<port>")
PRIMARY> rs.add("<new mdb-instance-name>:<port>")
Step 4: Verify the Replica Set Membership
-----------------------------------------
Step 14: Verify the Replica Set Membership
------------------------------------------
You can use the ``rs.conf()`` and the ``rs.status()`` commands available in the
mongo shell to verify the replica set membership.
@ -118,22 +238,86 @@ The new MongoDB instance should be listed in the membership information
displayed.
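For example, from the same ``mongo`` shell session used above:

.. code:: bash

    PRIMARY> rs.conf().members
    PRIMARY> rs.status().members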
Step 5: Start the New BigchainDB Instance
-----------------------------------------
Step 15: Configure Users and Access Control for MongoDB
-------------------------------------------------------
Get the file ``bigchaindb-dep.yaml`` from GitHub using:
* Create the users in MongoDB with the appropriate roles assigned to them. This
will enable the new BigchainDB instance, new MongoDB Monitoring Agent
instance and the new MongoDB Backup Agent instance to function correctly.
.. code:: bash
* Please refer to
:ref:`Configure Users and Access Control for MongoDB <Step 13: Configure
Users and Access Control for MongoDB>` to create and configure the new
BigchainDB, MongoDB Monitoring Agent and MongoDB Backup Agent users on the
cluster.
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml
.. note::
You will not have to create the MongoDB replica set or create the admin user, as they already exist.
Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb`` which is the name
of the MongoDB service defined earlier.
If you do not have access to the ``PRIMARY`` member of the replica set, you
need to get in touch with the administrator who can create the users in the
MongoDB cluster.
Edit the ``BIGCHAINDB_KEYPAIR_PUBLIC`` with the public key of this instance,
the ``BIGCHAINDB_KEYPAIR_PRIVATE`` with the private key of this instance and
the ``BIGCHAINDB_KEYRING`` with a ``:`` delimited list of all the public keys
in the BigchainDB cluster.
Step 16: Start a Kubernetes Deployment for MongoDB Monitoring Agent
-------------------------------------------------------------------
Please see the following section:
* :ref:`Step 14: Start a Kubernetes Deployment for MongoDB Monitoring Agent`.
.. note::
Every MMS group has only one active Monitoring and Backup Agent; having
multiple (standby) agents provides high availability and failover, in case one goes
down. For more information about Monitoring and Backup Agents, please
consult the `official MongoDB documentation
<https://docs.cloudmanager.mongodb.com/tutorial/move-agent-to-new-server/>`_.
Step 17: Start a Kubernetes Deployment for MongoDB Backup Agent
---------------------------------------------------------------
Please see the following section:
* :ref:`Step 15: Start a Kubernetes Deployment for MongoDB Backup Agent`.
.. note::
Every MMS group has only one active Monitoring and Backup Agent; having
multiple (standby) agents provides high availability and failover, in case one goes
down. For more information about Monitoring and Backup Agents, please
consult the `official MongoDB documentation
<https://docs.cloudmanager.mongodb.com/tutorial/move-agent-to-new-server/>`_.
Step 18: Start a Kubernetes Deployment for BigchainDB
-----------------------------------------------------
* Set ``metadata.name`` and ``spec.template.metadata.labels.app`` to the
value set in ``bdb-instance-name`` in the ConfigMap, followed by
``-dep``.
For example, if the value set in the
``bdb-instance-name`` is ``bdb-instance-0``, set the fields to the
value ``bdb-instance-0-dep``.
* Set the value of ``BIGCHAINDB_KEYPAIR_PRIVATE`` (not base64-encoded).
(In the future, we'd like to pull the BigchainDB private key from
the Secret named ``bdb-private-key``, but a Secret can only be mounted as a file,
so BigchainDB Server would have to be modified to look for it
in a file.)
* As we gain more experience running BigchainDB in testing and production,
we will tweak the ``resources.limits`` values for CPU and memory, and as
richer monitoring and probing becomes available in BigchainDB, we will
tweak the ``livenessProbe`` and ``readinessProbe`` parameters.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 2 ports -
``bigchaindb-api-port`` and ``bigchaindb-ws-port``. Set them to the
values specified in the ConfigMap.
* Uncomment the env var ``BIGCHAINDB_KEYRING``; it will pick up the
``:``-delimited list of all the public keys in the BigchainDB cluster from the ConfigMap.
Create the required Deployment using:
@ -141,38 +325,59 @@ Create the required Deployment using:
$ kubectl --context ctx-2 apply -f bigchaindb-dep.yaml
You can check its status using the command ``kubectl get deploy -w``
You can check its status using the command ``kubectl --context ctx-2 get deploy -w``
Step 6: Restart the Existing BigchainDB Instance(s)
---------------------------------------------------
Step 19: Restart the Existing BigchainDB Instance(s)
----------------------------------------------------
Add the public key of the new BigchainDB instance to the keyring of all the
existing BigchainDB instances and update the BigchainDB instances using:
* Add the public key of the new BigchainDB instance to the ConfigMap
``bdb-keyring`` variable of all the existing BigchainDB instances.
Update all the existing ConfigMaps using:
.. code:: bash
$ kubectl --context ctx-1 replace -f bigchaindb-dep.yaml
$ kubectl --context ctx-1 apply -f configuration/config-map.yaml
This will create a "rolling deployment" in Kubernetes where a new instance of
BigchainDB will be created, and if the health check on the new instance is
successful, the earlier one will be terminated. This ensures that there is
zero downtime during updates.
* Uncomment the ``BIGCHAINDB_KEYRING`` variable in
``bigchaindb/bigchaindb-dep.yaml`` so that it refers to the keyring updated in the
ConfigMap.
Update the running BigchainDB instance using:
.. code:: bash
$ kubectl --context ctx-1 delete -f bigchaindb/bigchaindb-dep.yaml
$ kubectl --context ctx-1 apply -f bigchaindb/bigchaindb-dep.yaml
See the page titled :ref:`How to Configure a BigchainDB Node` for more information about
ConfigMap configuration.
You can SSH to an existing BigchainDB instance and run the ``bigchaindb
show-config`` command to check that the keyring is updated.
Step 7: Run NGINX as a Deployment
---------------------------------
Step 20: Start a Kubernetes Deployment for OpenResty
----------------------------------------------------
Please see :ref:`this page <Step 8: Start the NGINX Kubernetes Deployment>` to
set up NGINX in your new node.
Please see the following section:
* :ref:`Step 17: Start a Kubernetes Deployment for OpenResty`.
Step 8: Test Your New BigchainDB Node
-------------------------------------
Step 21: Configure the MongoDB Cloud Manager
--------------------------------------------
Please refer to the testing steps :ref:`here <Step 17: Verify the BigchainDB
Node Setup>` to verify that your new BigchainDB node is working as expected.
* MongoDB Cloud Manager auto-detects the members of the replica set and
configures the agents to act as a master/slave accordingly.
* You can verify that the new MongoDB instance is detected by the
Monitoring and Backup Agent using the Cloud Manager UI.
Step 22: Test Your New BigchainDB Node
--------------------------------------
* Please refer to the testing steps :ref:`here <Step 19: Verify the BigchainDB
Node Setup>` to verify that your new BigchainDB node is working as expected.

View File

@ -28,13 +28,13 @@ by going into the directory ``client-cert/easy-rsa-3.0.1/easyrsa3``
and using:
.. code:: bash
./easyrsa init-pki
./easyrsa gen-req bdb-instance-0 nopass
You should change the Common Name (e.g. ``bdb-instance-0``)
to a value that reflects what the
client certificate is being used for, e.g. ``mdb-mon-instance-3`` or ``mdb-bak-instance-4``. (The final integer is specific to your BigchainDB node in the BigchainDB cluster.)
You will be prompted to enter the Distinguished Name (DN) information for this certificate. For each field, you can accept the default value [in brackets] by pressing Enter.
@ -48,6 +48,10 @@ You will be prompted to enter the Distinguished Name (DN) information for this c
Aside: The ``nopass`` option means "do not encrypt the private key (default is encrypted)". You can get help with the ``easyrsa`` command (and its subcommands)
by using the subcommand ``./easyrsa help``.
.. note::
For more information about requirements for MongoDB client certificates, please consult the `official MongoDB
documentation <https://docs.mongodb.com/manual/tutorial/configure-x509-client-authentication/>`_.
Step 3: Get the Client Certificate Signed
-----------------------------------------
@ -66,11 +70,11 @@ Go to your ``bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3/``
directory and do something like:
.. code:: bash
./easyrsa import-req bdb-instance-0.req bdb-instance-0
./easyrsa import-req /path/to/bdb-instance-0.req bdb-instance-0
./easyrsa sign-req client bdb-instance-0
Once you have signed it, you can send the signed certificate
and the CA certificate back to the requestor.
The files are ``pki/issued/bdb-instance-0.crt`` and ``pki/ca.crt``.
@ -79,9 +83,21 @@ The files are ``pki/issued/bdb-instance-0.crt`` and ``pki/ca.crt``.
Step 4: Generate the Consolidated Client PEM File
-------------------------------------------------
MongoDB requires a single, consolidated file containing both the public and
private keys.
.. note::
This step can be skipped for the BigchainDB client certificate, as BigchainDB
uses the PyMongo driver, which accepts separate certificate and key files.
MongoDB, MongoDB Backup Agent and MongoDB Monitoring Agent require a single,
consolidated file containing both the public and private keys.
.. code:: bash
cat bdb-instance-0.crt bdb-instance-0.key > bdb-instance-0.pem
cat /path/to/mdb-instance-0.crt /path/to/mdb-instance-0.key > mdb-instance-0.pem
OR
cat /path/to/mdb-mon-instance-0.crt /path/to/mdb-mon-instance-0.key > mdb-mon-instance-0.pem
OR
cat /path/to/mdb-bak-instance-0.crt /path/to/mdb-bak-instance-0.key > mdb-bak-instance-0.pem

View File

@ -20,9 +20,11 @@ Feel free to change things to suit your needs or preferences.
revoke-tls-certificate
template-kubernetes-azure
node-on-kubernetes
add-node-on-kubernetes
upgrade-on-kubernetes
log-analytics
easy-rsa
cloud-manager
node-config-map-and-secrets
log-analytics
cloud-manager
easy-rsa
upgrade-on-kubernetes
add-node-on-kubernetes
restore-from-mongodb-cloud-manager
tectonic-azure

View File

@ -1,32 +1,54 @@
Log Analytics on Azure
======================
This section documents how to create and configure a Log Analytics workspace on
Azure, for a Kubernetes-based deployment.
The documented approach is based on an integration of Microsoft's Operations
Management Suite (OMS) with a Kubernetes-based Azure Container Service cluster.
This page describes how we use Microsoft Operations Management Suite (OMS)
to collect all logs from a Kubernetes cluster,
to search those logs,
and to set up email alerts based on log messages.
The :ref:`oms-k8s-references` section (below) contains links
to more detailed documentation.
The :ref:`oms-k8s-references` section (below) contains links to more detailed documentation on
Azure, and Kubernetes.
There are two steps:
There are three main steps involved:
1. Setup: Create a log analytics OMS workspace
and a Containers solution under that workspace.
2. Deploy OMS agents to your Kubernetes cluster.
1. Create a workspace (``LogAnalyticsOMS``).
2. Create a ``ContainersOMS`` solution under the workspace.
3. Deploy the OMS agent(s).
Steps 1 and 2 rely on `Azure Resource Manager templates`_ and can be done with
one template so we'll cover them together. Step 3 relies on a
`Kubernetes DaemonSet`_ and will be covered separately.
Step 1: Setup
-------------
Minimum Requirements
--------------------
This document assumes that you have already deployed a Kubernetes cluster, and
that you have the Kubernetes command line interface ``kubectl`` installed.
Step 1 can be done the web browser way or the command-line way.
Creating a Workspace and Adding a Containers Solution
-----------------------------------------------------
For the sake of this document and example, we'll assume an existing resource
The Web Browser Way
~~~~~~~~~~~~~~~~~~~
To create a new log analytics OMS workspace:
1. Go to the Azure Portal in your web browser.
2. Click on **More services >** in the lower left corner of the Azure Portal.
3. Type "log analytics" or similar.
4. Select **Log Analytics** from the list of options.
5. Click on **+ Add** to add a new log analytics OMS workspace.
6. Answer the questions. You can call the OMS workspace anything,
but use the same resource group and location as your Kubernetes cluster.
The free pricing tier will suffice, but you can also use a paid one.
To add a "Containers solution" to that new workspace:
1. In the Azure Portal, in the Log Analytics section, click the name of the new workspace.
2. Click **OMS Workspace**.
3. Click **OMS Portal**. It should launch the OMS Portal in a new tab.
4. Click the **Solutions Gallery** tile.
5. Click the **Containers** tile.
6. Click **Add**.
The Command-Line Way
~~~~~~~~~~~~~~~~~~~~
We'll assume your Kubernetes cluster has a resource
group named:
* ``resource_group``
@ -50,53 +72,53 @@ An example of a simple template file (``--template-file``):
.. code-block:: json
{
"$schema": "http://schema.management.azure.com/schemas/2014-04-01-preview/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"sku": {
"type": "String"
},
"workspaceName": {
"type": "String"
},
"solutionType": {
"type": "String"
},
},
"$schema": "http://schema.management.azure.com/schemas/2014-04-01-preview/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"sku": {
"type": "String"
},
"workspaceName": {
"type": "String"
},
"solutionType": {
"type": "String"
},
"resources": [
{
"apiVersion": "2015-03-20",
"type": "Microsoft.OperationalInsights/workspaces",
"name": "[parameters('workspaceName')]",
"location": "[resourceGroup().location]",
"properties": {
"sku": {
"name": "[parameters('sku')]"
}
},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"location": "[resourceGroup().location]",
"name": "[Concat(parameters('solutionType'), '(', parameters('workspaceName'), ')')]",
"type": "Microsoft.OperationsManagement/solutions",
"id": "[Concat(resourceGroup().id, '/providers/Microsoft.OperationsManagement/solutions/', parameters('solutionType'), '(', parameters('workspaceName'), ')')]",
"dependsOn": [
"[concat('Microsoft.OperationalInsights/workspaces/', parameters('workspaceName'))]"
],
"properties": {
"workspaceResourceId": "[resourceId('Microsoft.OperationalInsights/workspaces/', parameters('workspaceName'))]"
},
"plan": {
"publisher": "Microsoft",
"product": "[Concat('OMSGallery/', parameters('solutionType'))]",
"name": "[Concat(parameters('solutionType'), '(', parameters('workspaceName'), ')')]",
"promotionCode": ""
}
}
]
}
{
"apiVersion": "2015-03-20",
"type": "Microsoft.OperationalInsights/workspaces",
"name": "[parameters('workspaceName')]",
"location": "[resourceGroup().location]",
"properties": {
"sku": {
"name": "[parameters('sku')]"
}
},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"location": "[resourceGroup().location]",
"name": "[Concat(parameters('solutionType'), '(', parameters('workspaceName'), ')')]",
"type": "Microsoft.OperationsManagement/solutions",
"id": "[Concat(resourceGroup().id, '/providers/Microsoft.OperationsManagement/solutions/', parameters('solutionType'), '(', parameters('workspaceName'), ')')]",
"dependsOn": [
"[concat('Microsoft.OperationalInsights/workspaces/', parameters('workspaceName'))]"
],
"properties": {
"workspaceResourceId": "[resourceId('Microsoft.OperationalInsights/workspaces/', parameters('workspaceName'))]"
},
"plan": {
"publisher": "Microsoft",
"product": "[Concat('OMSGallery/', parameters('solutionType'))]",
"name": "[Concat(parameters('solutionType'), '(', parameters('workspaceName'), ')')]",
"promotionCode": ""
}
}
]
}
]
}
}
An example of the associated parameter file (``--parameters``):
@ -104,27 +126,29 @@ An example of the associated parameter file (``--parameters``):
.. code-block:: json
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"sku": {
"value": "Free"
},
"workspaceName": {
"value": "work_space"
},
"solutionType": {
"value": "Containers"
},
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"sku": {
"value": "Free"
},
"workspaceName": {
"value": "work_space"
},
"solutionType": {
"value": "Containers"
}
}
}
Deploy the OMS Agents
---------------------
Step 2: Deploy the OMS Agents
-----------------------------
To deploy an OMS agent, two important pieces of information are needed:
* workspace id
* workspace key
1. workspace id
2. workspace key
You can obtain the workspace id using:
@ -203,8 +227,7 @@ or the OMS Portal, but at the time of writing,
there was more functionality in the OMS Portal
(e.g. the ability to create an Alert based on a search).
There are instructions to get to the OMS Portal
in the section titled :ref:`Deploy the OMS Agents` above.
There are instructions to get to the OMS Portal above.
Once you're in the OMS Portal, click on **Log Search**
and enter a query.
Here are some example queries:

View File

@ -29,6 +29,38 @@ where all data values must be base64-encoded.
This is true of all Kubernetes ConfigMaps and Secrets.)
vars.cluster-fqdn
~~~~~~~~~~~~~~~~~
The ``cluster-fqdn`` field specifies the domain you would have
:ref:`registered before <2. Register a Domain and Get an SSL Certificate for It>`.
vars.cluster-frontend-port
~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``cluster-frontend-port`` field specifies the port on which your cluster
will be available to all external clients.
It is set to the HTTPS port ``443`` by default.
vars.cluster-health-check-port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``cluster-health-check-port`` field specifies the port number on which health check
probes are sent to the main NGINX instance.
It is set to ``8888`` by default.
vars.cluster-dns-server-ip
~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``cluster-dns-server-ip`` is the IP of the DNS server for a node.
We use DNS for service discovery. A Kubernetes deployment always has a DNS
server (``kube-dns``) running at the cluster IP ``10.0.0.10``, so this is
set to ``10.0.0.10`` by default.
vars.mdb-instance-name and Similar
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -49,6 +81,68 @@ There are some things worth noting about the ``mdb-instance-name``:
* We use ``mdb-instance-0``, ``mdb-instance-1`` and so on in our
documentation. Your BigchainDB cluster may use a different naming convention.
vars.ngx-mdb-instance-name and Similar
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NGINX needs the FQDN of the servers inside the cluster to be able to forward
traffic.
The ``ngx-openresty-instance-name``, ``ngx-mdb-instance-name`` and
``ngx-bdb-instance-name`` are the FQDNs of the OpenResty instance, the MongoDB
instance, and the BigchainDB instance in this Kubernetes cluster respectively.
In Kubernetes, this is usually the name specified in the
corresponding ``vars.*-instance-name``, followed by
``.<namespace name>.svc.cluster.local``. For example, if you run OpenResty in
the default Kubernetes namespace, this will be
``<vars.openresty-instance-name>.default.svc.cluster.local``.
vars.mongodb-frontend-port and vars.mongodb-backend-port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``mongodb-frontend-port`` is the port number on which external clients can
access MongoDB. Access needs to be restricted to other MongoDB instances only,
by enabling an authentication mechanism on the MongoDB cluster.
It is set to ``27017`` by default.
The ``mongodb-backend-port`` is the port number on which MongoDB is actually
available/listening for requests in your cluster.
It is also set to ``27017`` by default.
vars.openresty-backend-port
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``openresty-backend-port`` is the port number on which OpenResty is
listening for requests.
This is used by the NGINX instance to forward requests
destined for the OpenResty instance to the right port.
This is also used by the OpenResty instance to bind to the correct port to
receive requests from the NGINX instance.
It is set to ``80`` by default.
vars.bigchaindb-wsserver-advertised-scheme
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``bigchaindb-wsserver-advertised-scheme`` is the protocol used to access
the WebSocket API in BigchainDB. This can be set to ``wss`` or ``ws``.
It is set to ``wss`` by default.
vars.bigchaindb-api-port, vars.bigchaindb-ws-port and Similar
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``bigchaindb-api-port`` is the port number on which BigchainDB is
listening for HTTP requests. Currently set to ``9984`` by default.
The ``bigchaindb-ws-port`` is the port number on which BigchainDB is
listening for Websocket requests. Currently set to ``9985`` by default.
There's another :ref:`page with a complete listing of all the BigchainDB Server
configuration settings <Configuration Settings>`.
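To tie the above together, here is a rough sketch of how some of these variables might appear in the ``data`` section of ``configuration/config-map.yaml`` (the values shown are illustrative defaults, not prescriptions):

.. code-block:: yaml

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: vars
    data:
      cluster-fqdn: "bdb-test-cluster-0.westeurope.cloudapp.azure.com"
      cluster-frontend-port: "443"
      cluster-health-check-port: "8888"
      mongodb-frontend-port: "27017"
      mongodb-backend-port: "27017"
      openresty-backend-port: "80"
      bigchaindb-api-port: "9984"
      bigchaindb-ws-port: "9985"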
bdb-config.bdb-keyring
~~~~~~~~~~~~~~~~~~~~~~~
@ -127,18 +221,15 @@ If you're not using 3scale,
you can delete the ``threescale-credentials`` Secret
or leave all the values blank (``""``).
If you *are* using 3scale, you can get the value for ``frontend-api-dns-name``
using something like ``echo "your.nodesubdomain.net" | base64 -w 0``
To get the values for ``secret-token``, ``service-id``,
``version-header`` and ``provider-key``, login to your 3scale admin,
then click **APIs** and click on **Integration** for the relevant API.
If you *are* using 3scale, get the values for ``secret-token``,
``service-id``, ``version-header`` and ``service-token`` by logging in to the 3scale
portal with your admin account, clicking **APIs**, and then clicking **Integration**
for the relevant API.
Scroll to the bottom of the page and click the small link
in the lower right corner, labelled **Download the NGINX Config files**.
You'll get a ``.zip`` file.
Unzip it, then open the ``.conf`` file and the ``.lua`` file.
Unzip it (if it is a ``.zip`` file), then open the ``.conf`` file and the ``.lua`` file.
You should be able to find all the values in those files.
You have to be careful because it will have values for *all* your APIs,
You have to be careful because it will have values for **all** your APIs,
and some values vary from API to API.
The ``version-header`` is the timestamp in a line that looks like:

View File

@ -53,7 +53,7 @@ to the above command (i.e. the path to the private key).
the context for cluster 2. To find out the current context, do:
.. code:: bash
$ kubectl config view
and then look for the ``current-context`` in the output.
@ -87,6 +87,10 @@ You can connect to your cluster's
$ kubectl proxy -p 8001
or
$ az acs kubernetes browse -g [Resource Group] -n [Container service instance name] --ssh-key-file /path/to/privateKey
or, if you prefer to be explicit about the context (explained above):
.. code:: bash
@ -102,7 +106,7 @@ Step 3: Configure Your BigchainDB Node
--------------------------------------
See the page titled :ref:`How to Configure a BigchainDB Node`.
Step 4: Start the NGINX Service
-------------------------------
@ -113,89 +117,66 @@ Step 4: Start the NGINX Service
public IP to be assigned.
* You have the option to use vanilla NGINX without HTTPS support or an
OpenResty NGINX integrated with 3scale API Gateway.
NGINX with HTTPS support.
Step 4.1: Vanilla NGINX
^^^^^^^^^^^^^^^^^^^^^^^
* This configuration is located in the file ``nginx/nginx-svc.yaml``.
* This configuration is located in the file ``nginx-http/nginx-http-svc.yaml``.
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``ngx-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``ngx-instance-name`` in
the ConfigMap followed by ``-dep``. For example, if the value set in the
``ngx-instance-name`` is ``ngx-instance-0``, set the
``spec.selector.app`` to ``ngx-instance-0-dep``.
* Set ``ngx-public-mdb-port.port`` to 27017, or the port number on which you
want to expose MongoDB service.
Set the ``ngx-public-mdb-port.targetPort`` to the port number on which the
Kubernetes MongoDB service will be present.
``ngx-instance-name`` is ``ngx-http-instance-0``, set the
``spec.selector.app`` to ``ngx-http-instance-0-dep``.
* Set ``ngx-public-api-port.port`` to 80, or the port number on which you want to
expose BigchainDB API service.
Set the ``ngx-public-api-port.targetPort`` to the port number on which the
Kubernetes BigchainDB API service will present.
* Set ``ports[0].port`` and ``ports[0].targetPort`` to the value set in the
``cluster-frontend-port`` in the ConfigMap above. This is the
``public-cluster-port`` in the file which is the ingress in to the cluster.
* Set ``ngx-public-ws-port.port`` to 81, or the port number on which you want to
expose BigchainDB Websocket service.
Set the ``ngx-public-ws-port.targetPort`` to the port number on which the
BigchainDB Websocket service will be present.
* Start the Kubernetes Service:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx/nginx-svc.yaml
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-http/nginx-http-svc.yaml
Step 4.2: OpenResty NGINX + 3scale
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 4.2: NGINX with HTTPS
^^^^^^^^^^^^^^^^^^^^^^^^^^
* You have to enable HTTPS for this one and will need an HTTPS certificate
for your domain.
* You should have already created the necessary Kubernetes Secrets in the previous
step (e.g. ``https-certs`` and ``threescale-credentials``).
* This configuration is located in the file ``nginx-3scale/nginx-3scale-svc.yaml``.
* You should have already created the necessary Kubernetes Secrets in the previous
step (i.e. ``https-certs``).
* This configuration is located in the file ``nginx-https/nginx-https-svc.yaml``.
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``ngx-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``ngx-instance-name`` in
the ConfigMap followed by ``-dep``. For example, if the value set in the
``ngx-instance-name`` is ``ngx-instance-0``, set the
``spec.selector.app`` to ``ngx-instance-0-dep``.
* Set ``ngx-public-mdb-port.port`` to 27017, or the port number on which you
want to expose MongoDB service.
Set the ``ngx-public-mdb-port.targetPort`` to the port number on which the
Kubernetes MongoDB service will be present.
``ngx-instance-name`` is ``ngx-https-instance-0``, set the
``spec.selector.app`` to ``ngx-https-instance-0-dep``.
* Set ``ngx-public-3scale-port.port`` to 8080, or the port number on which
you want to let 3scale communicate with OpenResty NGINX for authentication.
Set the ``ngx-public-3scale-port.targetPort`` to the port number on which
this Openresty NGINX service will be listening to for communication with
3scale.
* Set ``ports[0].port`` and ``ports[0].targetPort`` to the value set in the
``cluster-frontend-port`` in the ConfigMap above. This is the
``public-secure-cluster-port`` in the file which is the ingress in to the cluster.
* Set ``ngx-public-bdb-port.port`` to 443, or the port number on which you want
to expose BigchainDB API service.
Set the ``ngx-public-api-port.targetPort`` to the port number on which the
Kubernetes BigchainDB API service will present.
* Set ``ports[1].port`` and ``ports[1].targetPort`` to the value set in the
``mongodb-frontend-port`` in the ConfigMap above. This is the
``public-mdb-port`` in the file which specifies where MongoDB is
available.
* Set ``ngx-public-bdb-port-http.port`` to 80, or the port number on which you
want to expose BigchainDB Websocket service.
Set the ``ngx-public-bdb-port-http.targetPort`` to the port number on which the
BigchainDB Websocket service will be present.
* Start the Kubernetes Service:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-3scale/nginx-3scale-svc.yaml
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-https/nginx-https-svc.yaml
Step 5: Assign DNS Name to the NGINX Public IP
@ -208,11 +189,11 @@ Step 5: Assign DNS Name to the NGINX Public IP
* The following command can help you find out if the NGINX service started
above has been assigned a public IP or external IP address:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 get svc -w
* Once a public IP is assigned, you can map it to
a DNS name.
We usually assign ``bdb-test-cluster-0``, ``bdb-test-cluster-1`` and
@ -223,7 +204,7 @@ Step 5: Assign DNS Name to the NGINX Public IP
**Set up DNS mapping in Azure.**
Select the current Azure resource group and look for the ``Public IP``
resource. You should see at least 2 entries there - one for the Kubernetes
master and the other for the MongoDB instance. You may have to ``Refresh`` the
master and the other for the NGINX instance. You may have to ``Refresh`` the
Azure web page listing the resources in a resource group for the latest
changes to be reflected.
Select the ``Public IP`` resource that is attached to your service (it should
@ -233,7 +214,7 @@ have the Azure DNS prefix name along with a long random string, without the
changes to be applied.
To verify the DNS setting is operational, you can run ``nslookup <DNS
name added in ConfigMap>`` from your local Linux shell.
name added in Azure configuration>`` from your local Linux shell.
This will ensure that when you scale the replica set later, other MongoDB
members in the replica set can reach this instance.
@ -246,12 +227,17 @@ Step 6: Start the MongoDB Kubernetes Service
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``mdb-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``mdb-instance-name`` in
the ConfigMap followed by ``-ss``. For example, if the value set in the
``mdb-instance-name`` is ``mdb-instance-0``, set the
``spec.selector.app`` to ``mdb-instance-0-ss``.
* Set ``ports[0].port`` and ``ports[0].targetPort`` to the value set in the
``mongodb-backend-port`` in the ConfigMap above.
This is the ``mdb-port`` in the file which specifies where MongoDB listens
for API requests.
* Start the Kubernetes Service:
.. code:: bash
@ -266,12 +252,22 @@ Step 7: Start the BigchainDB Kubernetes Service
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``bdb-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``bdb-instance-name`` in
the ConfigMap followed by ``-dep``. For example, if the value set in the
``bdb-instance-name`` is ``bdb-instance-0``, set the
``spec.selector.app`` to ``bdb-instance-0-dep``.
* Set ``ports[0].port`` and ``ports[0].targetPort`` to the value set in the
``bigchaindb-api-port`` in the ConfigMap above.
This is the ``bdb-api-port`` in the file which specifies where BigchainDB
listens for HTTP API requests.
* Set ``ports[1].port`` and ``ports[1].targetPort`` to the value set in the
``bigchaindb-ws-port`` in the ConfigMap above.
This is the ``bdb-ws-port`` in the file which specifies where BigchainDB
listens for Websocket connections.
* Start the Kubernetes Service:
.. code:: bash
@ -279,81 +275,86 @@ Step 7: Start the BigchainDB Kubernetes Service
$ kubectl --context k8s-bdb-test-cluster-0 apply -f bigchaindb/bigchaindb-svc.yaml
Step 8: Start the OpenResty Kubernetes Service
----------------------------------------------
* This configuration is located in the file ``nginx-openresty/nginx-openresty-svc.yaml``.
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``openresty-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``openresty-instance-name`` in
the ConfigMap followed by ``-dep``. For example, if the value set in the
``openresty-instance-name`` is ``openresty-instance-0``, set the
``spec.selector.app`` to ``openresty-instance-0-dep``.
* Start the Kubernetes Service:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-openresty/nginx-openresty-svc.yaml
Step 9: Start the NGINX Kubernetes Deployment
---------------------------------------------
* NGINX is used as a proxy to OpenResty, BigchainDB and MongoDB instances in
the node. It proxies HTTP/HTTPS requests on the ``cluster-frontend-port``
to the corresponding OpenResty or BigchainDB backend, and TCP connections
on ``mongodb-frontend-port`` to the MongoDB backend.
* As in step 4, you have the option to use vanilla NGINX without HTTPS or
NGINX with HTTPS support.
Step 9.1: Vanilla NGINX
^^^^^^^^^^^^^^^^^^^^^^^
* This configuration is located in the file ``nginx-http/nginx-http-dep.yaml``.
* Set the ``metadata.name`` and ``spec.template.metadata.labels.app``
to the value set in ``ngx-instance-name`` in the ConfigMap followed by a
``-dep``. For example, if the value set in the ``ngx-instance-name`` is
``ngx-http-instance-0``, set the fields to ``ngx-http-instance-0-dep``.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 3 ports -
``mongodb-frontend-port``, ``cluster-frontend-port`` and
``cluster-health-check-port``. Set them to the values specified in the
ConfigMap.
* Set ``MONGODB_BACKEND_HOST`` env var to
the value set in ``mdb-instance-name`` in the ConfigMap, followed by
``.default.svc.cluster.local``. For example, if the value set in the
``mdb-instance-name`` is ``mdb-instance-0``, set the
``MONGODB_BACKEND_HOST`` env var to
``mdb-instance-0.default.svc.cluster.local``.
* Set ``BIGCHAINDB_BACKEND_HOST`` env var to
the value set in ``bdb-instance-name`` in the ConfigMap, followed by
``.default.svc.cluster.local``. For example, if the value set in the
``bdb-instance-name`` is ``bdb-instance-0``, set the
``BIGCHAINDB_BACKEND_HOST`` env var to
``bdb-instance-0.default.svc.cluster.local``.
* Start the Kubernetes Deployment:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-http/nginx-http-dep.yaml
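
To confirm that the Deployment came up and that the backend host environment
variables were set as intended, you can run something like the following
(``ngx-http-instance-0-dep`` is the example name used above; substitute your
own):

.. code:: bash

   # Watch the Deployment until it reports its replica as available
   $ kubectl --context k8s-bdb-test-cluster-0 get deployments -w

   # Inspect the pod template, including the MONGODB_BACKEND_HOST and
   # BIGCHAINDB_BACKEND_HOST environment variables set above
   $ kubectl --context k8s-bdb-test-cluster-0 describe deployment ngx-http-instance-0-dep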
Step 9.2: NGINX with HTTPS
^^^^^^^^^^^^^^^^^^^^^^^^^^
* This configuration is located in the file
``nginx-https/nginx-https-dep.yaml``.
* Set the ``metadata.name`` and ``spec.template.metadata.labels.app``
to the value set in ``ngx-instance-name`` in the ConfigMap followed by a
``-dep``. For example, if the value set in the ``ngx-instance-name`` is
``ngx-https-instance-0``, set the fields to ``ngx-https-instance-0-dep``.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 3 ports -
``mongodb-frontend-port``, ``cluster-frontend-port`` and
``cluster-health-check-port``. Set them to the values specified in the
ConfigMap.
* Set ``MONGODB_BACKEND_HOST`` env var to
the value set in ``mdb-instance-name`` in the ConfigMap, followed by
``.default.svc.cluster.local``. For example, if the value set in the
``mdb-instance-name`` is ``mdb-instance-0``, set the
``MONGODB_BACKEND_HOST`` env var to
``mdb-instance-0.default.svc.cluster.local``.
* Set ``BIGCHAINDB_BACKEND_HOST`` env var to
the value set in ``bdb-instance-name`` in the ConfigMap, followed by
``.default.svc.cluster.local``. For example, if the value set in the
``bdb-instance-name`` is ``bdb-instance-0``, set the
``BIGCHAINDB_BACKEND_HOST`` env var to
``bdb-instance-0.default.svc.cluster.local``.
* Start the Kubernetes Deployment:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-https/nginx-https-dep.yaml
Step 10: Create Kubernetes Storage Classes for MongoDB
------------------------------------------------------
MongoDB needs somewhere to store its data persistently,
outside the container where MongoDB is running.
@ -425,7 +426,7 @@ Kubernetes just looks for a storageAccount
with the specified skuName and location.
Step 11: Create Kubernetes Persistent Volume Claims
---------------------------------------------------
Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
@ -457,7 +458,7 @@ Initially, the status of persistent volume claims might be "Pending"
but it should become "Bound" fairly quickly.
Step 12: Start a Kubernetes StatefulSet for MongoDB
---------------------------------------------------
* This configuration is located in the file ``mongodb/mongo-ss.yaml``.
@ -466,7 +467,7 @@ Step 11: Start a Kubernetes StatefulSet for MongoDB
the ConfigMap.
For example, if the value set in the ``mdb-instance-name``
is ``mdb-instance-0``, set the field to ``mdb-instance-0``.
* Set ``metadata.name``, ``spec.template.metadata.name`` and
``spec.template.metadata.labels.app`` to the value set in
``mdb-instance-name`` in the ConfigMap, followed by
@ -478,7 +479,7 @@ Step 11: Start a Kubernetes StatefulSet for MongoDB
* Note how the MongoDB container uses the ``mongo-db-claim`` and the
``mongo-configdb-claim`` PersistentVolumeClaims for its ``/data/db`` and
``/data/configdb`` directories (mount paths).
* Note also that we use the pod's ``securityContext.capabilities.add``
specification to add the ``FOWNER`` capability to the container. That is
because the MongoDB container has the user ``mongodb``, with uid ``999`` and
@ -494,25 +495,30 @@ Step 11: Start a Kubernetes StatefulSet for MongoDB
* As we gain more experience running MongoDB in testing and production, we
will tweak the ``resources.limits.cpu`` and ``resources.limits.memory``.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently only expose the MongoDB
backend port. Set it to the value specified for ``mongodb-backend-port``
in the ConfigMap.
* Create the MongoDB StatefulSet using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-ss.yaml
* It might take up to 10 minutes for the disks, specified in the Persistent
Volume Claims above, to be created and attached to the pod.
The UI might show that the pod has errored with the message
"timeout expired waiting for volumes to attach/mount". Use the CLI below
to check the status of the pod in this case, instead of the UI.
This happens due to a bug in Azure ACS.
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 get pods -w
Step 13: Configure Users and Access Control for MongoDB
-------------------------------------------------------
* In this step, you will create a user on MongoDB with authorization
@ -524,26 +530,26 @@ Step 12: Configure Users and Access Control for MongoDB
* Find out the name of your MongoDB pod by reading the output
of the ``kubectl ... get pods`` command at the end of the last step.
It should be something like ``mdb-instance-0-ss-0``.
* Log in to the MongoDB pod using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 exec -it <name of your MongoDB pod> bash
* Open a mongo shell using the certificates
already present at ``/etc/mongod/ssl/``
.. code:: bash
$ mongo --host localhost --port 27017 --verbose --ssl \
--sslCAFile /etc/mongod/ca/ca.pem \
--sslPEMKeyFile /etc/mongod/ssl/mdb-instance.pem
* Initialize the replica set using:
.. code:: bash
> rs.initiate( {
_id : "bigchain-rs",
members: [ {
@ -556,7 +562,7 @@ Step 12: Configure Users and Access Control for MongoDB
``mdb-instance-name`` in the ConfigMap.
For example, if the value set in the ``mdb-instance-name`` is
``mdb-instance-0``, set the ``hostname`` above to the value ``mdb-instance-0``.
* The instance should be voted as the ``PRIMARY`` in the replica set (since
this is the only instance in the replica set till now).
This can be observed from the mongo shell prompt,
@ -567,14 +573,15 @@ Step 12: Configure Users and Access Control for MongoDB
log in to the mongo shell. For further details, see `localhost
exception <https://docs.mongodb.com/manual/core/security-users/#localhost-exception>`_
in MongoDB.
.. code:: bash
PRIMARY> use admin
PRIMARY> db.createUser( {
user: "adminUser",
pwd: "superstrongpassword",
roles: [ { role: "userAdminAnyDatabase", db: "admin" } ]
roles: [ { role: "userAdminAnyDatabase", db: "admin" },
{ role: "clusterManager", db: "admin"} ]
} )
* Exit and restart the mongo shell using the above command.
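
For example, to log back in and authenticate as the admin user created above
(a sketch; adjust the certificate paths and credentials to your setup):

.. code:: bash

   $ mongo --host localhost --port 27017 --verbose --ssl \
       --sslCAFile /etc/mongod/ca/ca.pem \
       --sslPEMKeyFile /etc/mongod/ssl/mdb-instance.pem \
       -u adminUser -p superstrongpassword \
       --authenticationDatabase admin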
@ -599,16 +606,16 @@ Step 12: Configure Users and Access Control for MongoDB
-inform PEM -subject -nameopt RFC2253
You should see an output line that resembles:
.. code:: bash
subject= emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE
The ``subject`` line states the complete user name we need to use for
creating the user on the mongo shell as follows:
.. code:: bash
PRIMARY> db.getSiblingDB("$external").runCommand( {
createUser: 'emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE',
writeConcern: { w: 'majority' , wtimeout: 5000 },
@ -640,7 +647,7 @@ Step 12: Configure Users and Access Control for MongoDB
} )
Step 14: Start a Kubernetes Deployment for MongoDB Monitoring Agent
-------------------------------------------------------------------
* This configuration is located in the file
@ -661,7 +668,7 @@ Step 13: Start a Kubernetes Deployment for MongoDB Monitoring Agent
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-monitoring-agent/mongo-mon-dep.yaml
Step 15: Start a Kubernetes Deployment for MongoDB Backup Agent
---------------------------------------------------------------
* This configuration is located in the file
@ -682,7 +689,7 @@ Step 14: Start a Kubernetes Deployment for MongoDB Backup Agent
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-backup-agent/mongo-backup-dep.yaml
Step 16: Start a Kubernetes Deployment for BigchainDB
-----------------------------------------------------
* This configuration is located in the file
@ -694,19 +701,24 @@ Step 15: Start a Kubernetes Deployment for BigchainDB
For example, if the value set in the
``bdb-instance-name`` is ``bdb-instance-0``, set the fields to the
value ``bdb-instance-0-dep``.
* Set the value of ``BIGCHAINDB_KEYPAIR_PRIVATE`` (not base64-encoded).
(In the future, we'd like to pull the BigchainDB private key from
the Secret named ``bdb-private-key``,
but a Secret can only be mounted as a file,
so BigchainDB Server would have to be modified to look for it
in a file.)
* As we gain more experience running BigchainDB in testing and production,
we will tweak the ``resources.limits`` values for CPU and memory, and as
richer monitoring and probing becomes available in BigchainDB, we will
tweak the ``livenessProbe`` and ``readinessProbe`` parameters.
* Set the ports to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose 2 ports -
``bigchaindb-api-port`` and ``bigchaindb-ws-port``. Set them to the
values specified in the ConfigMap.
* Create the BigchainDB Deployment using:
.. code:: bash
@ -717,7 +729,35 @@ Step 15: Start a Kubernetes Deployment for BigchainDB
* You can check its status using the command ``kubectl get deployments -w``
Step 17: Start a Kubernetes Deployment for OpenResty
----------------------------------------------------
* This configuration is located in the file
``nginx-openresty/nginx-openresty-dep.yaml``.
* Set ``metadata.name`` and ``spec.template.metadata.labels.app`` to the
value set in ``openresty-instance-name`` in the ConfigMap, followed by
``-dep``.
For example, if the value set in the
``openresty-instance-name`` is ``openresty-instance-0``, set the fields to
the value ``openresty-instance-0-dep``.
* Set the port to be exposed from the pod in the
``spec.containers[0].ports`` section. We currently expose the port at
which OpenResty is listening for requests, ``openresty-backend-port`` in
the above ConfigMap.
* Create the OpenResty Deployment using:
.. code:: bash
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-openresty/nginx-openresty-dep.yaml
* You can check its status using the command ``kubectl get deployments -w``
Step 18: Configure the MongoDB Cloud Manager
--------------------------------------------
Refer to the
@ -726,10 +766,10 @@ for details on how to configure the MongoDB Cloud Manager to enable
monitoring and backup.
Step 19: Verify the BigchainDB Node Setup
-----------------------------------------
Step 19.1: Testing Internally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To test the setup of your BigchainDB node, you could use a Docker container
@ -752,13 +792,13 @@ You can use it as below to get started immediately:
It will drop you to the shell prompt.
To test the MongoDB instance:
.. code:: bash
$ nslookup mdb-instance-0
$ dig +noall +answer _mdb-port._tcp.mdb-instance-0.default.svc.cluster.local SRV
$ curl -X GET http://mdb-instance-0:27017
The ``nslookup`` command should output the configured IP address of the service
@ -767,52 +807,84 @@ The ``dig`` command should return the configured port numbers.
The ``curl`` command tests the availability of the service.
To test the BigchainDB instance:
.. code:: bash
$ nslookup bdb-instance-0
$ dig +noall +answer _bdb-api-port._tcp.bdb-instance-0.default.svc.cluster.local SRV
$ dig +noall +answer _bdb-ws-port._tcp.bdb-instance-0.default.svc.cluster.local SRV
$ curl -X GET http://bdb-instance-0:9984
$ wsc -er ws://bdb-instance-0:9985/api/v1/streams/valid_transactions
To test the OpenResty instance:
.. code:: bash
$ nslookup openresty-instance-0
$ dig +noall +answer _openresty-svc-port._tcp.openresty-instance-0.default.svc.cluster.local SRV
To verify that the OpenResty instance forwards requests properly, send a ``POST``
transaction to OpenResty at port ``80`` and check the response from the backend
BigchainDB instance, as sketched below.
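
A sketch of such a check, assuming ``tx.json`` holds a valid, already-signed
transaction (for example, one created with the BigchainDB Python Driver):

.. code:: bash

   # A successful forward returns the HTTP response produced by the
   # backend BigchainDB instance
   $ curl -X POST http://openresty-instance-0:80/api/v1/transactions \
       -H 'Content-Type: application/json' \
       -d @tx.json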
To test the vanilla NGINX instance:
.. code:: bash
$ nslookup ngx-http-instance-0
$ dig +noall +answer _public-cluster-port._tcp.ngx-http-instance-0.default.svc.cluster.local SRV
$ dig +noall +answer _public-health-check-port._tcp.ngx-http-instance-0.default.svc.cluster.local SRV
$ wsc -er ws://ngx-http-instance-0/api/v1/streams/valid_transactions
$ curl -X GET http://ngx-http-instance-0:27017
The above curl command should result in the response
``It looks like you are trying to access MongoDB over HTTP on the native driver port.``
To test the NGINX instance with HTTPS and 3scale integration:
.. code:: bash
$ nslookup ngx-instance-0
$ dig +noall +answer _public-secure-cluster-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
$ dig +noall +answer _public-mdb-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
$ dig +noall +answer _public-insecure-cluster-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
$ wsc -er wss://<cluster-fqdn>/api/v1/streams/valid_transactions
$ curl -X GET http://<cluster-fqdn>:27017
The above curl command should result in the response
``It looks like you are trying to access MongoDB over HTTP on the native driver port.``
Step 19.2: Testing Externally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Check the MongoDB monitoring and backup agent on the MongoDB Cloud Manager
portal to verify they are working fine.
If you are using NGINX with HTTP support, accessing the URL
``http://<DNS/IP of your exposed BigchainDB service endpoint>:cluster-frontend-port``
on your browser should result in a JSON response that shows the BigchainDB
server version, among other things.
If you are using NGINX with HTTPS support, use ``https`` instead of
``http`` above; for example:
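
For example, with the sample values used in the ConfigMap above
(``cluster-fqdn`` of ``bdb.example.com`` and ``cluster-frontend-port`` of
``443``), the check could look like:

.. code:: bash

   # Fetch the root endpoint through the public HTTPS frontend
   $ curl -X GET https://bdb.example.com:443/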
Use the Python Driver to send some transactions to the BigchainDB node and
verify that your node or cluster works as expected.
View File
@ -0,0 +1,146 @@
How to Restore Data Backed On MongoDB Cloud Manager
===================================================
This page describes how to restore data backed up on
`MongoDB Cloud Manager <https://cloud.mongodb.com/>`_ by
the backup agent when using a single instance MongoDB replica set.
Prerequisites
-------------
- You can restore to either new or existing hardware. Below, we cover
restoring data to an existing MongoDB Kubernetes StatefulSet that uses a
Kubernetes Persistent Volume Claim, as described
:doc:`here <node-on-kubernetes>`.
- If the backup and destination database storage engines or settings do not
match, mongod cannot start once the backup is restored.
- If the backup and destination database do not belong to the same MongoDB
Cloud Manager group, then the database will start but never initialize
properly.
- The backup restore file includes a metadata file, restoreInfo.txt. This file
captures the options the database used when the snapshot was taken. The
database must be run with the listed options after it has been restored. It
contains:
1. Group name
2. Replica Set name
3. Cluster Id (if applicable)
4. Snapshot timestamp (as Timestamp at UTC)
5. Last Oplog applied (as a BSON Timestamp at UTC)
6. MongoDB version
7. Storage engine type
8. mongod startup options used on the database when the snapshot was taken
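
Once the backup archive has been extracted and renamed (see Step 3 below),
you can review these options with something like:

.. code:: bash

   # The metadata file sits at the top level of the extracted snapshot
   $ cat /data/db/main/restoreInfo.txt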
Step 1: Get the Backup/Archived Data from Cloud Manager
-------------------------------------------------------
- Log in to the Cloud Manager.
- Select the Group that you want to restore data from.
- Click Backup. Hover over the Status column, click on the
``Restore Or Download`` button.
- Select the appropriate SNAPSHOT, and click Next.
.. note::
We currently do not support restoring data using the ``POINT IN TIME`` and
``OPLOG TIMESTAMP`` methods.
- Select 'Pull via Secure HTTP'. In the dropdown box, select the number of
times the link can be used to download data; we select ``Once``.
Select the link expiration time, i.e. how long the download link stays
active; we usually select ``1 hour``.
- Check for the email from MongoDB.
.. note::
This can take some time as the Cloud Manager needs to prepare an archive of
the backed up data.
- Once you receive the email, click on the link to open the
``restore jobs page``. Follow the instructions to download the backup data.
.. note::
You will be shown a link to download the backup archive. You can click on
the ``Download`` button to download it using the browser. Under rare
circumstances, the download is interrupted and errors out; the cause is
unclear.
An alternative is to copy the download link and use the ``wget`` tool on
Linux systems to download the data.
Step 2: Copy the archive to the MongoDB Instance
------------------------------------------------
- Once you have the archive, you can copy it to the MongoDB instance running
on a Kubernetes cluster using something similar to:
.. code:: bash
$ kubectl --context ctx-1 cp bigchain-rs-XXXX.tar.gz mdb-instance-name:/
where ``bigchain-rs-XXXX.tar.gz`` is the archive downloaded from Cloud
Manager, and ``mdb-instance-name`` is the name of your MongoDB instance.
Step 3: Prepare the MongoDB Instance for Restore
------------------------------------------------
- Log in to the MongoDB instance using something like:
.. code:: bash
$ kubectl --context ctx-1 exec -it mdb-instance-name bash
- Extract the archive that we have copied to the instance at the proper
location using:
.. code:: bash
$ mv /bigchain-rs-XXXX.tar.gz /data/db
$ cd /data/db
$ tar xzvf bigchain-rs-XXXX.tar.gz
- Rename the directories on the disk, so that MongoDB can find the correct
data after we restart it.
- The current database will be located in the ``/data/db/main`` directory.
We simply rename the old directory to ``/data/db/main.BAK`` and rename the
backup directory ``bigchain-rs-XXXX`` to ``main``.
.. code:: bash
$ mv main main.BAK
$ mv bigchain-rs-XXXX main
.. note::
Ensure that there are no connections to MongoDB from any client, in our
case, BigchainDB. This can be done in multiple ways: iptables rules,
shutting down BigchainDB, not sending any transactions to BigchainDB, etc.
The simplest way is to stop the MongoDB Kubernetes Service, as sketched
below. BigchainDB has a built-in retry mechanism and will keep trying to
connect to the MongoDB backend until it succeeds.
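
A minimal sketch of stopping the Service, using a placeholder for your
MongoDB Service name (re-create the Service after the restore is done):

.. code:: bash

   $ kubectl --context ctx-1 delete svc <name of your MongoDB service>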
Step 4: Restart the MongoDB Instance
------------------------------------
- This can be achieved using something like:
.. code:: bash
$ kubectl --context ctx-1 delete -f k8s/mongo/mongo-ss.yaml
$ kubectl --context ctx-1 apply -f k8s/mongo/mongo-ss.yaml
View File
@ -29,8 +29,13 @@ You can create the server private key and certificate signing request (CSR)
by going into the directory ``member-cert/easy-rsa-3.0.1/easyrsa3``
and using something like:
.. note::
Please make sure you are fulfilling the requirements for `MongoDB server/member certificates
<https://docs.mongodb.com/manual/tutorial/configure-x509-member-authentication>`_.
.. code:: bash
./easyrsa init-pki
./easyrsa --req-cn=mdb-instance-0 --subject-alt-name=DNS:localhost,DNS:mdb-instance-0 gen-req mdb-instance-0 nopass
@ -67,11 +72,11 @@ Go to your ``bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3/``
directory and do something like:
.. code:: bash
./easyrsa import-req /path/to/mdb-instance-0.req mdb-instance-0
./easyrsa --subject-alt-name=DNS:localhost,DNS:mdb-instance-0 sign-req server mdb-instance-0
Once you have signed it, you can send the signed certificate
and the CA certificate back to the requestor.
The files are ``pki/issued/mdb-instance-0.crt`` and ``pki/ca.crt``.
@ -84,6 +89,6 @@ MongoDB requires a single, consolidated file containing both the public and
private keys.
.. code:: bash
cat /path/to/mdb-instance-0.crt /path/to/mdb-instance-0.key > mdb-instance-0.pem
View File
@ -0,0 +1,118 @@
Walkthrough: Deploy a Kubernetes Cluster on Azure using Tectonic by CoreOS
==========================================================================
A BigchainDB node can be run inside a `Kubernetes <https://kubernetes.io/>`_
cluster.
This page describes one way to deploy a Kubernetes cluster on Azure using Tectonic.
Tectonic simplifies the management of Kubernetes clusters.
If you would rather use Azure Container Service to manage Kubernetes Clusters,
please read :doc:`our guide for that <template-kubernetes-azure>`.
Step 1: Prerequisites for Deploying Tectonic Cluster
----------------------------------------------------
Get an Azure account. Refer to
:ref:`this step in our docs <Step 1: Get a Pay-As-You-Go Azure Subscription>`.
Create an SSH Key pair for the new Tectonic cluster. Refer to
:ref:`this step in our docs <Step 2: Create an SSH Key Pair>`.
Step 2: Get a Tectonic Subscription
-----------------------------------
CoreOS offers Tectonic for free for up to 10 nodes.
Sign up for an account `here <https://coreos.com/tectonic>`__ if you do not
have one already and get a license for 10 nodes.
Log in to your account, go to Overview > Your Account and save the
``CoreOS License`` and the ``Pull Secret`` to your local machine.
Step 3: Deploy the cluster on Azure
-----------------------------------
The latest instructions for deployment can be found
`here <https://coreos.com/tectonic/docs/latest/tutorials/azure/install.html>`__.
The following points suggest some customizations for a BigchainDB deployment
when following the steps above:
#. Set the ``CLUSTER`` variable to the name of the cluster. Also note that the
cluster will be deployed in a resource group named
``tectonic-cluster-CLUSTER``.
#. Set the ``tectonic_base_domain`` to ``""`` if you want to use Azure managed
DNS. You will be assigned a ``cloudapp.azure.com`` sub-domain by default.
#. Set the ``tectonic_cl_channel`` to ``"stable"`` unless you want to
experiment or test with the latest release.
#. Set the ``tectonic_cluster_name`` to the ``CLUSTER`` variable defined in
the step above.
#. Set the ``tectonic_license_path`` and ``tectonic_pull_secret_path`` to the
location where you have stored the ``tectonic-license.txt`` and the
``config.json`` files downloaded in the previous step.
#. Set the ``tectonic_etcd_count`` to ``"3"``, so that you have a multi-node
etcd cluster that can tolerate a single node failure.
#. Set the ``tectonic_etcd_tls_enabled`` to ``"true"`` as this will enable TLS
connectivity between the etcd nodes and their clients.
#. Set the ``tectonic_master_count`` to ``"3"`` so that you cane tolerate a
single master failure.
#. Set the ``tectonic_worker_count`` to ``"2"``.
#. Set the ``tectonic_azure_location`` to ``"westeurope"`` if you want to host
the cluster in Azure's ``westeurope`` datacenter.
#. Set the ``tectonic_azure_ssh_key`` to the path of the public key created in
the previous step.
#. Note that the ``tectonic_azure_client_secret`` is the same as the
``ARM_CLIENT_SECRET`` (see the sketch after this list).
#. Note that the URL for the Tectonic console using these settings will be the
cluster name set in the configuration file, the datacenter name and
``cloudapp.azure.com``. For example, if you named your cluster
``test-cluster`` and specified the datacenter as ``westeurope``, the Tectonic
console will be available at ``test-cluster.westeurope.cloudapp.azure.com``.
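
The Azure credentials mentioned above are typically supplied to the installer
through environment variables along these lines (a sketch; consult the
Tectonic installation docs for the authoritative list):

.. code:: bash

   # Service principal credentials read by the Tectonic/Terraform installer
   $ export ARM_SUBSCRIPTION_ID=<azure subscription id>
   $ export ARM_CLIENT_ID=<service principal client id>
   $ export ARM_CLIENT_SECRET=<service principal client secret>
   $ export ARM_TENANT_ID=<azure tenant id>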
Step 4: Configure kubectl
-------------------------
#. Refer to `this tutorial
<https://coreos.com/tectonic/docs/latest/tutorials/azure/first-app.html>`__
for instructions on how to download the kubectl configuration files for
your cluster.
#. Set the ``KUBECONFIG`` environment variable to make ``kubectl`` use the new
config file along with the existing configuration.
.. code:: bash
$ export KUBECONFIG=$HOME/.kube/config:/path/to/config/kubectl-config
# OR to only use the new configuration, try
$ export KUBECONFIG=/path/to/config/kubectl-config
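
To confirm that ``kubectl`` is talking to the new cluster, you can run a quick
sanity check:

.. code:: bash

   # Both commands should report the Tectonic cluster, not an older context
   $ kubectl cluster-info
   $ kubectl get nodes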
Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.
Tectonic References
-------------------
#. https://coreos.com/tectonic/docs/latest/tutorials/azure/install.html
#. https://coreos.com/tectonic/docs/latest/troubleshooting/installer-terraform.html
#. https://coreos.com/tectonic/docs/latest/tutorials/azure/first-app.html
View File
@ -49,7 +49,7 @@ If you already *have* the Azure CLI installed, you may want to update it.
.. warning::
``az component update`` isn't supported if you installed the CLI using some of Microsoft's provided installation instructions. See `the Microsoft docs for update instructions <https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
Next, log in to your account using:
@ -128,24 +128,25 @@ You can SSH to one of the just-deployed Kubernetes "master" nodes
.. code:: bash
$ ssh -i ~/.ssh/<name> ubuntu@<master-ip-address-or-fqdn>
where you can get the IP address or FQDN
of a master node from the Azure Portal. For example:
.. code:: bash
$ ssh -i ~/.ssh/mykey123 ubuntu@mydnsprefix.westeurope.cloudapp.azure.com
.. note::
All the master nodes are accessible behind the *same* public IP address and
FQDN. You connect to one of the masters randomly based on the load balancing
policy.
The "agent" nodes shouldn't get public IP addresses or hostnames,
so you can't SSH to them *directly*,
The "agent" nodes shouldn't get public IP addresses or externally accessible
FQDNs, so you can't SSH to them *directly*,
but you can first SSH to the master
and then SSH to an agent from there using their hostname.
To do that, you could
copy your SSH key pair to the master (a bad idea),
or use SSH agent forwarding (better), as sketched below.
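
A minimal sketch of SSH agent forwarding, assuming your key pair is
``~/.ssh/<name>`` as above:

.. code:: bash

   # Add your private key to the local SSH agent
   $ ssh-add ~/.ssh/<name>

   # -A forwards the agent connection to the master
   $ ssh -A ubuntu@<master-ip-address-or-fqdn>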
@ -168,14 +169,14 @@ then SSH agent forwarding hasn't been set up correctly.
If you get a non-empty response,
then SSH agent forwarding should work fine
and you can SSH to one of the agent nodes (from a master)
using:
.. code:: bash
$ ssh ubuntu@k8s-agent-4AC80E97-0
where ``k8s-agent-4AC80E97-0`` is the name
of a Kubernetes agent node in your Kubernetes cluster.
You will have to replace it by the name
of an agent node in your cluster.
@ -202,4 +203,4 @@ CAUTION: You might end up deleting resources other than the ACS cluster.
Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.
View File
@ -45,10 +45,11 @@ For example, maybe they assign a unique number to each node,
so that if you're operating node 12, your MongoDB instance would be named
``mdb-instance-12``.
Similarly, other instances must also have unique names in the cluster.
#. Name of the MongoDB instance (``mdb-instance-*``)
#. Name of the BigchainDB instance (``bdb-instance-*``)
#. Name of the NGINX instance (``ngx-http-instance-*`` or ``ngx-https-instance-*``)
#. Name of the OpenResty instance (``openresty-instance-*``)
#. Name of the MongoDB monitoring agent instance (``mdb-mon-instance-*``)
#. Name of the MongoDB backup agent instance (``mdb-bak-instance-*``)
@ -79,7 +80,7 @@ You can generate a BigchainDB keypair for your node, for example,
using the `BigchainDB Python Driver <http://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>`_.
.. code:: python
from bigchaindb_driver.crypto import generate_keypair
print(generate_keypair())
@ -99,15 +100,13 @@ and have an SSL certificate for the FQDN.
(You can get an SSL certificate from any SSL certificate provider.)
☐ Ask the managing organization for the user name to use for authenticating to
MongoDB.
☐ If the cluster uses 3scale for API authentication, monitoring and billing,
you must ask the managing organization for all relevant 3scale credentials -
secret token, service ID, version header and API service token.
☐ If the cluster uses MongoDB Cloud Manager for monitoring and backup,
View File
@ -4,40 +4,66 @@ This page has instructions to set up a single stand-alone BigchainDB node for le
A. Install MongoDB as the database backend. (There are other options but you can ignore them for now.)
[Install MongoDB Server 3.4+](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
B. To run MongoDB with the default database path (i.e. `/data/db`), open a Terminal and run the following command:
```text
$ sudo mkdir -p /data/db
```
C. Assign rwx (read/write/execute) permissions to the user for the default database directory:
```text
$ sudo chmod -R 700 /data/db
```
D. Run MongoDB (but do not close this terminal):
```text
$ sudo mongod --replSet=bigchain-rs
```
E. Ubuntu 16.04 already has Python 3.5, so you don't need to install it, but you do need to install some other things. Open a new terminal and run:
```text
$ sudo apt-get update
$ sudo apt-get install g++ python3-dev libffi-dev
$ sudo apt-get install libffi-dev libssl-dev
```
F. Get the latest version of pip and setuptools:
```text
$ sudo apt-get install python3-pip
$ sudo pip3 install --upgrade pip setuptools
```
G. Install the `bigchaindb` Python package from PyPI:
```text
$ sudo pip3 install bigchaindb
```
In case you are having problems with installation or package/module versioning, upgrade the relevant packages on your host by running one of the following commands:
```text
$ sudo pip3 install [packageName]==[packageVersion]
OR
$ sudo pip3 install [packageName] --upgrade
```
H. Configure BigchainDB Server:
```text
$ bigchaindb -y configure mongodb
```
I. Run BigchainDB Server:
```text
$ bigchaindb start
```
J. Verify the BigchainDB Server setup by visiting the BigchainDB Root URL in your browser:
```text
http://127.0.0.1:9984/
```
A correct installation will show you a JSON object with information about the API, docs, version and your public key.
You now have a running BigchainDB Server and can post transactions to it.
One way to do that is to use the BigchainDB Python Driver.
View File
@ -24,6 +24,9 @@ For convenience, here's a list of all the relevant environment variables (docume
`BIGCHAINDB_WSSERVER_SCHEME`<br>
`BIGCHAINDB_WSSERVER_HOST`<br>
`BIGCHAINDB_WSSERVER_PORT`<br>
`BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME`<br>
`BIGCHAINDB_WSSERVER_ADVERTISED_HOST`<br>
`BIGCHAINDB_WSSERVER_ADVERTISED_PORT`<br>
`BIGCHAINDB_CONFIG_PATH`<br>
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
`BIGCHAINDB_LOG`<br>
@ -248,6 +251,38 @@ export BIGCHAINDB_WSSERVER_PORT=9985
}
```
## wsserver.advertised_scheme, wsserver.advertised_host and wsserver.advertised_port
These settings are for advertising the WebSocket URL to external clients in
the root API endpoint. They might be useful if your deployment
is hosted behind a firewall, NAT, etc., where the exposed public IP or domain is
different from the one where BigchainDB is running.
**Example using environment variables**
```text
export BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME=wss
export BIGCHAINDB_WSSERVER_ADVERTISED_HOST=mybigchaindb.com
export BIGCHAINDB_WSSERVER_ADVERTISED_PORT=443
```
**Example config file snippet**
```js
"wsserver": {
"advertised_scheme": "wss",
"advertised_host": "mybigchaindb.com",
"advertised_port": 443
}
```
**Default values (from a config file)**
```js
"wsserver": {
"advertised_scheme": "ws",
"advertised_host": "localhost",
"advertised_port": 9985
}
```
## backlog_reassign_delay
Specifies how long, in seconds, transactions can remain in the backlog before being reassigned. Long-waiting transactions must be reassigned because the assigned node may no longer be responsive. The default duration is 120 seconds.
View File
@ -12,7 +12,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: bigchaindb
image: bigchaindb/bigchaindb:1.0.1
imagePullPolicy: IfNotPresent
args:
- start
@ -23,19 +23,52 @@ spec:
name: vars
key: mdb-instance-name
- name: BIGCHAINDB_DATABASE_PORT
value: "27017"
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-backend-port
- name: BIGCHAINDB_DATABASE_REPLICASET
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-replicaset-name
- name: BIGCHAINDB_DATABASE_BACKEND
value: mongodb
- name: BIGCHAINDB_DATABASE_NAME
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-database-name
- name: BIGCHAINDB_SERVER_BIND
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-server-bind
- name: BIGCHAINDB_WSSERVER_HOST
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-interface
- name: BIGCHAINDB_WSSERVER_ADVERTISED_HOST
valueFrom:
configMapKeyRef:
name: vars
key: cluster-fqdn
- name: BIGCHAINDB_WSSERVER_PORT
value: "9985"
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-port
- name: BIGCHAINDB_WSSERVER_ADVERTISED_PORT
valueFrom:
configMapKeyRef:
name: vars
key: cluster-frontend-port
- name: BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-wsserver-advertised-scheme
- name: BIGCHAINDB_KEYPAIR_PUBLIC
valueFrom:
configMapKeyRef:
@ -44,19 +77,31 @@ spec:
- name: BIGCHAINDB_KEYPAIR_PRIVATE
value: "<private key here>"
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
value: "120"
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-backlog-reassign-delay
- name: BIGCHAINDB_DATABASE_MAXTRIES
value: "3"
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-database-maxtries
- name: BIGCHAINDB_DATABASE_CONNECTION_TIMEOUT
value: "120"
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-database-connection-timeout
- name: BIGCHAINDB_LOG_LEVEL_CONSOLE
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-log-level
- name: BIGCHAINDB_DATABASE_SSL
value: "true"
- name: BIGCHAINDB_DATABASE_CA_CERT
value: /etc/bigchaindb/ca/ca.pem
- name: BIGCHAINDB_DATABASE_CRLFILE
value: /etc/bigchaindb/ca/crl.pem
- name: BIGCHAINDB_DATABASE_CERTFILE
value: /etc/bigchaindb/ssl/bdb-instance.pem
- name: BIGCHAINDB_DATABASE_KEYFILE
@ -73,18 +118,19 @@ spec:
# name: bdb-config
# key: bdb-keyring
ports:
- containerPort: "<bigchaindb-api-port from ConfigMap>"
protocol: TCP
name: bdb-port
- containerPort: "<bigchaindb-ws-port from ConfigMap>"
protocol: TCP
name: bdb-ws-port
volumeMounts:
- name: bdb-certs
mountPath: /etc/bigchaindb/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/bigchaindb/ca/
readOnly: true
resources:
limits:
cpu: 200m
@ -92,13 +138,15 @@ spec:
livenessProbe:
httpGet:
path: /
port: bdb-port
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: bdb-port
initialDelaySeconds: 15
timeoutSeconds: 10
restartPolicy: Always
@ -107,3 +155,7 @@ spec:
secret:
secretName: bdb-certs
defaultMode: 0400
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0400
View File
@ -9,11 +9,13 @@ spec:
selector:
app: bdb-instance-0-dep
ports:
- port: "<bigchaindb-api-port from ConfigMap>"
targetPort: "<bigchaindb-api-port from ConfigMap>"
name: bdb-api-port
protocol: TCP
- port: "<bigchaindb-ws-port from ConfigMap>"
targetPort: "<bigchaindb-ws-port from ConfigMap>"
name: bdb-ws-port
protocol: TCP
type: ClusterIP
clusterIP: None
View File
@ -1,4 +1,4 @@
## Note: data values do NOT have to be base64-encoded in this file.
## vars is common environment variables for this BigchaindB node
apiVersion: v1
@ -7,25 +7,98 @@ metadata:
name: vars
namespace: default
data:
# cluster-fqdn is the DNS name registered for your HTTPS certificate.
cluster-fqdn: "bdb.example.com"
# cluster-frontend-port is the port number on which this node's services
# are available to external clients.
cluster-frontend-port: "443"
# cluster-health-check-port is the port number on which an external load
# balancer can check the status/liveness of the external/public server.
# In our deployment, Kubernetes sends 'livenessProbes' to this port and
# interprets a successful response as a 'healthy' service.
cluster-health-check-port: "8888"
# cluster-dns-server-ip is the IP of the DNS server. A Kubernetes deployment
# always has a DNS server (kube-dns) running at 10.0.0.10
cluster-dns-server-ip: "10.0.0.10"
# mdb-instance-name is the name of the MongoDB instance in this cluster.
mdb-instance-name: "<name of the mdb instance>"
# ngx-instance-name is the name of the NGINX instance in this cluster.
ngx-instance-name: "<name of the nginx instance>"
# openresty-instance-name is the name of the OpenResty instance in this
# cluster.
openresty-instance-name: "<name of the openresty instance>"
# bdb-instance-name is the name of the BigchainDB instance in this cluster.
bdb-instance-name: "<name of the bdb instance>"
# mdb-mon-instance-name is the name of the MongoDB Monitoring Agent instance
# in this cluster.
mdb-mon-instance-name: "<name of the mdb monitoring agent instance>"
# mdb-bak-instance-name is the name of the MongoDB Backup Agent instance
# in this cluster.
mdb-bak-instance-name: "<name of the mdb backup agent instance>"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mongodb-whitelist
namespace: default
data:
# We only support "all"" currently
allowed-hosts: "all"
# ngx-mdb-instance-name is the FQDN of the MongoDB instance in this
# Kubernetes cluster.
ngx-mdb-instance-name: "<name of the mdb instance>.default.svc.cluster.local"
# ngx-openresty-instance-name is the FQDN of the OpenResty instance in this
# Kubernetes cluster.
ngx-openresty-instance-name: "<name of the openresty instance>.default.svc.cluster.local"
# ngx-bdb-instance-name is the FQDN of the BigchainDB instance in this
# Kubernetes cluster.
ngx-bdb-instance-name: "<name of the bdb instance>.default.svc.cluster.local"
# mongodb-frontend-port is the port number on which external clients can
# access MongoDB. This needs to be restricted to only other MongoDB instances
# by enabling an authentication mechanism on MongoDB.
mongodb-frontend-port: "27017"
# mongodb-backend-port is the port on which MongoDB is actually
# available/listening for requests.
mongodb-backend-port: "27017"
# openresty-backend-port is the port number on which OpenResty is listening
# for requests. This is used by the NGINX instance to forward the requests to
# the right port, and by OpenResty instance to bind to the correct port to
# receive requests from NGINX instance.
openresty-backend-port: "80"
# BigchainDB configuration parameters
# Refer https://docs.bigchaindb.com/projects/server/en/latest/server-reference/configuration.html
# bigchaindb-api-port is the port number on which BigchainDB is listening
# for HTTP requests.
bigchaindb-api-port: "9984"
# bigchaindb-server-bind is the socket where BigchainDB binds for API
# requests.
bigchaindb-server-bind: "0.0.0.0:9984"
# bigchaindb-ws-port and bigchaindb-ws-interface form the socket where
# BigchainDB binds for Websocket connections.
bigchaindb-ws-port: "9985"
bigchaindb-ws-interface: "0.0.0.0"
# mongodb-replicaset-name is the MongoDB replica set name
mongodb-replicaset-name: "bigchain-rs"
# bigchaindb-database-name is the database collection used by BigchainDB with
# the MongoDB backend.
bigchaindb-database-name: "bigchain"
# bigchaindb-wsserver-advertised-scheme is the protocol used to access the
# WebSocket API in BigchainDB; can be 'ws' or 'wss' (default).
bigchaindb-wsserver-advertised-scheme: "wss"
---
apiVersion: v1
kind: ConfigMap
@ -35,8 +108,28 @@ metadata:
data:
# Colon-separated list of all *other* nodes' BigchainDB public keys.
bdb-keyring: "<':' separated list of public keys>"
# BigchainDB instance authentication user name
bdb-user: "<user name>"
# BigchainDB public key of *this* node.
# Generated using bigchaindb_driver in the docs
# Example: "EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"
bdb-public-key: "<public key>"
# bigchaindb-backlog-reassign-delay is the number of seconds a transaction
# can remain in the backlog before being reassigned.
bigchaindb-backlog-reassign-delay: "120"
# bigchaindb-database-maxtries is the maximum number of times that BigchainDB
# will try to establish a connection with the database backend.
# If it is set to 0, then it will try forever.
bigchaindb-database-maxtries: "3"
# bigchaindb-database-connection-timeout is the maximum number of
# milliseconds that BigchainDB will wait before closing the connection while
# connecting to the database backend.
bigchaindb-database-connection-timeout: "5000"
# bigchaindb-log-level is the log level used to log to the console.
bigchaindb-log-level: "debug"
View File
@ -15,8 +15,9 @@ metadata:
type: Opaque
data:
# Group ID used by MongoDB deployment
group-id: "<b64 encoded Group ID>"
# Base64-encoded MongoDB Agent API Key for the group
agent-api-key: "<b64 encoded Agent API Key>"
---
apiVersion: v1
@ -27,6 +28,7 @@ metadata:
type: Opaque
data:
# Base64-encoded BigchainDB private key of *this* node
# Generated using bigchaindb_driver in the docs
private.key: "<b64 encoded BigchainDB private key>"
---
apiVersion: v1
@ -38,10 +40,6 @@ type: Opaque
data:
# Base64-encoded, concatenated certificate and private key
mdb-instance.pem: "<b64 encoded, concatenated certificate and private key>"
---
apiVersion: v1
kind: Secret
@ -52,8 +50,6 @@ type: Opaque
data:
# Base64-encoded, concatenated certificate and private key
mdb-mon-instance.pem: "<b64 encoded, concatenated certificate and private key>"
---
apiVersion: v1
kind: Secret
@ -64,8 +60,6 @@ type: Opaque
data:
# Base64-encoded, concatenated certificate and private key
mdb-bak-instance.pem: "<b64 encoded, concatenated certificate and private key>"
---
apiVersion: v1
kind: Secret
@ -74,13 +68,9 @@ metadata:
namespace: default
type: Opaque
data:
# Base64-encoded BigchainDB instance certificate
bdb-instance.pem: "<b64 encoded certificate>"
# Base64-encoded private key (<bdb-instance-name>.key)
bdb-instance.key: "<b64 encoded private key>"
---
apiVersion: v1
@ -108,10 +98,16 @@ data:
secret-token: "<b64 encoded 3scale secret-token>"
service-id: "<b64 encoded 3scale service-id>"
version-header: "<b64 encoded 3scale version-header>"
service-token: "<b64 encoded 3scale service-token>"
---
apiVersion: v1
kind: Secret
metadata:
name: ca-auth
namespace: default
type: Opaque
data:
# CA used to issue members/client certificates
# Base64-encoded CA certificate (ca.crt)
ca.pem: "<b64 encoded CA certificate>"
crl.pem: "<b64 encoded CRL>"
View File
@ -0,0 +1,95 @@
apiVersion: v1
kind: Service
metadata:
name: bdb
namespace: default
labels:
name: bdb
spec:
selector:
app: bdb-dep
ports:
- port: 9984
targetPort: 9984
name: bdb-api-port
protocol: TCP
- port: 9985
targetPort: 9985
name: bdb-ws-port
protocol: TCP
type: ClusterIP
clusterIP: None
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: bdb-dep
spec:
replicas: 1
template:
metadata:
labels:
app: bdb-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: bigchaindb
image: bigchaindb/bigchaindb:1.0.0
imagePullPolicy: Always
args:
- start
env:
- name: BIGCHAINDB_DATABASE_HOST
value: mdb
- name: BIGCHAINDB_DATABASE_PORT
value: "27017"
- name: BIGCHAINDB_DATABASE_REPLICASET
value: bigchain-rs
- name: BIGCHAINDB_DATABASE_BACKEND
value: mongodb
- name: BIGCHAINDB_DATABASE_NAME
value: bigchain
- name: BIGCHAINDB_SERVER_BIND
value: "0.0.0.0:9984"
- name: BIGCHAINDB_WSSERVER_HOST
value: "0.0.0.0"
- name: BIGCHAINDB_WSSERVER_PORT
value: "9985"
- name: BIGCHAINDB_KEYPAIR_PUBLIC
value: "EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ"
- name: BIGCHAINDB_KEYPAIR_PRIVATE
value: "3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm"
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
value: "120"
- name: BIGCHAINDB_DATABASE_MAXTRIES
value: "3"
- name: BIGCHAINDB_DATABASE_CONNECTION_TIMEOUT
value: "120"
- name: BIGCHAINDB_LOG_LEVEL_CONSOLE
value: debug
ports:
- containerPort: 9984
hostPort: 9984
name: bdb-port
protocol: TCP
- containerPort: 9985
hostPort: 9985
name: bdb-ws-port
protocol: TCP
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /
port: 9984
initialDelaySeconds: 15
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: 9984
initialDelaySeconds: 15
timeoutSeconds: 10
restartPolicy: Always
k8s/dev-setup/mongo.yaml
View File
@ -0,0 +1,49 @@
apiVersion: v1
kind: Service
metadata:
name: mdb
namespace: default
labels:
name: mdb
spec:
selector:
app: mdb-ss
ports:
- port: 27017
targetPort: 27017
protocol: TCP
name: mdb-svc-port
type: ClusterIP
clusterIP: None
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: mdb-ss
namespace: default
spec:
serviceName: mdb
replicas: 1
template:
metadata:
name: mdb-ss
labels:
app: mdb-ss
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb
image: mongo:3.4.4
imagePullPolicy: Always
args:
- --replSet
- bigchain-rs
ports:
- containerPort: 27017
hostPort: 27017
protocol: TCP
resources:
limits:
cpu: 200m
memory: 768Mi
restartPolicy: Always
View File
@ -0,0 +1,83 @@
apiVersion: v1
kind: Service
metadata:
name: ngx-http
namespace: default
labels:
name: ngx-http
annotations:
# NOTE: the following annotation is a beta feature and
# only available in GCE/GKE and Azure as of now
# Ref: https://kubernetes.io/docs/tutorials/services/source-ip/
service.beta.kubernetes.io/external-traffic: OnlyLocal
spec:
selector:
app: ngx-http-dep
ports:
- port: 80
targetPort: 80
name: ngx-public-bdb-port-http
protocol: TCP
- port: 27017
targetPort: 27017
name: ngx-public-mdb-port
protocol: TCP
type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ngx-http-dep
namespace: default
spec:
replicas: 1
template:
metadata:
name: ngx-http-dep
labels:
app: ngx-http-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx-http
image: bigchaindb/nginx_http:1.0
imagePullPolicy: Always
env:
- name: CLUSTER_FRONTEND_PORT
value: "80"
- name: HEALTH_CHECK_PORT
value: "8080"
- name: DNS_SERVER
value: "10.0.0.10"
- name: MONGODB_FRONTEND_PORT
value: "27017"
- name: MONGODB_BACKEND_HOST
value: "mdb.default.svc.cluster.local"
- name: MONGODB_BACKEND_PORT
value: "27017"
- name: BIGCHAINDB_BACKEND_HOST
value: "bdb.default.svc.cluster.local"
- name: BIGCHAINDB_API_PORT
value: "9984"
- name: BIGCHAINDB_WS_PORT
value: "9985"
ports:
- containerPort: 27017
protocol: TCP
- containerPort: 80
protocol: TCP
- containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
resources:
limits:
cpu: 200m
memory: 768Mi
restartPolicy: Always
View File
@ -0,0 +1,121 @@
apiVersion: v1
kind: Secret
metadata:
name: https-certs
namespace: default
type: Opaque
data:
# Base64-encoded HTTPS private key
cert.key: "<b64 encoded HTTPS private key>"
# Base64-encoded HTTPS certificate chain
# starting with your primary SSL cert (e.g. your_domain.crt)
# followed by all intermediate certs.
  # If the cert is from DigiCert, download "Best format for nginx".
  cert.pem: "<b64 encoded HTTPS certificate chain>"
---
apiVersion: v1
kind: Service
metadata:
name: ngx-https
namespace: default
labels:
name: ngx-https
annotations:
service.beta.kubernetes.io/external-traffic: OnlyLocal
spec:
selector:
app: ngx-https-dep
ports:
- port: 443
targetPort: 443
name: public-secure-cluster-port
protocol: TCP
- port: 80
targetPort: 80
name: public-insecure-cluster-port
protocol: TCP
- port: 27017
targetPort: 27017
name: public-mdb-port
protocol: TCP
type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ngx-https-dep
spec:
replicas: 1
template:
metadata:
labels:
app: ngx-https-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx-https
image: bigchaindb/nginx_https:1.0
imagePullPolicy: Always
env:
- name: CLUSTER_FRONTEND_PORT
value: "443"
- name: HEALTH_CHECK_PORT
value: "8888"
- name: CLUSTER_FQDN
value: "unfake.io"
- name: DNS_SERVER
value: "10.0.0.10"
- name: MONGODB_FRONTEND_PORT
value: "27017"
- name: MONGODB_BACKEND_HOST
value: "mdb.default.svc.cluster.local"
- name: MONGODB_BACKEND_PORT
value: "27017"
- name: OPENRESTY_BACKEND_PORT
value: "80"
- name: OPENRESTY_BACKEND_HOST
value: "openresty.default.svc.cluster.local"
- name: BIGCHAINDB_BACKEND_HOST
value: "bdb.default.svc.cluster.local"
- name: BIGCHAINDB_API_PORT
value: "9984"
- name: BIGCHAINDB_WS_PORT
value: "9985"
ports:
- containerPort: 80
protocol: TCP
- containerPort: 27017
protocol: TCP
- containerPort: 8888
protocol: TCP
- containerPort: 443
protocol: TCP
- containerPort: 44433
protocol: TCP
volumeMounts:
- name: https-certs
mountPath: /etc/nginx/ssl/
readOnly: true
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /health
port: 8888
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
restartPolicy: Always
volumes:
- name: https-certs
secret:
secretName: https-certs
defaultMode: 0400

View File

@ -0,0 +1,83 @@
apiVersion: v1
kind: Secret
metadata:
name: threescale-credentials
namespace: default
type: Opaque
data:
secret-token: "<b64 encoded 3scale secret-token>"
service-id: "<b64 encoded 3scale service-id>"
version-header: "<b64 encoded 3scale version-header>"
provider-key: "<b64 encoded 3scale provider-key>"
---
apiVersion: v1
kind: Service
metadata:
name: openresty
namespace: default
labels:
name: openresty
annotations:
service.beta.kubernetes.io/external-traffic: OnlyLocal
spec:
selector:
app: openresty-dep
ports:
- port: 80
targetPort: 80
name: openresty-svc-port
protocol: TCP
type: ClusterIP
clusterIP: None
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: openresty-dep
spec:
replicas: 1
template:
metadata:
labels:
app: openresty-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx-openresty
image: bigchaindb/nginx_3scale:2.0
imagePullPolicy: Always
env:
- name: DNS_SERVER
value: "10.0.0.10"
- name: OPENRESTY_FRONTEND_PORT
value: "80"
- name: BIGCHAINDB_BACKEND_HOST
value: "bdb.default.svc.cluster.local"
- name: BIGCHAINDB_API_PORT
value: "9984"
ports:
- containerPort: 80
protocol: TCP
name: openresty-port
volumeMounts:
- name: threescale-credentials
mountPath: /usr/local/openresty/nginx/conf/threescale
readOnly: true
livenessProbe:
httpGet:
path: /
port: openresty-port
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
resources:
limits:
cpu: 200m
memory: 768Mi
restartPolicy: Always
volumes:
- name: threescale-credentials
secret:
secretName: threescale-credentials
defaultMode: 0400
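Note that the `openresty` Service is headless (`clusterIP: None`), so the cluster DNS name resolves straight to the pod IP instead of a virtual ClusterIP. A quick in-cluster check from a throwaway pod (the image choice is an assumption):

```
# Resolve the headless Service name from inside the cluster; expect the
# openresty pod's IP rather than a virtual ClusterIP.
kubectl run -it --rm dns-test --image=busybox --restart=Never -- \
  nslookup openresty.default.svc.cluster.local
```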

View File

@ -19,6 +19,6 @@ RUN apt update \
&& apt clean
COPY mongodb_backup_agent_entrypoint.bash /
RUN chown -R mongodb-mms-agent:mongodb-mms-agent /etc/mongodb-mms/
VOLUME /etc/mongod/ssl
#USER mongodb-mms-agent - BUG(Krish) Uncomment after tests are complete
VOLUME /etc/mongod/ssl /etc/mongod/ca
USER mongodb-mms-agent
ENTRYPOINT ["/mongodb_backup_agent_entrypoint.bash"]

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/mongodb-backup-agent:3.0 .
docker build -t bigchaindb/mongodb-backup-agent:3.4 .
docker push bigchaindb/mongodb-backup-agent:3.0
docker push bigchaindb/mongodb-backup-agent:3.4

View File

@ -7,14 +7,18 @@ MONGODB_BACKUP_CONF_FILE=/etc/mongodb-mms/backup-agent.config
mms_api_keyfile_path=`printenv MMS_API_KEYFILE_PATH`
mms_groupid_keyfile_path=`printenv MMS_GROUPID_KEYFILE_PATH`
ca_crt_path=`printenv CA_CRT_PATH`
backup_crt_path=`printenv BACKUP_PEM_PATH`
backup_pem_path=`printenv BACKUP_PEM_PATH`
if [[ -z "${mms_api_keyfile_path}" || \
-z "${ca_crt_path}" || \
-z "${backup_crt_path}" || \
-z "${mms_groupid_keyfile_path}" ]]; then
echo "Invalid environment settings detected. Exiting!"
if [[ -z "${mms_api_keyfile_path:?MMS_API_KEYFILE_PATH not specified. Exiting!}" || \
-z "${ca_crt_path:?CA_CRT_PATH not specified. Exiting!}" || \
-z "${backup_pem_path:?BACKUP_PEM_PATH not specified. Exiting!}" || \
-z "${mms_groupid_keyfile_path:?MMS_GROUPID_KEYFILE_PATH not specified. Exiting!}" ]]; then
exit 1
else
echo MMS_API_KEYFILE_PATH="$mms_api_keyfile_path"
echo MMS_GROUPID_KEYFILE_PATH="$mms_groupid_keyfile_path"
echo CA_CRT_PATH="$ca_crt_path"
echo BACKUP_PEM_PATH="$backup_pem_path"
fi
sed -i '/mmsApiKey/d' ${MONGODB_BACKUP_CONF_FILE}
@ -33,7 +37,7 @@ echo "mothership=api-backup.eu-west-1.mongodb.com" >> ${MONGODB_BACKUP_CONF_FILE
echo "useSslForAllConnections=true" >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslRequireValidServerCertificates=true" >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslTrustedServerCertificates="${ca_crt_path} >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslClientCertificate="${backup_crt_path} >> ${MONGODB_BACKUP_CONF_FILE}
echo "sslClientCertificate="${backup_pem_path} >> ${MONGODB_BACKUP_CONF_FILE}
echo "#sslClientCertificatePassword=<password>" >> ${MONGODB_BACKUP_CONF_FILE}
echo "INFO: starting mdb backup..."

View File

@ -24,15 +24,15 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: mdb-backup
image: bigchaindb/mongodb-backup-agent:3.0
imagePullPolicy: Always
image: bigchaindb/mongodb-backup-agent:3.4
imagePullPolicy: IfNotPresent
env:
- name: MMS_API_KEYFILE_PATH
value: /etc/mongod/cloud/agent-api-key
- name: MMS_GROUPID_KEYFILE_PATH
value: /etc/mongod/cloud/group-id
- name: CA_CRT_PATH
value: /etc/mongod/ssl/ca.pem
value: /etc/mongod/ca/ca.pem
- name: BACKUP_PEM_PATH
value: /etc/mongod/ssl/mdb-bak-instance.pem
resources:
@ -43,6 +43,9 @@ spec:
- name: mdb-bak-certs
mountPath: /etc/mongod/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/mongod/ca/
readOnly: true
- name: cloud-manager-credentials
mountPath: /etc/mongod/cloud/
readOnly: true
@ -51,8 +54,12 @@ spec:
- name: mdb-bak-certs
secret:
secretName: mdb-bak-certs
defaultMode: 0400
defaultMode: 0404
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0404
- name: cloud-manager-credentials
secret:
secretName: cloud-manager-credentials
defaultMode: 0400
defaultMode: 0404
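The switch from `defaultMode: 0400` to `0404` pairs with the Dockerfile change above that re-enables `USER mongodb-mms-agent`: Secret volumes are mounted with root ownership by default, so an agent running as a non-root user presumably needs the world-readable bit to open the files. A quick check, with the pod name left as a placeholder:

```
# List a mounted Secret file to see defaultMode as file permissions
# (<mdb-backup-pod> is a placeholder for the pod the Deployment created).
kubectl exec <mdb-backup-pod> -- ls -l /etc/mongod/ca/ca.pem
# expect: -r-----r-- (0404), readable by the non-root agent user
```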

View File

@ -53,6 +53,6 @@ RUN apt update \
COPY mongodb_mon_agent_entrypoint.bash /
RUN chown -R mongodb-mms-agent:mongodb-mms-agent /etc/mongodb-mms/
VOLUME /etc/mongod/ssl
#USER mongodb-mms-agent - BUG(Krish) Uncomment after tests are complete
ENTRYPOINT ["/mongodb_mon_agent_entrypoint.bash"]
VOLUME /etc/mongod/ssl /etc/mongod/ca
USER mongodb-mms-agent
ENTRYPOINT ["/mongodb_mon_agent_entrypoint.bash"]

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/mongodb-monitoring-agent:3.0 .
docker build -t bigchaindb/mongodb-monitoring-agent:3.1 .
docker push bigchaindb/mongodb-monitoring-agent:3.0
docker push bigchaindb/mongodb-monitoring-agent:3.1

View File

@ -11,14 +11,18 @@ MONGODB_MON_CONF_FILE=/etc/mongodb-mms/monitoring-agent.config
mms_api_keyfile_path=`printenv MMS_API_KEYFILE_PATH`
mms_groupid_keyfile_path=`printenv MMS_GROUPID_KEYFILE_PATH`
ca_crt_path=`printenv CA_CRT_PATH`
monitoring_crt_path=`printenv MONITORING_PEM_PATH`
monitoring_pem_path=`printenv MONITORING_PEM_PATH`
if [[ -z "${mms_api_keyfile_path}" || \
-z "${ca_crt_path}" || \
-z "${monitoring_crt_path}" || \
-z "${mms_groupid_keyfile_path}" ]]; then
echo "Invalid environment settings detected. Exiting!"
if [[ -z "${mms_api_keyfile_path:?MMS_API_KEYFILE_PATH not specified. Exiting!}" || \
-z "${ca_crt_path:?CA_CRT_PATH not specified. Exiting!}" || \
-z "${monitoring_pem_path:?MONITORING_PEM_PATH not specified. Exiting!}" || \
-z "${mms_groupid_keyfile_path:?MMS_GROUPID_KEYFILE_PATH not specified. Exiting!}" ]];then
exit 1
else
echo MMS_API_KEYFILE_PATH="$mms_api_keyfile_path"
echo MMS_GROUPID_KEYFILE_PATH="$mms_groupid_keyfile_path"
echo CA_CRT_PATH="$ca_crt_path"
echo MONITORING_PEM_PATH="$monitoring_pem_path"
fi
# Delete the line containing "mmsApiKey" and the line containing "mmsGroupId"
@ -40,7 +44,7 @@ echo "mmsGroupId="${mms_groupid_key} >> ${MONGODB_MON_CONF_FILE}
echo "useSslForAllConnections=true" >> ${MONGODB_MON_CONF_FILE}
echo "sslRequireValidServerCertificates=true" >> ${MONGODB_MON_CONF_FILE}
echo "sslTrustedServerCertificates="${ca_crt_path} >> ${MONGODB_MON_CONF_FILE}
echo "sslClientCertificate="${monitoring_crt_path} >> ${MONGODB_MON_CONF_FILE}
echo "sslClientCertificate="${monitoring_pem_path} >> ${MONGODB_MON_CONF_FILE}
echo "#sslClientCertificatePassword=<password>" >> ${MONGODB_MON_CONF_FILE}
# start mdb monitoring agent

View File

@ -24,15 +24,15 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: mdb-mon
image: bigchaindb/mongodb-monitoring-agent:3.0
imagePullPolicy: Always
image: bigchaindb/mongodb-monitoring-agent:3.1
imagePullPolicy: IfNotPresent
env:
- name: MMS_API_KEYFILE_PATH
value: /etc/mongod/cloud/agent-api-key
- name: MMS_GROUPID_KEYFILE_PATH
value: /etc/mongod/cloud/group-id
- name: CA_CRT_PATH
value: /etc/mongod/ssl/ca.pem
value: /etc/mongod/ca/ca.pem
- name: MONITORING_PEM_PATH
value: /etc/mongod/ssl/mdb-mon-instance.pem
resources:
@ -43,6 +43,9 @@ spec:
- name: mdb-mon-certs
mountPath: /etc/mongod/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/mongod/ca/
readOnly: true
- name: cloud-manager-credentials
mountPath: /etc/mongod/cloud/
readOnly: true
@ -51,8 +54,12 @@ spec:
- name: mdb-mon-certs
secret:
secretName: mdb-mon-certs
defaultMode: 0400
defaultMode: 0404
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0404
- name: cloud-manager-credentials
secret:
secretName: cloud-manager-credentials
defaultMode: 0400
defaultMode: 0404

View File

@ -7,6 +7,6 @@ RUN apt-get update \
&& apt-get clean
COPY mongod.conf.template /etc/mongod.conf
COPY mongod_entrypoint.bash /
VOLUME /data/db /data/configdb /etc/mongod/ssl
VOLUME /data/db /data/configdb /etc/mongod/ssl /etc/mongod/ca
EXPOSE 27017
ENTRYPOINT ["/mongod_entrypoint.bash"]

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/mongodb:3.0 .
docker build -t bigchaindb/mongodb:3.1 .
docker push bigchaindb/mongodb:3.0
docker push bigchaindb/mongodb:3.1

View File

@ -76,7 +76,7 @@ setParameter:
authenticationMechanisms: MONGODB-X509,SCRAM-SHA-1
storage:
dbPath: /data/db
dbPath: /data/db/main
indexBuildRetry: true
journal:
enabled: true

View File

@ -55,16 +55,23 @@ while [[ $# -gt 1 ]]; do
done
# sanity checks
if [[ -z "${REPLICA_SET_NAME}" || \
-z "${MONGODB_PORT}" || \
-z "${MONGODB_FQDN}" || \
-z "${MONGODB_IP}" || \
-z "${MONGODB_KEY_FILE_PATH}" || \
-z "${MONGODB_CA_FILE_PATH}" || \
-z "${MONGODB_CRL_FILE_PATH}" ]] ; then
#-z "${MONGODB_KEY_FILE_PASSWORD}" || \
echo "Empty parameters detected. Exiting!"
exit 2
if [[ -z "${REPLICA_SET_NAME:?REPLICA_SET_NAME not specified. Exiting!}" || \
-z "${MONGODB_PORT:?MONGODB_PORT not specified. Exiting!}" || \
-z "${MONGODB_FQDN:?MONGODB_FQDN not specified. Exiting!}" || \
-z "${MONGODB_IP:?MONGODB_IP not specified. Exiting!}" || \
-z "${MONGODB_KEY_FILE_PATH:?MONGODB_KEY_FILE_PATH not specified. Exiting!}" || \
-z "${MONGODB_CA_FILE_PATH:?MONGODB_CA_FILE_PATH not specified. Exiting!}" || \
-z "${MONGODB_CRL_FILE_PATH:?MONGODB_CRL_FILE_PATH not specified. Exiting!}" ]] ; then
#-z "${MONGODB_KEY_FILE_PASSWORD:?MongoDB Key File Password not specified. Exiting!}" || \
exit 1
else
echo REPLICA_SET_NAME="$REPLICA_SET_NAME"
echo MONGODB_PORT="$MONGODB_PORT"
echo MONGODB_FQDN="$MONGODB_FQDN"
echo MONGODB_IP="$MONGODB_IP"
echo MONGODB_KEY_FILE_PATH="$MONGODB_KEY_FILE_PATH"
echo MONGODB_CA_FILE_PATH="$MONGODB_CA_FILE_PATH"
echo MONGODB_CRL_FILE_PATH="$MONGODB_CRL_FILE_PATH"
fi
MONGODB_CONF_FILE_PATH=/etc/mongod.conf
@ -81,6 +88,11 @@ sed -i "s|REPLICA_SET_NAME|${REPLICA_SET_NAME}|g" ${MONGODB_CONF_FILE_PATH}
# add the hostname and ip to hosts file
echo "${MONGODB_IP} ${MONGODB_FQDN}" >> $HOSTS_FILE_PATH
# create the directories, if they do not exist, where MongoDB can store its
# data and config files; this assumes that the data directory is mounted at
# /data/db/main and the config directory is mounted at /data/configdb
mkdir -p /data/db/main /data/configdb/main
# start mongod
echo "INFO: starting mongod..."

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
# Name of mongodb instance you are trying to connect to
# e.g. mdb-instance-0
name: "<remote-mongodb-host>"
namespace: default
spec:
ports:
- port: "<mongodb-backend-port from ConfigMap>"
type: ExternalName
# FQDN of remote cluster/NGINX instance
externalName: "<dns-name-remote-nginx>"
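An `ExternalName` Service does no proxying at all; kube-dns simply answers lookups of the Service name with a CNAME record for `externalName`. A sketch of how a replica-set peer in this cluster would then address the remote member, keeping the manifest's placeholders (TLS and auth options depend on your deployment):

```
# The local Service name resolves (via CNAME) to the remote NGINX
# instance, which forwards the TCP connection to the remote MongoDB.
mongo --ssl --host "<remote-mongodb-host>" --port "<mongodb-backend-port>"
```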

View File

@ -10,8 +10,6 @@ metadata:
spec:
accessModes:
- ReadWriteOnce
# FIXME(Uncomment when ACS supports this!)
# persistentVolumeReclaimPolicy: Retain
resources:
requests:
storage: 20Gi
@ -28,8 +26,6 @@ metadata:
spec:
accessModes:
- ReadWriteOnce
# FIXME(Uncomment when ACS supports this!)
# persistentVolumeReclaimPolicy: Retain
resources:
requests:
storage: 1Gi

View File

@ -5,7 +5,7 @@
########################################################################
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: mdb-instance-0-ss
namespace: default
@ -21,8 +21,8 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb
image: bigchaindb/mongodb:3.0
imagePullPolicy: Always
image: bigchaindb/mongodb:3.1
imagePullPolicy: IfNotPresent
env:
- name: MONGODB_FQDN
valueFrom:
@ -34,18 +34,24 @@ spec:
fieldRef:
fieldPath: status.podIP
- name: MONGODB_REPLICA_SET_NAME
value: bigchain-rs
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-replicaset-name
- name: MONGODB_PORT
value: "27017"
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-backend-port
args:
- --mongodb-port
- $(MONGODB_PORT)
- --mongodb-key-file-path
- /etc/mongod/ssl/mdb-instance.pem
- --mongodb-ca-file-path
- /etc/mongod/ssl/ca.pem
- /etc/mongod/ca/ca.pem
- --mongodb-crl-file-path
- /etc/mongod/ssl/mdb-crl.pem
- /etc/mongod/ca/crl.pem
- --replica-set-name
- $(MONGODB_REPLICA_SET_NAME)
- --mongodb-fqdn
@ -57,10 +63,9 @@ spec:
add:
- FOWNER
ports:
- containerPort: 27017
hostPort: 27017
name: mdb-port
- containerPort: "<mongodb-backend-port from ConfigMap>"
protocol: TCP
name: mdb-api-port
volumeMounts:
- name: mdb-db
mountPath: /data/db
@ -69,17 +74,21 @@ spec:
- name: mdb-certs
mountPath: /etc/mongod/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/mongod/ca/
readOnly: true
resources:
limits:
cpu: 200m
memory: 768Mi
memory: 3.5G
livenessProbe:
tcpSocket:
port: mdb-port
port: mdb-api-port
initialDelaySeconds: 15
successThreshold: 1
failureThreshold: 3
periodSeconds: 15
timeoutSeconds: 1
timeoutSeconds: 10
restartPolicy: Always
volumes:
- name: mdb-db
@ -92,3 +101,7 @@ spec:
secret:
secretName: mdb-certs
defaultMode: 0400
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0400

View File

@ -9,8 +9,9 @@ spec:
selector:
app: mdb-instance-0-ss
ports:
- port: 27017
targetPort: 27017
- port: "<mongodb-backend-port from ConfigMap>"
targetPort: "<mongodb-backend-port from ConfigMap>"
name: mdb-port
protocol: TCP
type: ClusterIP
clusterIP: None

View File

@ -1,94 +0,0 @@
###############################################################
# This config file runs nginx as a k8s deployment and exposes #
# it using an external load balancer. #
# This deployment is used as a front end to both BigchainDB #
# and MongoDB. #
###############################################################
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ngx-instance-0-dep
spec:
replicas: 1
template:
metadata:
labels:
app: ngx-instance-0-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx-3scale
image: bigchaindb/nginx_3scale:1.5
imagePullPolicy: Always
env:
- name: MONGODB_FRONTEND_PORT
value: $(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_MDB_PORT)
- name: MONGODB_BACKEND_HOST
# NGINX requires FQDN to resolve names
value: mdb-instance-0.default.svc.cluster.local
- name: MONGODB_BACKEND_PORT
value: "27017"
- name: BIGCHAINDB_FRONTEND_PORT
value: $(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_BDB_PORT)
- name: BIGCHAINDB_BACKEND_HOST
value: bdb-instance-0.default.svc.cluster.local
- name: BIGCHAINDB_BACKEND_PORT
value: "9984"
- name: MONGODB_WHITELIST
valueFrom:
configMapKeyRef:
name: mongodb-whitelist
key: allowed-hosts
- name: DNS_SERVER
value: "10.0.0.10"
- name: NGINX_HEALTH_CHECK_PORT
value: "8888"
ports:
- containerPort: 27017
hostPort: 27017
name: public-mdb-port
protocol: TCP
- containerPort: 443
hostPort: 443
name: public-bdb-port
protocol: TCP
- containerPort: 80
hostPort: 80
name: https-msg-port
protocol: TCP
- containerPort: 8888
hostPort: 8888
name: health-check
protocol: TCP
- containerPort: 8080
hostPort: 8080
name: public-api-port
protocol: TCP
volumeMounts:
- name: threescale-credentials
mountPath: /usr/local/openresty/nginx/conf/threescale
readOnly: true
- name: https-certs
mountPath: /usr/local/openresty/nginx/conf/ssl/
readOnly: true
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /
port: 8888
initialDelaySeconds: 15
timeoutSeconds: 10
restartPolicy: Always
volumes:
- name: https-certs
secret:
secretName: https-certs
defaultMode: 0400
- name: threescale-credentials
secret:
secretName: threescale-credentials
defaultMode: 0400

View File

@ -1,4 +1,4 @@
FROM nginx:1.11.10
FROM nginx:1.13.1
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \
@ -7,5 +7,5 @@ RUN apt-get update \
&& apt-get clean
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 80 81 443 444 27017
EXPOSE 80 27017
ENTRYPOINT ["/nginx_entrypoint.bash"]

View File

@ -0,0 +1,15 @@
## Nginx container for Secure WebSocket Support
### Step 1: Build and Push the Latest Container
Use the `docker_build_and_push.bash` script to build the latest docker image
and upload it to Docker Hub.
Ensure that the image tag is updated to a new version number to properly
reflect any changes made to the container.
### Note about testing WebSocket connections
You can test the WebSocket server with the
[wsc](https://www.npmjs.com/package/wsc) tool, using a command like:
`wsc -er ws://localhost:9985/api/v1/streams/valid_transactions`.

View File

@ -0,0 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_http:1.0 .
docker push bigchaindb/nginx_http:1.0

View File

@ -0,0 +1,154 @@
# Frontend API server that:
# 1. Forwards BDB HTTP & WS requests to BDB backend.
# 2. Forwards MDB TCP connections to MDB backend.
# 3. Does health check with LB.
worker_processes 2;
daemon off;
user nobody nogroup;
pid /tmp/nginx.pid;
error_log /dev/stderr;
events {
# Each worker handles up to 512 connections. Increase this for heavy
# workloads.
worker_connections 512;
accept_mutex on;
use epoll;
}
http {
access_log /dev/stdout combined buffer=16k flush=5s;
# Allow 10 req/sec from the same IP address, and store the counters in a
# `zone` or shared memory location tagged as 'one'.
limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;
# Enable logging when requests are being throttled.
limit_req_log_level notice;
# HTTP status code to return to the client when throttling;
# 429 is for TooManyRequests, ref. RFC 6585
limit_req_status 429;
# Limit requests from the same client: allow a `burst` of up to 20
# queued requests; with `nodelay`, requests beyond the burst are
# rejected immediately (with the status above) instead of being delayed.
limit_req zone=one burst=20 nodelay;
# `slowloris` attack mitigation settings.
client_body_timeout 10s;
client_header_timeout 10s;
# DNS resolver to use for all the backend names specified in this configuration.
resolver DNS_SERVER valid=30s ipv6=off;
keepalive_timeout 60s;
# The following map blocks enable lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $bdb_backend {
default BIGCHAINDB_BACKEND_HOST;
}
# Frontend server for the external clients
server {
listen CLUSTER_FRONTEND_PORT;
underscores_in_headers on;
# Forward websockets to backend BDB at 9985.
location /api/v1/streams/valid_transactions {
proxy_pass http://$bdb_backend:BIGCHAINDB_WS_PORT;
proxy_read_timeout 600s;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# Forward other URL paths to backend BDB at 9984.
location / {
proxy_ignore_client_abort on;
proxy_set_header X-Real-IP $remote_addr;
# max client request body size: avg transaction size.
client_max_body_size 15k;
# No auth for GETs, forward directly to BDB.
if ($request_method = GET) {
proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT;
}
# POST requests get forwarded to OpenResty instance. Enable CORS too.
if ($request_method = POST ) {
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range';
add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range';
proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT;
}
# OPTIONS requests handling for CORS.
if ($request_method = 'OPTIONS') {
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,app_key,app_id';
add_header 'Access-Control-Max-Age' 43200;
add_header 'Content-Type' 'text/plain charset=UTF-8';
add_header 'Content-Length' 0;
return 204;
}
}
}
# Frontend server for the load balancer to respond to health checks.
server {
listen HEALTH_CHECK_PORT;
location = /health {
return 200;
}
}
}
# NGINX stream block for TCP and UDP proxies. Used to proxy MDB TCP
# connection.
stream {
log_format mdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
'$proxy_protocol_addr $proxy_protocol_port '
'$protocol $status $session_time $bytes_sent '
'$bytes_received "$upstream_addr" "$upstream_bytes_sent" '
'"$upstream_bytes_received" "$upstream_connect_time" ';
access_log /dev/stdout mdb_log buffer=16k flush=5s;
# Define a zone 'two' of size 10 megabytes to store the counters
# that hold number of TCP connections from a specific IP address.
limit_conn_zone $binary_remote_addr zone=two:10m;
# Enable logging when connections are being throttled.
limit_conn_log_level notice;
# Allow 16 connections from the same IP address.
limit_conn two 16;
# DNS resolver to use for all the backend names specified in this configuration.
resolver DNS_SERVER valid=30s ipv6=off;
# The following map block enables lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $mdb_backend {
default MONGODB_BACKEND_HOST;
}
# Frontend server to forward connections to MDB instance.
server {
listen MONGODB_FRONTEND_PORT so_keepalive=10m:1m:5;
preread_timeout 30s;
tcp_nodelay on;
proxy_pass $mdb_backend:MONGODB_BACKEND_PORT;
}
}
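One way to exercise the CORS branches above from outside the cluster is an explicit preflight request; per the `OPTIONS` block it should return `204 No Content` with the `Access-Control-*` headers set. A sketch, with the host left as a placeholder:

```
# Preflight against the cluster frontend (CLUSTER_FRONTEND_PORT, 80 in
# the HTTP deployment above).
curl -i -X OPTIONS \
  -H 'Origin: https://example.com' \
  -H 'Access-Control-Request-Method: POST' \
  http://<cluster-frontend-host>/api/v1/transactions
# expect: HTTP/1.1 204 plus the Access-Control-Allow-* headers
```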

View File

@ -0,0 +1,64 @@
#!/bin/bash
set -euo pipefail
# Cluster vars
cluster_frontend_port=`printenv CLUSTER_FRONTEND_PORT`
# NGINX vars
dns_server=`printenv DNS_SERVER`
health_check_port=`printenv HEALTH_CHECK_PORT`
# MongoDB vars
mongo_frontend_port=`printenv MONGODB_FRONTEND_PORT`
mongo_backend_host=`printenv MONGODB_BACKEND_HOST`
mongo_backend_port=`printenv MONGODB_BACKEND_PORT`
# BigchainDB vars
bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_api_port=`printenv BIGCHAINDB_API_PORT`
bdb_ws_port=`printenv BIGCHAINDB_WS_PORT`
# sanity check
if [[ -z "${cluster_frontend_port:?CLUSTER_FRONTEND_PORT not specified. Exiting!}" || \
-z "${mongo_frontend_port:?MONGODB_FRONTEND_PORT not specified. Exiting!}" || \
-z "${mongo_backend_host:?MONGODB_BACKEND_HOST not specified. Exiting!}" || \
-z "${mongo_backend_port:?MONGODB_BACKEND_PORT not specified. Exiting!}" || \
-z "${bdb_backend_host:?BIGCHAINDB_BACKEND_HOST not specified. Exiting!}" || \
-z "${bdb_api_port:?BIGCHAINDB_API_PORT not specified. Exiting!}" || \
-z "${bdb_ws_port:?BIGCHAINDB_WS_PORT not specified. Exiting!}" || \
-z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \
-z "${health_check_port:?HEALTH_CHECK_PORT not specified.}" ]]; then
exit 1
else
echo CLUSTER_FRONTEND_PORT="$cluster_frontend_port"
echo DNS_SERVER="$dns_server"
echo HEALTH_CHECK_PORT="$health_check_port"
echo MONGODB_FRONTEND_PORT="$mongo_frontend_port"
echo MONGODB_BACKEND_HOST="$mongo_backend_host"
echo MONGODB_BACKEND_PORT="$mongo_backend_port"
echo BIGCHAINDB_BACKEND_HOST="$bdb_backend_host"
echo BIGCHAINDB_API_PORT="$bdb_api_port"
echo BIGCHAINDB_WS_PORT="$bdb_ws_port"
fi
NGINX_CONF_FILE=/etc/nginx/nginx.conf
# configure the nginx.conf file with env variables
sed -i "s|CLUSTER_FRONTEND_PORT|${cluster_frontend_port}|g" ${NGINX_CONF_FILE}
sed -i "s|MONGODB_FRONTEND_PORT|${mongo_frontend_port}|g" ${NGINX_CONF_FILE}
sed -i "s|MONGODB_BACKEND_HOST|${mongo_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|MONGODB_BACKEND_PORT|${mongo_backend_port}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_WS_PORT|${bdb_ws_port}|g" ${NGINX_CONF_FILE}
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|HEALTH_CHECK_PORT|${health_check_port}|g" ${NGINX_CONF_FILE}
# start nginx
echo "INFO: starting nginx..."
exec nginx -c /etc/nginx/nginx.conf
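The entrypoint's templating is plain token substitution, which makes it easy to dry-run outside the container. A sketch, with the template path and the substituted value as assumptions:

```
# Substitute a single token locally and confirm the result.
cp nginx.conf.template /tmp/nginx.conf
sed -i "s|CLUSTER_FRONTEND_PORT|80|g" /tmp/nginx.conf
grep -n 'listen 80' /tmp/nginx.conf   # the frontend server block now listens on 80
```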

View File

@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ngx-instance-0-dep
spec:
replicas: 1
template:
metadata:
labels:
app: ngx-instance-0-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: bigchaindb/nginx_http:1.0
imagePullPolicy: IfNotPresent
env:
- name: CLUSTER_FRONTEND_PORT
valueFrom:
configMapKeyRef:
name: vars
key: cluster-frontend-port
- name: HEALTH_CHECK_PORT
valueFrom:
configMapKeyRef:
name: vars
key: cluster-health-check-port
- name: DNS_SERVER
valueFrom:
configMapKeyRef:
name: vars
key: cluster-dns-server-ip
- name: MONGODB_FRONTEND_PORT
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-frontend-port
- name: MONGODB_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: vars
key: ngx-mdb-instance-name
- name: MONGODB_BACKEND_PORT
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-backend-port
- name: BIGCHAINDB_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: vars
key: ngx-bdb-instance-name
- name: BIGCHAINDB_API_PORT
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-api-port
- name: BIGCHAINDB_WS_PORT
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-port
ports:
- containerPort: "<mongodb-frontend-port from ConfigMap>"
protocol: TCP
- containerPort: "<cluster-health-check-port from ConfigMap>"
protocol: TCP
name: ngx-health
- containerPort: "<cluster-frontend-port from ConfigMap>"
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: ngx-health
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
resources:
limits:
cpu: 200m
memory: 768Mi
restartPolicy: Always

View File

@ -13,16 +13,8 @@ spec:
selector:
app: ngx-instance-0-dep
ports:
- port: 27017
targetPort: 27017
name: ngx-public-mdb-port
protocol: TCP
- port: 80
targetPort: 80
name: ngx-public-api-port
protocol: TCP
- port: 81
targetPort: 81
name: ngx-public-ws-port
- port: "<cluster-frontend-port from ConfigMap>"
targetPort: "<cluster-frontend-port from ConfigMap>"
name: public-cluster-port
protocol: TCP
type: LoadBalancer

View File

@ -0,0 +1,33 @@
## Deploying the BigchainDB Web Proxy on a Kubernetes Cluster
### Configure the Web Proxy
* Fill in the configuration details for the proxy in the
`nginx-https-web-proxy-conf.yaml` file.
* Use the command below to create the appropriate ConfigMap and Secret:
```
kubectl apply -f nginx-https-web-proxy-conf.yaml
```
### Start the Kubernetes Service for BigchainDB Web Proxy
* Use the command below to start the Kubernetes Service:
```
kubectl apply -f nginx-https-web-proxy-svc.yaml
```
* This will give you a public IP address tied to an Azure LB.
* Map this to an available domain of your choice on the Azure portal (or use
any other DNS service provider!)
### Start the Kubernetes Deployment for BigchainDB Web Proxy
* Use the command below to start the Kubernetes Deployment:
```
kubectl apply -f nginx-https-web-proxy-dep.yaml
```
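Since the proxy Service (shown later in this diff) is of type `LoadBalancer`, the public IP appears asynchronously; one way to watch for it:

```
# Wait until EXTERNAL-IP flips from <pending> to the Azure LB address.
kubectl get svc web-proxy-instance-0 -w
```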

View File

@ -0,0 +1,8 @@
FROM openresty/openresty:alpine
RUN apk update \
&& apk upgrade \
&& apk add bash
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 443
ENTRYPOINT ["/nginx_entrypoint.bash"]

View File

@ -0,0 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx-https-web-proxy:0.10 .
docker push bigchaindb/nginx-https-web-proxy:0.10

View File

@ -0,0 +1,138 @@
# Frontend Proxy server that:
# 1. Acts as the HTTPS proxy termination point.
# 2. Forwards BDB POST requests to OpenResty backend after appending the app_id
# and app_key headers.
# 3. Forwards BDB GET requests to BDB backend.
# 4. Does health check with LB.
worker_processes 4;
daemon off;
user nobody nogroup;
pid /tmp/nginx.pid;
error_log /dev/stderr;
events {
# Each worker handles up to 1024 connections. Increase this for heavy
# workloads.
worker_connections 1024;
accept_mutex on;
use epoll;
}
http {
access_log /dev/stdout combined buffer=16k flush=5s;
# Allow 2048 req/sec from the same IP address, and store the counters in a
# `zone` or shared memory location tagged as 'one'.
limit_req_zone $binary_remote_addr zone=one:10m rate=2048r/s;
# Enable logging when requests are being throttled.
limit_req_log_level notice;
# HTTP status code that is returned to the client; 429 is for TooManyRequests,
# ref. RFC 6585
limit_req_status 429;
# Limit requests from the same client: allow a `burst` of up to 3072
# queued requests; with `nodelay`, requests beyond the burst are
# rejected immediately (with the status above) instead of being delayed.
limit_req zone=one burst=3072 nodelay;
# `slowloris` attack mitigation settings.
client_body_timeout 30s;
client_header_timeout 10s;
# DNS resolver to use for all the backend names specified in this configuration.
resolver DNS_SERVER valid=30s ipv6=off;
keepalive_timeout 60s;
# The following map blocks enable lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $bdb_backend {
default BIGCHAINDB_BACKEND_HOST;
}
map $remote_addr $openresty_backend {
default OPENRESTY_BACKEND_HOST;
}
# Frontend server for the external clients; acts as HTTPS termination point.
server {
listen PROXY_FRONTEND_PORT ssl;
server_name "PROXY_FQDN";
ssl_certificate /etc/nginx/ssl/cert.pem;
ssl_certificate_key /etc/nginx/ssl/cert.key;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
underscores_in_headers on;
# No websocket support for web proxy
location /api/v1/streams/valid_transactions {
return 403 'Websockets are not supported in the web proxy';
}
# Forward other URL paths as per business logic/use case to BDB or
# OpenResty instance.
location / {
proxy_ignore_client_abort on;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# max client request body size: avg transaction size.
client_max_body_size 15k;
# Debug block for listing all the headers sent with the request
header_filter_by_lua_block {
local h = ngx.req.get_headers()
for k, v in pairs(h) do
ngx.log(ngx.ERR, "Header "..k..": "..v..";")
end
}
# Check that the request originated from the expected web page,
# using the Referer header.
if ($http_referer !~ "PROXY_EXPECTED_REFERER_HEADER" ) {
return 403 'Unknown referer';
}
# check if the request has the expected origin header
if ($http_origin !~ "PROXY_EXPECTED_ORIGIN_HEADER" ) {
return 403 'Unknown origin';
}
if ($request_method = 'OPTIONS') {
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,app_key,app_id';
add_header 'Access-Control-Max-Age' 43200;
add_header 'Content-Type' 'text/plain charset=UTF-8';
add_header 'Content-Length' 0;
return 204;
}
# No auth for GETs, forward directly to BDB.
if ($request_method = GET) {
proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT;
}
# POST requests get forwarded to OpenResty instance; set the correct
# headers accordingly
proxy_set_header app_id "PROXY_APP_ID";
proxy_set_header app_key "PROXY_APP_KEY";
if ($request_method = POST ) {
proxy_pass http://$openresty_backend:OPENRESTY_BACKEND_PORT;
}
}
}
# Frontend server for the load balancer to respond to health checks.
server {
listen HEALTH_CHECK_PORT;
location = /health {
return 200;
}
}
}
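Because the Referer and Origin checks run before any backend is contacted, a bare request is rejected up front, and only requests whose headers match the configured regexes get through. A sketch, assuming the default `proxy-frontend-port` of 4443 from the ConfigMap later in this diff:

```
# Without the expected headers the proxy answers 403 up front.
curl -i "https://<proxy-fqdn>:4443/api/v1/transactions"

# With Referer and Origin values matching the configured regexes,
# a GET is forwarded to the BigchainDB backend.
curl -i \
  -H 'Referer: https://www.bigchaindb.com/' \
  -H 'Origin: https://www.bigchaindb.com' \
  "https://<proxy-fqdn>:4443/api/v1/transactions"
```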

View File

@ -0,0 +1,74 @@
#!/bin/bash
set -euo pipefail
# Proxy vars
proxy_fqdn=`printenv PROXY_FQDN`
proxy_frontend_port=`printenv PROXY_FRONTEND_PORT`
proxy_app_id_file=/etc/nginx/proxy/credentials/app_id
proxy_app_key_file=/etc/nginx/proxy/credentials/app_key
proxy_app_id=`cat ${proxy_app_id_file}`
proxy_app_key=`cat ${proxy_app_key_file}`
proxy_expected_referer_header=`printenv PROXY_EXPECTED_REFERER_HEADER`
proxy_expected_origin_header=`printenv PROXY_EXPECTED_ORIGIN_HEADER`
# OpenResty vars
openresty_backend_host=`printenv OPENRESTY_BACKEND_HOST`
openresty_backend_port=`printenv OPENRESTY_BACKEND_PORT`
# NGINX vars
dns_server=`printenv DNS_SERVER`
health_check_port=`printenv HEALTH_CHECK_PORT`
# BigchainDB vars
bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_api_port=`printenv BIGCHAINDB_API_PORT`
# sanity check
if [[ -z "${proxy_frontend_port:?PROXY_FRONTEND_PORT not specified. Exiting!}" || \
-z "${openresty_backend_port:?OPENRESTY_BACKEND_PORT not specified. Exiting!}" || \
-z "${openresty_backend_host:?OPENRESTY_BACKEND_HOST not specified. Exiting!}" || \
-z "${bdb_backend_host:?BIGCHAINDB_BACKEND_HOST not specified. Exiting!}" || \
-z "${bdb_api_port:?BIGCHAINDB_API_PORT not specified. Exiting!}" || \
-z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \
-z "${health_check_port:?HEALTH_CHECK_PORT not specified. Exiting!}" || \
-z "${proxy_app_id:?PROXY_APP_ID not specified. Exiting!}" || \
-z "${proxy_app_key:?PROXY_APP_KEY not specified. Exiting!}" || \
-z "${proxy_expected_referer_header:?PROXY_EXPECTED_REFERER_HEADER not specified. Exiting!}" || \
-z "${proxy_expected_origin_header:?PROXY_EXPECTED_ORIGIN_HEADER not specified. Exiting!}" || \
-z "${proxy_fqdn:?PROXY_FQDN not specified. Exiting!}" ]]; then
exit 1
else
echo PROXY_FQDN="$proxy_fqdn"
echo PROXY_FRONTEND_PORT="$proxy_frontend_port"
echo PROXY_EXPECTED_REFERER_HEADER="$proxy_expected_referer_header"
echo PROXY_EXPECTED_ORIGIN_HEADER="$proxy_expected_origin_header"
echo DNS_SERVER="$dns_server"
echo HEALTH_CHECK_PORT="$health_check_port"
echo OPENRESTY_BACKEND_HOST="$openresty_backend_host"
echo OPENRESTY_BACKEND_PORT="$openresty_backend_port"
echo BIGCHAINDB_BACKEND_HOST="$bdb_backend_host"
echo BIGCHAINDB_API_PORT="$bdb_api_port"
fi
NGINX_CONF_FILE=/etc/nginx/nginx.conf
# configure the nginx.conf file with env variables
sed -i "s|PROXY_FQDN|${proxy_fqdn}|g" ${NGINX_CONF_FILE}
sed -i "s|PROXY_FRONTEND_PORT|${proxy_frontend_port}|g" ${NGINX_CONF_FILE}
sed -i "s|OPENRESTY_BACKEND_PORT|${openresty_backend_port}|g" ${NGINX_CONF_FILE}
sed -i "s|OPENRESTY_BACKEND_HOST|${openresty_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE}
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|HEALTH_CHECK_PORT|${health_check_port}|g" ${NGINX_CONF_FILE}
sed -i "s|PROXY_APP_ID|${proxy_app_id}|g" ${NGINX_CONF_FILE}
sed -i "s|PROXY_APP_KEY|${proxy_app_key}|g" ${NGINX_CONF_FILE}
sed -i "s|PROXY_EXPECTED_REFERER_HEADER|${proxy_expected_referer_header}|g" ${NGINX_CONF_FILE}
sed -i "s|PROXY_EXPECTED_ORIGIN_HEADER|${proxy_expected_origin_header}|g" ${NGINX_CONF_FILE}
# start nginx
echo "INFO: starting nginx..."
exec nginx -c /etc/nginx/nginx.conf

View File

@ -0,0 +1,60 @@
# All secret data should be base64 encoded before embedding them in the Secret.
# Short strings can be encoded using, e.g.
# echo "secret string" | base64 -w 0 > secret.string.b64
# Files (e.g. certificates) can be encoded using, e.g.
# cat cert.pem | base64 -w 0 > cert.pem.b64
# then copy the contents of cert.pem.b64 (for example) below.
# Ref: https://kubernetes.io/docs/concepts/configuration/secret/
# Unused values can be set to ""
apiVersion: v1
kind: Secret
metadata:
name: proxy-credentials
namespace: default
type: Opaque
data:
# app_id is the app id that the proxy adds to requests going to the backend
app_id: "<b64 encoded app_id>"
# app_key is the app key that the proxy adds to requests going to the backend
app_key: "<b64 encoded app_key>"
---
apiVersion: v1
kind: Secret
metadata:
name: proxy-https-certs
namespace: default
type: Opaque
data:
# Base64-encoded HTTPS private key
cert.key: "<b64 encoded HTTPS private key>"
# Base64-encoded HTTPS certificate chain
# starting with your primary SSL cert (e.g. your_domain.crt)
# followed by all intermediate certs.
# If cert is from DigiCert, download "Best format for nginx".
cert.pem: "<b64 encoded HTTPS certificate chain>"
---
## Note: data values do NOT have to be base64-encoded for ConfigMap.
# proxy-vars is common environment variables for the Web Proxy.
apiVersion: v1
kind: ConfigMap
metadata:
name: proxy-vars
namespace: default
data:
# proxy-fqdn is the DNS name registered for your HTTPS certificate.
proxy-fqdn: "proxy-bdb.example.com"
# proxy-frontend-port is the port number on which this web proxy's services
# are available to external clients.
proxy-frontend-port: "4443"
# expected-http-referer is the regex that the Referer header of incoming
# HTTP requests to the proxy must match.
# The default below accepts Referer values matching *.bigchaindb.com
expected-http-referer: "^https://(.*)bigchaindb\\.com/(.*)"
# expected-http-origin is the regex that the Origin header of incoming
# HTTP requests to the proxy must match.
# The default below accepts Origin values matching *.bigchaindb.com
expected-http-origin: "^https://(.*)bigchaindb\\.com"
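The regexes above are double-escaped for YAML (`\\.`), so the proxy sees a single `\.`. A quick sanity check of the Referer pattern with `grep -E`:

```
# A referrer from *.bigchaindb.com should match the configured pattern.
echo "https://www.bigchaindb.com/some/page" \
  | grep -E '^https://(.*)bigchaindb\.com/(.*)' && echo "referer would pass"
```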

View File

@ -0,0 +1,99 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: web-proxy-instance-0-dep
spec:
replicas: 1
template:
metadata:
labels:
app: web-proxy-instance-0-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: web-proxy
image: bigchaindb/nginx-https-web-proxy:0.10
imagePullPolicy: Always
env:
- name: PROXY_FQDN
valueFrom:
configMapKeyRef:
name: proxy-vars
key: proxy-fqdn
- name: PROXY_FRONTEND_PORT
valueFrom:
configMapKeyRef:
name: proxy-vars
key: proxy-frontend-port
- name: PROXY_EXPECTED_REFERER_HEADER
valueFrom:
configMapKeyRef:
name: proxy-vars
key: expected-http-referer
- name: PROXY_EXPECTED_ORIGIN_HEADER
valueFrom:
configMapKeyRef:
name: proxy-vars
key: expected-http-origin
- name: OPENRESTY_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: vars
key: ngx-openresty-instance-name
- name: OPENRESTY_BACKEND_PORT
valueFrom:
configMapKeyRef:
name: vars
key: openresty-backend-port
- name: DNS_SERVER
valueFrom:
configMapKeyRef:
name: vars
key: cluster-dns-server-ip
- name: HEALTH_CHECK_PORT
valueFrom:
configMapKeyRef:
name: vars
key: cluster-health-check-port
- name: BIGCHAINDB_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: vars
key: ngx-bdb-instance-name
- name: BIGCHAINDB_API_PORT
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-api-port
ports:
- containerPort: <port where the proxy is listening for requests>
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: <cluster-health-check-port from the ConfigMap>
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
resources:
limits:
cpu: 200m
memory: 256Mi
volumeMounts:
- name: proxy-https-certs
mountPath: /etc/nginx/ssl/
readOnly: true
- name: proxy-credentials
mountPath: /etc/nginx/proxy/credentials/
readOnly: true
restartPolicy: Always
volumes:
- name: proxy-https-certs
secret:
secretName: proxy-https-certs
defaultMode: 0400
- name: proxy-credentials
secret:
secretName: proxy-credentials
defaultMode: 0400

View File

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: web-proxy-instance-0
namespace: default
labels:
name: web-proxy-instance-0
spec:
selector:
app: web-proxy-instance-0-dep
ports:
- port: <port where the proxy is listening for requests>
targetPort: <port where the proxy is listening for requests>
name: public-web-proxy-port
protocol: TCP
type: LoadBalancer

View File

@ -0,0 +1,11 @@
FROM nginx:1.13.1
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get autoremove \
&& apt-get clean
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 80 443 27017
ENTRYPOINT ["/nginx_entrypoint.bash"]

Some files were not shown because too many files have changed in this diff.