Compare commits

...

3 Commits

Author SHA1 Message Date
Tutus Development 2dba25e993 Fix internal imports: github.com -> git.marketally.com 2025-12-27 15:53:09 +00:00
Tutus Development fb604ad460 Rebrand to Tutus - update license, workflows, and dependencies 2025-12-27 15:45:22 +00:00
Admin bea7e7cdcb Migrate module path to git.marketally.com 2025-12-27 09:55:42 -05:00
81 changed files with 6472 additions and 6472 deletions

2
.github/CODEOWNERS vendored
View File

@@ -1 +1 @@
* @AnnaShaleva @roman-khimov

View File

@@ -1,44 +1,44 @@
name: Build
on:
pull_request:
branches:
- master
types: [opened, synchronize]
paths-ignore:
- 'scripts/**'
- '**/*.md'
push:
# Build for the master branch.
branches:
- master
release:
# Publish released commit as Docker `latest` and `git_revision` images.
types:
- published
workflow_dispatch:
inputs:
ref:
description: 'Ref to build dBFT [default: latest master; examples: v0.1.0, 0a4ff9d3e4a9ab432fd5812eb18c98e03b5a7432]'
required: false
default: ''
jobs:
run:
name: Run simulation
runs-on: ubuntu-slim
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.ref }}
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: true
- name: Run simulation
run: |
cd ./internal/simulation
go run main.go

View File

@@ -1,33 +1,33 @@
name: CHANGELOG check
on:
pull_request:
branches:
- master
paths-ignore:
- '**/*.md'
- '**/*.yml'
- '.github/workflows/**'
- 'formal-models/**'
jobs:
check:
name: Check for CHANGELOG updates
runs-on: ubuntu-slim
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get changed CHANGELOG
id: changelog-diff
uses: tj-actions/changed-files@v46
with:
files: CHANGELOG.md
- name: Fail if changelog not updated
if: steps.changelog-diff.outputs.any_changed == 'false'
uses: actions/github-script@v7
with:
script: |
core.setFailed('CHANGELOG.md has not been updated')

View File

@@ -1,10 +1,10 @@
name: DCO check
on:
pull_request:
branches:
- master
jobs:
dco:
uses: nspcc-dev/.github/.github/workflows/dco.yml@master

View File

@@ -1,98 +1,98 @@
name: Go
on:
push:
branches: [ master ]
pull_request:
branches:
- master
types: [opened, synchronize]
paths-ignore:
- '**/*.md'
workflow_dispatch:
jobs:
lint:
name: Lint
uses: nspcc-dev/.github/.github/workflows/go-linter.yml@master
test:
name: Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
go: [ '1.24', '1.25']
os: [ubuntu-latest, windows-latest, macos-latest]
exclude:
# Only latest Go version for Windows and MacOS.
- os: windows-latest
go: '1.24'
- os: macos-latest
go: '1.24'
# Exclude latest Go version for Ubuntu as Coverage uses it.
- os: ubuntu-latest
go: '1.25'
steps:
- name: Setup go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Tests
run: go test -race ./...
coverage:
name: Coverage
runs-on: ubuntu-latest
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.25
- name: Check out
uses: actions/checkout@v4
- name: Collect coverage
run: go test -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage results to Codecov
uses: codecov/codecov-action@v4
with:
fail_ci_if_error: true
files: ./coverage.txt
slug: nspcc-dev/dbft
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
codeql:
name: CodeQL
runs-on: ubuntu-slim
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3

10
.gitignore vendored
View File

@@ -1,5 +1,5 @@
/vendor
.golangci.yml
# TLC Model Checker files
formal-models/*/*.toolbox/

View File

@@ -1,147 +1,147 @@
# Changelog
This document outlines major changes between releases.
## [Unreleased]
New features:
Behaviour changes:
Improvements:
* minimum required Go version is 1.24 (#144)
Bugs fixed:
## [0.4.0] (17 July 2025)
This release contains two major changes. The first one introduces the ability
to change block generation time every block. It is triggered by the transfer of
the `TimePerBlock` setting to the native Policy contract on the N3 protocol,
which makes this value variable throughout the network lifetime. The second
change allows block generation time to vary from `TimePerBlock` (when there are
transactions in the network and hence it's beneficial to accept the block as
soon as possible) to `MaxTimePerBlock` (when there are no transactions, but
consensus still needs to maintain the network heartbeat). This is beneficial
for custom networks with small `TimePerBlock` values as it prevents chain size
explosion. This release also upgrades the Go version to 1.23.
New features:
* `MaxTimePerBlock` and `SubscribeForTxs` configuration parameters are added
to support dynamic block time extension (#150)
Behaviour changes:
* `SecondsPerBlock` config parameter is replaced with `TimePerBlock` function (#147)
Improvements:
* minimum required Go version is 1.23 now (#145)
## [0.3.2] (30 January 2025)
This patch release includes important dBFT timer adjustments. The first one
moves the reference time point of the dBFT timer to the moment the
PrepareRequest is received. The second one takes the evaluated network
roundtrip time into account on every dBFT timer reset. With these adjustments
the actual block producing time is extremely close to the configured value.
Other than that, a couple of minor bug fixes are included.
Improvements:
* timer adjustment for most of the consensus time, more accurate block
intervals (#56)
* timer adjustment for network roundtrip time (#140)
Bugs fixed:
* inappropriate log on attempt to construct Commit for anti-MEV enabled WatchOnly
(#139)
* empty PreCommit/Commit can be relayed (#142)
## [0.3.1] (29 November 2024)
This patch version mostly includes a set of library API extensions made to fit
the needs of developing an MEV-resistant blockchain node. It also bumps the
minimum required Go version to 1.22 and contains a set of bug fixes critical
for the library's functioning.
Minor user-side code adjustments are required to adapt to the new ProcessBlock
callback signature, whereas the rest of the APIs stay compatible with the old
implementation. This version also simplifies the PrivateKey interface, which
can be adopted by removing extra wrappers around the PrivateKey implementation
on the user's side.
Behaviour changes:
* adjust behaviour of ProcessPreBlock callback (#129)
* (*DBFT).Header() and (*DBFT).PreHeader() are moved to (*Context) receiver (#133)
* support error handling for ProcessBlock callback if anti-MEV extension is enabled
(#134)
* remove Sign method from PrivateKey interface (#137)
Improvements:
* minimum required Go version is 1.22 (#122, #126)
* log Commit signature verification error (#134)
* add Commit message verification callback (#134)
Bugs fixed:
* context-bound PreBlock and PreHeader are not reset properly (#127)
* PreHeader is constructed instead of PreBlock to create PreCommit message (#128)
* enable anti-MEV extension with respect to the current block index (#132)
* (*Context).PreBlock() method returns PreHeader instead of PreBlock (#133)
* WatchOnly node may send RecoveryMessage on RecoveryRequest (#135)
* invalid PreCommit message is not removed from cache (#134)
## [0.3.0] (01 August 2024)
New features:
* TLA+ model for MEV-resistant dBFT extension (#116)
* support for additional phase of MEV-resistant dBFT (#118)
Behaviour changes:
* simplify PublicKey interface (#114)
* remove WithKeyPair callback from dBFT (#114)
## [0.2.0] (01 April 2024)
We're rolling out an update for dBFT that contains a substantial library
interface refactoring. Starting from this version, dBFT is shipped as a generic
package with a wide range of generic interfaces, callbacks and parameters. No
default payload implementations are supplied anymore; the library itself works
only with payload interfaces, so users are expected to implement the minimum
required set of payload interfaces themselves. A lot of outdated and unused
APIs were removed and some internal APIs were renamed, so the resulting library
interface is much clearer and more lightweight. Also, the minimum required Go
version was upgraded to Go 1.20.
Please note that no consensus-level behaviour changes are introduced; this
release focuses only on improving the library APIs, so it shouldn't be hard for
users to migrate to the new interface.
Behaviour changes:
* add generic Hash/Address parameters to `DBFT` service (#94)
* remove custom payloads implementation from default `DBFT` service configuration
(#94)
* rename `InitializeConsensus` dBFT method to `Reset` (#95)
* drop outdated dBFT `Service` interface (#95)
* move all default implementations to `internal` package (#97)
* remove unused APIs of dBFT and payload interfaces (#104)
* timer interface refactoring (#105)
* constructor returns some meaningful error on failed dBFT instance creation (#107)
Improvements:
* add MIT License (#78, #79)
* documentation updates (#80, #86, #95)
* dependencies upgrades (#82, #85)
* minimum required Go version upgrade to Go 1.19 (#83)
* log messages adjustment (#88)
* untie `dbft` module from `github.com/nspcc-dev/neo-go` dependency (#94)
* minimum required Go version upgrade to Go 1.20 (#100)
## [0.1.0] (15 May 2023)
Stable dbft 2.0 implementation.
[Unreleased]: https://github.com/nspcc-dev/dbft/compare/v0.3.2...master
[0.4.0]: https://github.com/nspcc-dev/dbft/releases/v0.4.0
[0.3.2]: https://github.com/nspcc-dev/dbft/releases/v0.3.2
[0.3.1]: https://github.com/nspcc-dev/dbft/releases/v0.3.1
[0.3.0]: https://github.com/nspcc-dev/dbft/releases/v0.3.0
[0.2.0]: https://github.com/nspcc-dev/dbft/releases/v0.2.0
[0.1.0]: https://github.com/nspcc-dev/dbft/releases/v0.1.0
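
The 0.4.0 notes above describe the dynamic block time extension: the next block
is accepted after TimePerBlock while there are pending transactions and only
after MaxTimePerBlock when the mempool is empty. The standalone Go sketch below
illustrates that interval selection; it is an illustration of the described
behaviour only, not the library's internal timer logic, and the nextInterval
helper is hypothetical.

package main

import (
	"fmt"
	"time"
)

// nextInterval picks the timeout for the next block from the two (possibly
// per-block) duration callbacks, based on the current mempool size. This
// mirrors the behaviour described in the 0.4.0 release notes, not dbft code.
func nextInterval(timePerBlock, maxTimePerBlock func() time.Duration, pendingTxs int) time.Duration {
	if maxTimePerBlock == nil || pendingTxs > 0 {
		// Transactions are waiting (or the extension is disabled):
		// accept the block as soon as TimePerBlock allows.
		return timePerBlock()
	}
	// Empty pool: stretch the interval and keep only the network heartbeat.
	return maxTimePerBlock()
}

func main() {
	tpb := func() time.Duration { return time.Second }
	mtpb := func() time.Duration { return 15 * time.Second }
	fmt.Println(nextInterval(tpb, mtpb, 3)) // 1s: transactions are pending
	fmt.Println(nextInterval(tpb, mtpb, 0)) // 15s: idle, heartbeat only
}

In the real configuration both durations come from the TimePerBlock and
MaxTimePerBlock callbacks in config.go further down, which may return different
values every block.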

View File

@@ -1,10 +1,10 @@
MIT License
Copyright (c) 2018-2023 NeoSPCC (@nspcc-dev)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,30 +1,30 @@
package dbft
// Block is a generic interface for a block used by dbft.
type Block[H Hash] interface {
// Hash returns block hash.
Hash() H
// PrevHash returns previous block hash.
PrevHash() H
// MerkleRoot returns a merkle root of the transaction hashes.
MerkleRoot() H
// Index returns block index.
Index() uint32
// Signature returns block's signature.
Signature() []byte
// Sign signs the block and sets its signature.
Sign(key PrivateKey) error
// Verify checks if signature is correct.
Verify(key PublicKey, sign []byte) error
// Transactions returns block's transaction list.
Transactions() []Transaction[H]
// SetTransactions sets the block's transaction list. With the anti-MEV
// extension, transactions provided via this call are taken directly from the
// PreBlock level and thus may be out-of-date. Hence, with the anti-MEV
// extension enabled it's suggested to use this method as a Block finalizer,
// since it will be called right before block approval. Do not rely on this
// with the anti-MEV extension disabled.
SetTransactions([]Transaction[H])
}

View File

@@ -1,10 +1,10 @@
package dbft
// ChangeView represents dBFT ChangeView message.
type ChangeView interface {
// NewViewNumber returns proposed view number.
NewViewNumber() byte
// Reason returns change view reason.
Reason() ChangeViewReason
}

View File

@@ -1,18 +1,18 @@
package dbft
//go:generate stringer -type=ChangeViewReason -linecomment
// ChangeViewReason represents a view change reason code.
type ChangeViewReason byte
// These constants define various reasons for view changing. They follow
// Neo 3, except for the Unknown value which is left for compatibility with Neo 2.
const (
CVTimeout ChangeViewReason = 0x0 // Timeout
CVChangeAgreement ChangeViewReason = 0x1 // ChangeAgreement
CVTxNotFound ChangeViewReason = 0x2 // TxNotFound
CVTxRejectedByPolicy ChangeViewReason = 0x3 // TxRejectedByPolicy
CVTxInvalid ChangeViewReason = 0x4 // TxInvalid
CVBlockRejectedByPolicy ChangeViewReason = 0x5 // BlockRejectedByPolicy
CVUnknown ChangeViewReason = 0xff // Unknown
)

View File

@@ -1,38 +1,38 @@
// Code generated by "stringer -type=ChangeViewReason -linecomment"; DO NOT EDIT.
package dbft
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CVTimeout-0]
_ = x[CVChangeAgreement-1]
_ = x[CVTxNotFound-2]
_ = x[CVTxRejectedByPolicy-3]
_ = x[CVTxInvalid-4]
_ = x[CVBlockRejectedByPolicy-5]
_ = x[CVUnknown-255]
}
const (
_ChangeViewReason_name_0 = "TimeoutChangeAgreementTxNotFoundTxRejectedByPolicyTxInvalidBlockRejectedByPolicy"
_ChangeViewReason_name_1 = "Unknown"
)
var (
_ChangeViewReason_index_0 = [...]uint8{0, 7, 22, 32, 50, 59, 80}
)
func (i ChangeViewReason) String() string {
switch {
case 0 <= i && i <= 5:
return _ChangeViewReason_name_0[_ChangeViewReason_index_0[i]:_ChangeViewReason_index_0[i+1]]
case i == 255:
return _ChangeViewReason_name_1
default:
return "ChangeViewReason(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
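
Because ChangeViewReason is generated with -linecomment, each constant prints
as the comment text next to it in the constant block above. A minimal usage
sketch, assuming the pre-migration import path taken from the CHANGELOG links
(go.mod is not part of the hunks shown, and after these commits the module path
presumably moves under git.marketally.com):

package main

import (
	"fmt"

	// Assumed import path (pre-migration); adjust to the actual module path.
	"github.com/nspcc-dev/dbft"
)

func main() {
	// The -linecomment stringer output maps each constant to its trailing comment.
	fmt.Println(dbft.CVTimeout)           // Timeout
	fmt.Println(dbft.CVUnknown)           // Unknown
	fmt.Println(dbft.ChangeViewReason(9)) // ChangeViewReason(9)
}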

360
check.go
View File

@@ -1,180 +1,180 @@
package dbft
import (
"go.uber.org/zap"
)
func (d *DBFT[H]) checkPrepare() {
if d.lastBlockIndex != d.BlockIndex || d.lastBlockView != d.ViewNumber {
// Notice that lastBlockTimestamp is left unchanged because
// this must be the value from the last header.
d.lastBlockTime = d.Timer.Now()
d.lastBlockIndex = d.BlockIndex
d.lastBlockView = d.ViewNumber
}
if !d.hasAllTransactions() {
d.Logger.Debug("check prepare: some transactions are missing", zap.Any("hashes", d.MissingTransactions))
return
}
count := 0
hasRequest := false
for _, msg := range d.PreparationPayloads {
if msg != nil {
if msg.ViewNumber() == d.ViewNumber {
count++
}
if msg.Type() == PrepareRequestType {
hasRequest = true
}
}
}
d.Logger.Debug("check preparations", zap.Bool("hasReq", hasRequest),
zap.Int("count", count),
zap.Int("M", d.M()))
if hasRequest && count >= d.M() {
if d.isAntiMEVExtensionEnabled() {
d.sendPreCommit()
d.changeTimer(d.timePerBlock)
d.checkPreCommit()
} else {
d.sendCommit()
d.changeTimer(d.timePerBlock)
d.checkCommit()
}
}
}
func (d *DBFT[H]) checkPreCommit() {
if !d.hasAllTransactions() {
d.Logger.Debug("check preCommit: some transactions are missing", zap.Any("hashes", d.MissingTransactions))
return
}
count := 0
for _, msg := range d.PreCommitPayloads {
if msg != nil && msg.ViewNumber() == d.ViewNumber {
count++
}
}
if count < d.M() {
d.Logger.Debug("not enough PreCommits to process PreBlock", zap.Int("count", count))
return
}
d.preBlock = d.CreatePreBlock()
if !d.preBlockProcessed {
d.Logger.Info("processing PreBlock",
zap.Uint32("height", d.BlockIndex),
zap.Uint("view", uint(d.ViewNumber)),
zap.Int("tx_count", len(d.preBlock.Transactions())),
zap.Int("preCommit_count", count))
err := d.ProcessPreBlock(d.preBlock)
if err != nil {
d.Logger.Info("can't process PreBlock, waiting for more PreCommits to be collected",
zap.Error(err),
zap.Int("count", count))
return
}
d.preBlockProcessed = true
}
// Require PreCommit sent by self for reliability. This condition must not be
// removed because:
// 1) we need to filter out WatchOnly nodes;
// 2) CNs that have not sent PreCommit must not skip this stage (although it's OK
// from the DKG/TPKE side to build final Block based only on other CN's data).
if d.PreCommitSent() {
d.verifyCommitPayloadsAgainstHeader()
d.sendCommit()
d.changeTimer(d.timePerBlock)
d.checkCommit()
} else {
if !d.Context.WatchOnly() {
d.Logger.Debug("can't send commit since self preCommit not yet sent")
}
}
}
func (d *DBFT[H]) checkCommit() {
if !d.hasAllTransactions() {
d.Logger.Debug("check commit: some transactions are missing", zap.Any("hashes", d.MissingTransactions))
return
}
// return if we received commits from other nodes
// before receiving PrepareRequest from Speaker
count := 0
for _, msg := range d.CommitPayloads {
if msg != nil && msg.ViewNumber() == d.ViewNumber {
count++
}
}
if count < d.M() {
d.Logger.Debug("not enough to commit", zap.Int("count", count))
return
}
d.block = d.CreateBlock()
hash := d.block.Hash()
d.Logger.Info("approving block",
zap.Uint32("height", d.BlockIndex),
zap.Stringer("hash", hash),
zap.Int("tx_count", len(d.block.Transactions())),
zap.Stringer("merkle", d.block.MerkleRoot()),
zap.Stringer("prev", d.block.PrevHash()))
err := d.ProcessBlock(d.block)
if err != nil {
if d.isAntiMEVExtensionEnabled() {
d.Logger.Info("can't process Block, waiting for more Commits to be collected",
zap.Error(err),
zap.Int("count", count))
return
}
d.Logger.Fatal("block processing failed", zap.Error(err))
}
d.blockProcessed = true
// Do not initialize consensus process immediately. It's the caller's duty to
// start the new block acceptance process and call Reset at the
// new height.
}
func (d *DBFT[H]) checkChangeView(view byte) {
if d.ViewNumber >= view {
return
}
count := 0
for _, msg := range d.ChangeViewPayloads {
if msg != nil && msg.GetChangeView().NewViewNumber() >= view {
count++
}
}
if count < d.M() {
return
}
if !d.Context.WatchOnly() {
msg := d.ChangeViewPayloads[d.MyIndex]
if msg != nil && msg.GetChangeView().NewViewNumber() < view {
d.broadcast(d.makeChangeView(uint64(d.Timer.Now().UnixNano()), CVChangeAgreement))
}
}
d.initializeConsensus(view, d.lastBlockTimestamp)
}
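
checkPrepare and checkCommit above share one quorum pattern: count the payloads
that belong to the current view and proceed only once at least M() of them are
collected (for preparations, a PrepareRequest must also be among the payloads).
The standalone sketch below mirrors that counting logic with local stand-in
types; the quorum size m is passed in as a plain parameter because its
derivation from the validator count is not part of this diff.

package main

import "fmt"

// payload is a local stand-in for a consensus payload: its view number and
// whether it is the speaker's PrepareRequest.
type payload struct {
	view      byte
	isRequest bool
}

// enoughPreparations mirrors the checkPrepare loop: count payloads for the
// given view and require that a PrepareRequest is present among them.
func enoughPreparations(payloads []*payload, view byte, m int) bool {
	count, hasRequest := 0, false
	for _, p := range payloads {
		if p == nil {
			continue // empty slot, e.g. a validator we heard nothing from
		}
		if p.view == view {
			count++
		}
		if p.isRequest {
			hasRequest = true
		}
	}
	return hasRequest && count >= m
}

func main() {
	msgs := []*payload{
		{view: 0, isRequest: true}, // speaker's PrepareRequest
		{view: 0}, {view: 0},       // two PrepareResponses
		nil,                        // silent validator
		{view: 1},                  // stale payload from another view
	}
	fmt.Println(enoughPreparations(msgs, 0, 3)) // true: request present, 3 >= 3
	fmt.Println(enoughPreparations(msgs, 1, 3)) // false: only one view-1 payload
}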

View File

@@ -1,9 +1,9 @@
package dbft
// Commit is an interface for dBFT Commit message.
type Commit interface {
// Signature returns the commit's signature field,
// which is the final block signature for the current epoch both for dBFT 2.0
// and for the anti-MEV extension.
Signature() []byte
}

930
config.go
View File

@@ -1,465 +1,465 @@
package dbft
import (
"errors"
"time"
"go.uber.org/zap"
)
// Config contains initialization and working parameters for dBFT.
type Config[H Hash] struct {
// Logger
Logger *zap.Logger
// Timer
Timer Timer
// TimePerBlock is the minimum time that needs to pass before another block
// will be accepted even if there are pending transactions in the node's
// mempool. This value may be updated every block.
TimePerBlock func() time.Duration
// MaxTimePerBlock is the maximum time that may pass before another block is
// accepted if there are no pending transactions in the node's mempool. This
// value may be updated every block. If set, enables dynamic block time
// extension: blocks are accepted with interval from TimePerBlock to
// MaxTimePerBlock (in CV-less scenario) depending on the presence of
// transactions in the node's pool, ref.
// https://github.com/neo-project/neo/issues/4018.
MaxTimePerBlock func() time.Duration
// TimestampIncrement is the amount of units to add to the timestamp
// if the current time is less than that of the previous context.
// Millisecond precision is used by default.
TimestampIncrement uint64
// AntiMEVExtensionEnablingHeight denotes the height starting from which dBFT
// Anti-MEV extensions should be enabled. -1 means no extension is enabled.
AntiMEVExtensionEnablingHeight int64
// GetKeyPair returns the index of the node in the list of validators
// together with its key pair.
GetKeyPair func([]PublicKey) (int, PrivateKey, PublicKey)
// NewPreBlockFromContext should allocate, fill from Context and return new block.PreBlock.
NewPreBlockFromContext func(ctx *Context[H]) PreBlock[H]
// NewBlockFromContext should allocate, fill from Context and return new block.Block.
NewBlockFromContext func(ctx *Context[H]) Block[H]
// RequestTx is a callback which is called when transaction contained
// in current block can't be found in memory pool. The slice received by
// this callback MUST NOT be changed.
RequestTx func(h ...H)
// SubscribeForTxs is a callback which is called when dBFT needs to track incoming
// mempool transactions. Subscription is supposed to be single-use, no unsubscription
// is initiated by dBFT, hence it's the user's duty to manage and release resources.
// This callback is active iff MaxTimePerBlock is set.
SubscribeForTxs func()
// StopTxFlow is a callback which is called when the process no longer needs
// any transactions.
StopTxFlow func()
// GetTx returns a transaction from memory pool.
GetTx func(h H) Transaction[H]
// GetVerified returns a slice of verified transactions
// to be proposed in a new block.
GetVerified func() []Transaction[H]
// VerifyPreBlock verifies if preBlock is valid.
VerifyPreBlock func(b PreBlock[H]) bool
// VerifyBlock verifies if block is valid.
VerifyBlock func(b Block[H]) bool
// Broadcast should broadcast payload m to the consensus nodes.
Broadcast func(m ConsensusPayload[H])
// ProcessPreBlock is called every time a new preBlock is accepted.
ProcessPreBlock func(b PreBlock[H]) error
// ProcessBlock is called every time a new block is accepted.
ProcessBlock func(b Block[H]) error
// GetBlock should return block with hash.
GetBlock func(h H) Block[H]
// WatchOnly tells if a node should only watch.
WatchOnly func() bool
// CurrentHeight returns index of the last accepted block.
CurrentHeight func() uint32
// CurrentBlockHash returns hash of the last accepted block.
CurrentBlockHash func() H
// GetValidators returns list of the validators.
// When called with a transaction list it must return
// list of the validators of the next block.
// If this function ever returns 0-length slice, dbft will panic.
GetValidators func(...Transaction[H]) []PublicKey
// NewConsensusPayload is a constructor for payload.ConsensusPayload.
NewConsensusPayload func(*Context[H], MessageType, any) ConsensusPayload[H]
// NewPrepareRequest is a constructor for payload.PrepareRequest.
NewPrepareRequest func(ts uint64, nonce uint64, transactionHashes []H) PrepareRequest[H]
// NewPrepareResponse is a constructor for payload.PrepareResponse.
NewPrepareResponse func(preparationHash H) PrepareResponse[H]
// NewChangeView is a constructor for payload.ChangeView.
NewChangeView func(newViewNumber byte, reason ChangeViewReason, timestamp uint64) ChangeView
// NewPreCommit is a constructor for payload.PreCommit.
NewPreCommit func(data []byte) PreCommit
// NewCommit is a constructor for payload.Commit.
NewCommit func(signature []byte) Commit
// NewRecoveryRequest is a constructor for payload.RecoveryRequest.
NewRecoveryRequest func(ts uint64) RecoveryRequest
// NewRecoveryMessage is a constructor for payload.RecoveryMessage.
NewRecoveryMessage func() RecoveryMessage[H]
// VerifyPrepareRequest performs external PrepareRequest verification and returns nil if it's successful.
VerifyPrepareRequest func(p ConsensusPayload[H]) error
// VerifyPrepareResponse performs external PrepareResponse verification and returns nil if it's successful.
VerifyPrepareResponse func(p ConsensusPayload[H]) error
// VerifyPreCommit performs external PreCommit verification and returns nil if it's successful.
// Note that PreBlock-dependent PreCommit verification should be performed inside PreBlock.Verify
// callback.
VerifyPreCommit func(p ConsensusPayload[H]) error
// VerifyCommit performs external Commit verification and returns nil if it's successful.
// Note that Block-dependent Commit verification should be performed inside Block.Verify
// callback.
VerifyCommit func(p ConsensusPayload[H]) error
}
const defaultSecondsPerBlock = time.Second * 15
const defaultTimestampIncrement = uint64(time.Millisecond / time.Nanosecond)
func defaultConfig[H Hash]() *Config[H] {
// fields which are set to nil must be provided from client
return &Config[H]{
Logger: zap.NewNop(),
TimePerBlock: func() time.Duration { return defaultSecondsPerBlock },
TimestampIncrement: defaultTimestampIncrement,
GetKeyPair: nil,
RequestTx: func(...H) {},
StopTxFlow: func() {},
GetTx: func(H) Transaction[H] { return nil },
GetVerified: func() []Transaction[H] { return make([]Transaction[H], 0) },
VerifyBlock: func(Block[H]) bool { return true },
Broadcast: func(ConsensusPayload[H]) {},
ProcessBlock: func(Block[H]) error { return nil },
GetBlock: func(H) Block[H] { return nil },
WatchOnly: func() bool { return false },
CurrentHeight: nil,
CurrentBlockHash: nil,
GetValidators: nil,
VerifyPrepareRequest: func(ConsensusPayload[H]) error { return nil },
VerifyPrepareResponse: func(ConsensusPayload[H]) error { return nil },
VerifyCommit: func(ConsensusPayload[H]) error { return nil },
AntiMEVExtensionEnablingHeight: -1,
VerifyPreBlock: func(PreBlock[H]) bool { return true },
VerifyPreCommit: func(ConsensusPayload[H]) error { return nil },
}
}
func checkConfig[H Hash](cfg *Config[H]) error {
if cfg.GetKeyPair == nil {
return errors.New("private key is nil")
}
if cfg.Timer == nil {
return errors.New("Timer is nil")
}
if cfg.CurrentHeight == nil {
return errors.New("CurrentHeight is nil")
}
if cfg.CurrentBlockHash == nil {
return errors.New("CurrentBlockHash is nil")
}
if cfg.GetValidators == nil {
return errors.New("GetValidators is nil")
}
if cfg.NewBlockFromContext == nil {
return errors.New("NewBlockFromContext is nil")
}
if cfg.NewConsensusPayload == nil {
return errors.New("NewConsensusPayload is nil")
}
if cfg.NewPrepareRequest == nil {
return errors.New("NewPrepareRequest is nil")
}
if cfg.NewPrepareResponse == nil {
return errors.New("NewPrepareResponse is nil")
}
if cfg.NewChangeView == nil {
return errors.New("NewChangeView is nil")
}
if cfg.NewCommit == nil {
return errors.New("NewCommit is nil")
}
if cfg.NewRecoveryRequest == nil {
return errors.New("NewRecoveryRequest is nil")
}
if cfg.NewRecoveryMessage == nil {
return errors.New("NewRecoveryMessage is nil")
}
if cfg.AntiMEVExtensionEnablingHeight >= 0 {
if cfg.NewPreBlockFromContext == nil {
return errors.New("NewPreBlockFromContext is nil")
}
if cfg.ProcessPreBlock == nil {
return errors.New("ProcessPreBlock is nil")
}
if cfg.NewPreCommit == nil {
return errors.New("NewPreCommit is nil")
}
} else {
if cfg.NewPreBlockFromContext != nil {
return errors.New("NewPreBlockFromContext is set, but AntiMEVExtensionEnablingHeight is not specified")
}
if cfg.ProcessPreBlock != nil {
return errors.New("ProcessPreBlock is set, but AntiMEVExtensionEnablingHeight is not specified")
}
if cfg.NewPreCommit != nil {
return errors.New("NewPreCommit is set, but AntiMEVExtensionEnablingHeight is not specified")
}
}
if (cfg.MaxTimePerBlock == nil) != (cfg.SubscribeForTxs == nil) {
return errors.New("MaxTimePerBlock and SubscribeForTxs should be specified/not specified at the same time")
}
return nil
}
// WithGetKeyPair sets GetKeyPair.
func WithGetKeyPair[H Hash](f func(pubs []PublicKey) (int, PrivateKey, PublicKey)) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetKeyPair = f
}
}
// WithLogger sets Logger.
func WithLogger[H Hash](log *zap.Logger) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.Logger = log
}
}
// WithTimer sets Timer.
func WithTimer[H Hash](t Timer) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.Timer = t
}
}
// WithTimePerBlock sets TimePerBlock.
func WithTimePerBlock[H Hash](f func() time.Duration) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.TimePerBlock = f
}
}
// WithMaxTimePerBlock sets MaxTimePerBlock.
func WithMaxTimePerBlock[H Hash](f func() time.Duration) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.MaxTimePerBlock = f
}
}
// WithAntiMEVExtensionEnablingHeight sets AntiMEVExtensionEnablingHeight.
func WithAntiMEVExtensionEnablingHeight[H Hash](h int64) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.AntiMEVExtensionEnablingHeight = h
}
}
// WithTimestampIncrement sets TimestampIncrement.
func WithTimestampIncrement[H Hash](u uint64) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.TimestampIncrement = u
}
}
// WithNewPreBlockFromContext sets NewPreBlockFromContext.
func WithNewPreBlockFromContext[H Hash](f func(ctx *Context[H]) PreBlock[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPreBlockFromContext = f
}
}
// WithNewBlockFromContext sets NewBlockFromContext.
func WithNewBlockFromContext[H Hash](f func(ctx *Context[H]) Block[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewBlockFromContext = f
}
}
// WithRequestTx sets RequestTx.
func WithRequestTx[H Hash](f func(h ...H)) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.RequestTx = f
}
}
// WithSubscribeForTxs sets SubscribeForTxs.
func WithSubscribeForTxs[H Hash](f func()) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.SubscribeForTxs = f
}
}
// WithStopTxFlow sets StopTxFlow.
func WithStopTxFlow[H Hash](f func()) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.StopTxFlow = f
}
}
// WithGetTx sets GetTx.
func WithGetTx[H Hash](f func(h H) Transaction[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetTx = f
}
}
// WithGetVerified sets GetVerified.
func WithGetVerified[H Hash](f func() []Transaction[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetVerified = f
}
}
// WithVerifyPreBlock sets VerifyPreBlock.
func WithVerifyPreBlock[H Hash](f func(b PreBlock[H]) bool) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPreBlock = f
}
}
// WithVerifyBlock sets VerifyBlock.
func WithVerifyBlock[H Hash](f func(b Block[H]) bool) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyBlock = f
}
}
// WithBroadcast sets Broadcast.
func WithBroadcast[H Hash](f func(m ConsensusPayload[H])) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.Broadcast = f
}
}
// WithProcessBlock sets the ProcessBlock callback. Note that with the anti-MEV
// extension disabled, a non-nil error return is a no-op.
func WithProcessBlock[H Hash](f func(b Block[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.ProcessBlock = f
}
}
// WithProcessPreBlock sets ProcessPreBlock.
func WithProcessPreBlock[H Hash](f func(b PreBlock[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.ProcessPreBlock = f
}
}
// WithGetBlock sets GetBlock.
func WithGetBlock[H Hash](f func(h H) Block[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetBlock = f
}
}
// WithWatchOnly sets WatchOnly.
func WithWatchOnly[H Hash](f func() bool) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.WatchOnly = f
}
}
// WithCurrentHeight sets CurrentHeight.
func WithCurrentHeight[H Hash](f func() uint32) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.CurrentHeight = f
}
}
// WithCurrentBlockHash sets CurrentBlockHash.
func WithCurrentBlockHash[H Hash](f func() H) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.CurrentBlockHash = f
}
}
// WithGetValidators sets GetValidators.
func WithGetValidators[H Hash](f func(txs ...Transaction[H]) []PublicKey) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetValidators = f
}
}
// WithNewConsensusPayload sets NewConsensusPayload.
func WithNewConsensusPayload[H Hash](f func(ctx *Context[H], typ MessageType, msg any) ConsensusPayload[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewConsensusPayload = f
}
}
// WithNewPrepareRequest sets NewPrepareRequest.
func WithNewPrepareRequest[H Hash](f func(ts uint64, nonce uint64, transactionsHashes []H) PrepareRequest[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPrepareRequest = f
}
}
// WithNewPrepareResponse sets NewPrepareResponse.
func WithNewPrepareResponse[H Hash](f func(preparationHash H) PrepareResponse[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPrepareResponse = f
}
}
// WithNewChangeView sets NewChangeView.
func WithNewChangeView[H Hash](f func(newViewNumber byte, reason ChangeViewReason, ts uint64) ChangeView) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewChangeView = f
}
}
// WithNewCommit sets NewCommit.
func WithNewCommit[H Hash](f func(signature []byte) Commit) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewCommit = f
}
}
// WithNewPreCommit sets NewPreCommit.
func WithNewPreCommit[H Hash](f func(signature []byte) PreCommit) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPreCommit = f
}
}
// WithNewRecoveryRequest sets NewRecoveryRequest.
func WithNewRecoveryRequest[H Hash](f func(ts uint64) RecoveryRequest) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewRecoveryRequest = f
}
}
// WithNewRecoveryMessage sets NewRecoveryMessage.
func WithNewRecoveryMessage[H Hash](f func() RecoveryMessage[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewRecoveryMessage = f
}
}
// WithVerifyPrepareRequest sets VerifyPrepareRequest.
func WithVerifyPrepareRequest[H Hash](f func(prepareReq ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPrepareRequest = f
}
}
// WithVerifyPrepareResponse sets VerifyPrepareResponse.
func WithVerifyPrepareResponse[H Hash](f func(prepareResp ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPrepareResponse = f
}
}
// WithVerifyPreCommit sets VerifyPreCommit.
func WithVerifyPreCommit[H Hash](f func(preCommit ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPreCommit = f
}
}
// WithVerifyCommit sets VerifyCommit.
func WithVerifyCommit[H Hash](f func(commit ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyCommit = f
}
}
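
config.go configures dBFT exclusively through functional options of type
func(*Config[H]): defaults are filled in first, user-supplied options are
applied on top, and checkConfig then enforces invariants such as
MaxTimePerBlock and SubscribeForTxs being set (or unset) together. The
standalone sketch below mirrors that flow with local stand-in types; it is not
the dbft API itself, and the newConfig constructor is hypothetical (the real
constructor's signature is not part of this diff).

package main

import (
	"errors"
	"fmt"
	"time"
)

// config and option are local stand-ins for the pattern used in config.go.
type config struct {
	timePerBlock    func() time.Duration
	maxTimePerBlock func() time.Duration
	subscribeForTxs func()
}

type option func(*config)

func withTimePerBlock(f func() time.Duration) option {
	return func(c *config) { c.timePerBlock = f }
}

func withMaxTimePerBlock(f func() time.Duration) option {
	return func(c *config) { c.maxTimePerBlock = f }
}

func withSubscribeForTxs(f func()) option {
	return func(c *config) { c.subscribeForTxs = f }
}

// newConfig mirrors the defaults-then-options-then-check flow of
// defaultConfig/checkConfig above, including the rule that MaxTimePerBlock
// and SubscribeForTxs must be specified (or omitted) together.
func newConfig(opts ...option) (*config, error) {
	c := &config{timePerBlock: func() time.Duration { return 15 * time.Second }}
	for _, opt := range opts {
		opt(c)
	}
	if (c.maxTimePerBlock == nil) != (c.subscribeForTxs == nil) {
		return nil, errors.New("MaxTimePerBlock and SubscribeForTxs must be specified together")
	}
	return c, nil
}

func main() {
	// Setting only MaxTimePerBlock trips the pairing check.
	_, err := newConfig(withMaxTimePerBlock(func() time.Duration { return 15 * time.Second }))
	fmt.Println(err)

	cfg, err := newConfig(
		withTimePerBlock(func() time.Duration { return time.Second }),
		withMaxTimePerBlock(func() time.Duration { return 15 * time.Second }),
		withSubscribeForTxs(func() {}),
	)
	fmt.Println(err, cfg.timePerBlock())
}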
package dbft
import (
"errors"
"time"
"go.uber.org/zap"
)
// Config contains initialization and working parameters for dBFT.
type Config[H Hash] struct {
// Logger
Logger *zap.Logger
// Timer
Timer Timer
// TimePerBlock is the minimum time that needs to pass before another block
// will be accepted even if there are pending transactions in the node's
// mempool. This value may be updated every block.
TimePerBlock func() time.Duration
// MaxTimePerBlock is the maximum time that may pass before another block is
// accepted if there are no pending transactions in the node's mempool. This
// value may be updated every block. If set, enables dynamic block time
// extension: blocks are accepted with interval from TimePerBlock to
// MaxTimePerBlock (in CV-less scenario) depending on the presence of
// transactions in the node's pool, ref.
// https://github.com/neo-project/neo/issues/4018.
MaxTimePerBlock func() time.Duration
// TimestampIncrement increment is the amount of units to add to timestamp
// if current time is less than that of previous context.
// By default use millisecond precision.
TimestampIncrement uint64
// AntiMEVExtensionEnablingHeight denotes the height starting from which dBFT
// Anti-MEV extensions should be enabled. -1 means no extension is enabled.
AntiMEVExtensionEnablingHeight int64
// GetKeyPair returns an index of the node in the list of validators
// together with it's key pair.
GetKeyPair func([]PublicKey) (int, PrivateKey, PublicKey)
// NewPreBlockFromContext should allocate, fill from Context and return new block.PreBlock.
NewPreBlockFromContext func(ctx *Context[H]) PreBlock[H]
// NewBlockFromContext should allocate, fill from Context and return new block.Block.
NewBlockFromContext func(ctx *Context[H]) Block[H]
// RequestTx is a callback which is called when transaction contained
// in current block can't be found in memory pool. The slice received by
// this callback MUST NOT be changed.
RequestTx func(h ...H)
// SubscribeForTxs is a callback which is called when dBFT needs to track incoming
// mempool transactions. Subscription is supposed to be single-use, no unsubscription
// is initiated by dBFT, hence it's the user's duty to manage and release resources.
// This callback is active iff MaxTimePerBlock is set.
SubscribeForTxs func()
// StopTxFlow is a callback which is called when the process no longer needs
// any transactions.
StopTxFlow func()
// GetTx returns a transaction from memory pool.
GetTx func(h H) Transaction[H]
// GetVerified returns a slice of verified transactions
// to be proposed in a new block.
GetVerified func() []Transaction[H]
// VerifyPreBlock verifies if preBlock is valid.
VerifyPreBlock func(b PreBlock[H]) bool
// VerifyBlock verifies if block is valid.
VerifyBlock func(b Block[H]) bool
// Broadcast should broadcast payload m to the consensus nodes.
Broadcast func(m ConsensusPayload[H])
// ProcessBlock is called every time new preBlock is accepted.
ProcessPreBlock func(b PreBlock[H]) error
// ProcessBlock is called every time new block is accepted.
ProcessBlock func(b Block[H]) error
// GetBlock should return block with hash.
GetBlock func(h H) Block[H]
// WatchOnly tells if a node should only watch.
WatchOnly func() bool
// CurrentHeight returns index of the last accepted block.
CurrentHeight func() uint32
// CurrentBlockHash returns hash of the last accepted block.
CurrentBlockHash func() H
// GetValidators returns list of the validators.
// When called with a transaction list it must return
// list of the validators of the next block.
// If this function ever returns 0-length slice, dbft will panic.
GetValidators func(...Transaction[H]) []PublicKey
// NewConsensusPayload is a constructor for payload.ConsensusPayload.
NewConsensusPayload func(*Context[H], MessageType, any) ConsensusPayload[H]
// NewPrepareRequest is a constructor for payload.PrepareRequest.
NewPrepareRequest func(ts uint64, nonce uint64, transactionHashes []H) PrepareRequest[H]
// NewPrepareResponse is a constructor for payload.PrepareResponse.
NewPrepareResponse func(preparationHash H) PrepareResponse[H]
// NewChangeView is a constructor for payload.ChangeView.
NewChangeView func(newViewNumber byte, reason ChangeViewReason, timestamp uint64) ChangeView
// NewPreCommit is a constructor for payload.PreCommit.
NewPreCommit func(data []byte) PreCommit
// NewCommit is a constructor for payload.Commit.
NewCommit func(signature []byte) Commit
// NewRecoveryRequest is a constructor for payload.RecoveryRequest.
NewRecoveryRequest func(ts uint64) RecoveryRequest
// NewRecoveryMessage is a constructor for payload.RecoveryMessage.
NewRecoveryMessage func() RecoveryMessage[H]
// VerifyPrepareRequest can perform external payload verification and returns true iff it was successful.
VerifyPrepareRequest func(p ConsensusPayload[H]) error
// VerifyPrepareResponse performs external PrepareResponse verification and returns nil if it's successful.
VerifyPrepareResponse func(p ConsensusPayload[H]) error
// VerifyPreCommit performs external PreCommit verification and returns nil if it's successful.
// Note that PreBlock-dependent PreCommit verification should be performed inside PreBlock.Verify
// callback.
VerifyPreCommit func(p ConsensusPayload[H]) error
// VerifyCommit performs external Commit verification and returns nil if it's successful.
// Note that Block-dependent Commit verification should be performed inside Block.Verify
// callback.
VerifyCommit func(p ConsensusPayload[H]) error
}
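// Illustrative sketch (not part of the package): a typical GetKeyPair callback
// scans the validator list for the node's own key and returns -1 when the node
// is not in the current validator set, which leaves it watch-only (see
// Context.MyIndex). The keyStore type and its match function are hypothetical
// user-side code; the key comparison itself is user-defined.
type keyStore struct {
	priv  PrivateKey
	pub   PublicKey
	match func(PublicKey) bool // reports whether the given validator key is ours
}

func (k *keyStore) getKeyPair(pubs []PublicKey) (int, PrivateKey, PublicKey) {
	for i := range pubs {
		if k.match(pubs[i]) {
			return i, k.priv, k.pub
		}
	}
	return -1, nil, nil
}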
const defaultSecondsPerBlock = time.Second * 15
const defaultTimestampIncrement = uint64(time.Millisecond / time.Nanosecond)
func defaultConfig[H Hash]() *Config[H] {
// fields which are set to nil must be provided by the client
return &Config[H]{
Logger: zap.NewNop(),
TimePerBlock: func() time.Duration { return defaultSecondsPerBlock },
TimestampIncrement: defaultTimestampIncrement,
GetKeyPair: nil,
RequestTx: func(...H) {},
StopTxFlow: func() {},
GetTx: func(H) Transaction[H] { return nil },
GetVerified: func() []Transaction[H] { return make([]Transaction[H], 0) },
VerifyBlock: func(Block[H]) bool { return true },
Broadcast: func(ConsensusPayload[H]) {},
ProcessBlock: func(Block[H]) error { return nil },
GetBlock: func(H) Block[H] { return nil },
WatchOnly: func() bool { return false },
CurrentHeight: nil,
CurrentBlockHash: nil,
GetValidators: nil,
VerifyPrepareRequest: func(ConsensusPayload[H]) error { return nil },
VerifyPrepareResponse: func(ConsensusPayload[H]) error { return nil },
VerifyCommit: func(ConsensusPayload[H]) error { return nil },
AntiMEVExtensionEnablingHeight: -1,
VerifyPreBlock: func(PreBlock[H]) bool { return true },
VerifyPreCommit: func(ConsensusPayload[H]) error { return nil },
}
}
func checkConfig[H Hash](cfg *Config[H]) error {
if cfg.GetKeyPair == nil {
return errors.New("private key is nil")
}
if cfg.Timer == nil {
return errors.New("Timer is nil")
}
if cfg.CurrentHeight == nil {
return errors.New("CurrentHeight is nil")
}
if cfg.CurrentBlockHash == nil {
return errors.New("CurrentBlockHash is nil")
}
if cfg.GetValidators == nil {
return errors.New("GetValidators is nil")
}
if cfg.NewBlockFromContext == nil {
return errors.New("NewBlockFromContext is nil")
}
if cfg.NewConsensusPayload == nil {
return errors.New("NewConsensusPayload is nil")
}
if cfg.NewPrepareRequest == nil {
return errors.New("NewPrepareRequest is nil")
}
if cfg.NewPrepareResponse == nil {
return errors.New("NewPrepareResponse is nil")
}
if cfg.NewChangeView == nil {
return errors.New("NewChangeView is nil")
}
if cfg.NewCommit == nil {
return errors.New("NewCommit is nil")
}
if cfg.NewRecoveryRequest == nil {
return errors.New("NewRecoveryRequest is nil")
}
if cfg.NewRecoveryMessage == nil {
return errors.New("NewRecoveryMessage is nil")
}
if cfg.AntiMEVExtensionEnablingHeight >= 0 {
if cfg.NewPreBlockFromContext == nil {
return errors.New("NewPreBlockFromContext is nil")
}
if cfg.ProcessPreBlock == nil {
return errors.New("ProcessPreBlock is nil")
}
if cfg.NewPreCommit == nil {
return errors.New("NewPreCommit is nil")
}
} else {
if cfg.NewPreBlockFromContext != nil {
return errors.New("NewPreBlockFromContext is set, but AntiMEVExtensionEnablingHeight is not specified")
}
if cfg.ProcessPreBlock != nil {
return errors.New("ProcessPreBlock is set, but AntiMEVExtensionEnablingHeight is not specified")
}
if cfg.NewPreCommit != nil {
return errors.New("NewPreCommit is set, but AntiMEVExtensionEnablingHeight is not specified")
}
}
if (cfg.MaxTimePerBlock == nil) != (cfg.SubscribeForTxs == nil) {
return errors.New("MaxTimePerBlock and SubscribeForTxs should be specified/not specified at the same time")
}
return nil
}
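// Illustration (derived from the checks above): the anti-MEV callbacks travel as a
// set, so configuring WithAntiMEVExtensionEnablingHeight without
// WithNewPreBlockFromContext fails validation with "NewPreBlockFromContext is nil",
// while providing WithNewPreCommit without the enabling height fails with
// "NewPreCommit is set, but AntiMEVExtensionEnablingHeight is not specified".
// Likewise, WithMaxTimePerBlock and WithSubscribeForTxs must be set together.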
// WithGetKeyPair sets GetKeyPair.
func WithGetKeyPair[H Hash](f func(pubs []PublicKey) (int, PrivateKey, PublicKey)) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetKeyPair = f
}
}
// WithLogger sets Logger.
func WithLogger[H Hash](log *zap.Logger) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.Logger = log
}
}
// WithTimer sets Timer.
func WithTimer[H Hash](t Timer) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.Timer = t
}
}
// WithTimePerBlock sets TimePerBlock.
func WithTimePerBlock[H Hash](f func() time.Duration) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.TimePerBlock = f
}
}
// WithMaxTimePerBlock sets MaxTimePerBlock.
func WithMaxTimePerBlock[H Hash](f func() time.Duration) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.MaxTimePerBlock = f
}
}
// WithAntiMEVExtensionEnablingHeight sets AntiMEVExtensionEnablingHeight.
func WithAntiMEVExtensionEnablingHeight[H Hash](h int64) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.AntiMEVExtensionEnablingHeight = h
}
}
// WithTimestampIncrement sets TimestampIncrement.
func WithTimestampIncrement[H Hash](u uint64) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.TimestampIncrement = u
}
}
// WithNewPreBlockFromContext sets NewPreBlockFromContext.
func WithNewPreBlockFromContext[H Hash](f func(ctx *Context[H]) PreBlock[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPreBlockFromContext = f
}
}
// WithNewBlockFromContext sets NewBlockFromContext.
func WithNewBlockFromContext[H Hash](f func(ctx *Context[H]) Block[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewBlockFromContext = f
}
}
// WithRequestTx sets RequestTx.
func WithRequestTx[H Hash](f func(h ...H)) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.RequestTx = f
}
}
// WithSubscribeForTxs sets SubscribeForTxs.
func WithSubscribeForTxs[H Hash](f func()) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.SubscribeForTxs = f
}
}
// WithStopTxFlow sets StopTxFlow.
func WithStopTxFlow[H Hash](f func()) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.StopTxFlow = f
}
}
// WithGetTx sets GetTx.
func WithGetTx[H Hash](f func(h H) Transaction[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetTx = f
}
}
// WithGetVerified sets GetVerified.
func WithGetVerified[H Hash](f func() []Transaction[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetVerified = f
}
}
// WithVerifyPreBlock sets VerifyPreBlock.
func WithVerifyPreBlock[H Hash](f func(b PreBlock[H]) bool) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPreBlock = f
}
}
// WithVerifyBlock sets VerifyBlock.
func WithVerifyBlock[H Hash](f func(b Block[H]) bool) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyBlock = f
}
}
// WithBroadcast sets Broadcast.
func WithBroadcast[H Hash](f func(m ConsensusPayload[H])) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.Broadcast = f
}
}
// WithProcessBlock sets ProcessBlock callback. Note that when the anti-MEV extension
// is disabled, a non-nil error return is a no-op.
func WithProcessBlock[H Hash](f func(b Block[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.ProcessBlock = f
}
}
// WithProcessPreBlock sets ProcessPreBlock.
func WithProcessPreBlock[H Hash](f func(b PreBlock[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.ProcessPreBlock = f
}
}
// WithGetBlock sets GetBlock.
func WithGetBlock[H Hash](f func(h H) Block[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetBlock = f
}
}
// WithWatchOnly sets WatchOnly.
func WithWatchOnly[H Hash](f func() bool) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.WatchOnly = f
}
}
// WithCurrentHeight sets CurrentHeight.
func WithCurrentHeight[H Hash](f func() uint32) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.CurrentHeight = f
}
}
// WithCurrentBlockHash sets CurrentBlockHash.
func WithCurrentBlockHash[H Hash](f func() H) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.CurrentBlockHash = f
}
}
// WithGetValidators sets GetValidators.
func WithGetValidators[H Hash](f func(txs ...Transaction[H]) []PublicKey) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.GetValidators = f
}
}
// WithNewConsensusPayload sets NewConsensusPayload.
func WithNewConsensusPayload[H Hash](f func(ctx *Context[H], typ MessageType, msg any) ConsensusPayload[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewConsensusPayload = f
}
}
// WithNewPrepareRequest sets NewPrepareRequest.
func WithNewPrepareRequest[H Hash](f func(ts uint64, nonce uint64, transactionsHashes []H) PrepareRequest[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPrepareRequest = f
}
}
// WithNewPrepareResponse sets NewPrepareResponse.
func WithNewPrepareResponse[H Hash](f func(preparationHash H) PrepareResponse[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPrepareResponse = f
}
}
// WithNewChangeView sets NewChangeView.
func WithNewChangeView[H Hash](f func(newViewNumber byte, reason ChangeViewReason, ts uint64) ChangeView) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewChangeView = f
}
}
// WithNewCommit sets NewCommit.
func WithNewCommit[H Hash](f func(signature []byte) Commit) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewCommit = f
}
}
// WithNewPreCommit sets NewPreCommit.
func WithNewPreCommit[H Hash](f func(signature []byte) PreCommit) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewPreCommit = f
}
}
// WithNewRecoveryRequest sets NewRecoveryRequest.
func WithNewRecoveryRequest[H Hash](f func(ts uint64) RecoveryRequest) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewRecoveryRequest = f
}
}
// WithNewRecoveryMessage sets NewRecoveryMessage.
func WithNewRecoveryMessage[H Hash](f func() RecoveryMessage[H]) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.NewRecoveryMessage = f
}
}
// WithVerifyPrepareRequest sets VerifyPrepareRequest.
func WithVerifyPrepareRequest[H Hash](f func(prepareReq ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPrepareRequest = f
}
}
// WithVerifyPrepareResponse sets VerifyPrepareResponse.
func WithVerifyPrepareResponse[H Hash](f func(prepareResp ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPrepareResponse = f
}
}
// WithVerifyPreCommit sets VerifyPreCommit.
func WithVerifyPreCommit[H Hash](f func(preCommit ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyPreCommit = f
}
}
// WithVerifyCommit sets VerifyCommit.
func WithVerifyCommit[H Hash](f func(commit ConsensusPayload[H]) error) func(config *Config[H]) {
return func(cfg *Config[H]) {
cfg.VerifyCommit = f
}
}
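// A minimal sketch (in-package, illustrative only) of how the functional options
// above compose: every With* helper is a plain closure mutating *Config[H], so a
// node is configured by applying options to the defaults and validating the result
// with checkConfig. The parameters here are placeholders; a real setup must also
// supply GetKeyPair, the block/payload constructors and the other mandatory
// callbacks via opts, otherwise checkConfig reports the first missing one.
func buildConfig[H Hash](t Timer, height func() uint32, hash func() H,
	validators func(...Transaction[H]) []PublicKey, opts ...func(*Config[H])) (*Config[H], error) {
	cfg := defaultConfig[H]()
	for _, opt := range append([]func(*Config[H]){
		WithTimer[H](t),
		WithCurrentHeight[H](height),
		WithCurrentBlockHash[H](hash),
		WithGetValidators[H](validators),
	}, opts...) {
		opt(cfg)
	}
	if err := checkConfig(cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}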

View File

@ -1,26 +1,26 @@
package dbft
// ConsensusMessage is an interface for generic dBFT message.
type ConsensusMessage[H Hash] interface {
// ViewNumber returns view number when this message was originated.
ViewNumber() byte
// Type returns type of this message.
Type() MessageType
// Payload returns this message's actual payload.
Payload() any
// GetChangeView returns payload as if it was ChangeView.
GetChangeView() ChangeView
// GetPrepareRequest returns payload as if it was PrepareRequest.
GetPrepareRequest() PrepareRequest[H]
// GetPrepareResponse returns payload as if it was PrepareResponse.
GetPrepareResponse() PrepareResponse[H]
// GetPreCommit returns payload as if it was PreCommit.
GetPreCommit() PreCommit
// GetCommit returns payload as if it was Commit.
GetCommit() Commit
// GetRecoveryRequest returns payload as if it was RecoveryRequest.
GetRecoveryRequest() RecoveryRequest
// GetRecoveryMessage returns payload as if it was RecoveryMessage.
GetRecoveryMessage() RecoveryMessage[H]
}
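// Illustrative sketch (hypothetical helper, not part of the package, requires the
// fmt import): consumers typically switch on Type() and then use the matching Get*
// accessor; only methods declared above (plus ChangeView.NewViewNumber, used by the
// Context code later in this diff) are relied upon here.
func describeMessage[H Hash](m ConsensusMessage[H]) string {
	if m.Type() == ChangeViewType {
		return fmt.Sprintf("%s: view %d -> %d", m.Type(), m.ViewNumber(), m.GetChangeView().NewViewNumber())
	}
	return fmt.Sprintf("%s at view %d", m.Type(), m.ViewNumber())
}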

View File

@ -1,39 +1,39 @@
package dbft
import "fmt"
// MessageType is a type for dBFT consensus messages.
type MessageType byte
// The following 7 constants enumerate all possible types of consensus messages.
const (
ChangeViewType MessageType = 0x00
PrepareRequestType MessageType = 0x20
PrepareResponseType MessageType = 0x21
PreCommitType MessageType = 0x31
CommitType MessageType = 0x30
RecoveryRequestType MessageType = 0x40
RecoveryMessageType MessageType = 0x41
)
// String implements fmt.Stringer interface.
func (m MessageType) String() string {
switch m {
case ChangeViewType:
return "ChangeView"
case PrepareRequestType:
return "PrepareRequest"
case PrepareResponseType:
return "PrepareResponse"
case CommitType:
return "Commit"
case PreCommitType:
return "PreCommit"
case RecoveryRequestType:
return "RecoveryRequest"
case RecoveryMessageType:
return "RecoveryMessage"
default:
return fmt.Sprintf("UNKNOWN(%02x)", byte(m))
}
}
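// Illustration: these are plain byte values, so decoding a received type is a
// direct conversion and logging goes through the Stringer above:
//
//	fmt.Println(MessageType(0x21)) // PrepareResponse
//	fmt.Println(MessageType(0xff)) // UNKNOWN(ff)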

View File

@ -1,19 +1,19 @@
package dbft
// ConsensusPayload is a generic payload type which is exchanged
// between the nodes.
type ConsensusPayload[H Hash] interface {
ConsensusMessage[H]
// ValidatorIndex returns index of validator from which
// payload was originated from.
ValidatorIndex() uint16
// SetValidatorIndex sets validator index.
SetValidatorIndex(i uint16)
Height() uint32
// Hash returns 32-byte checksum of the payload.
Hash() H
}
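// Illustrative sketch (hypothetical helper, not part of the package): dBFT keeps
// per-validator payload slots (see Context.PreparationPayloads and friends later in
// this diff) and ValidatorIndex selects the slot; this sketch rejects out-of-range
// indices and duplicate senders.
func storePayload[H Hash](slots []ConsensusPayload[H], p ConsensusPayload[H]) bool {
	i := int(p.ValidatorIndex())
	if i >= len(slots) || slots[i] != nil {
		return false
	}
	slots[i] = p
	return true
}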

View File

@ -1,445 +1,445 @@
package dbft
import (
"crypto/rand"
"encoding/binary"
"time"
)
// HeightView is a block height/consensus view pair.
type HeightView struct {
Height uint32
View byte
}
// Context is a main dBFT structure which
// contains all information needed for performing transitions.
type Context[H Hash] struct {
// Config is dBFT's Config instance.
Config *Config[H]
// Priv is node's private key.
Priv PrivateKey
// Pub is node's public key.
Pub PublicKey
preBlock PreBlock[H]
preHeader PreBlock[H]
block Block[H]
header Block[H]
// blockProcessed denotes whether Config.ProcessBlock callback was called for the current
// height. If so, then no second call must happen. After a new block is received by the
// user, dBFT stops processing any new transactions and messages, as well as handling
// timeouts, till the next call to Reset.
blockProcessed bool
// preBlockProcessed is true when Config.ProcessPreBlock callback was
// invoked for the current height. This happens once and dbft continues
// to march towards proper commit after that.
preBlockProcessed bool
// BlockIndex is current block index.
BlockIndex uint32
// ViewNumber is current view number.
ViewNumber byte
// Validators is a current validator list.
Validators []PublicKey
// MyIndex is an index of the current node in the Validators array.
// It is equal to -1 if node is not a validator or is WatchOnly.
MyIndex int
// PrimaryIndex is an index of the primary node in the current epoch.
PrimaryIndex uint
// PrevHash is a hash of the previous block.
PrevHash H
// Timestamp is a nanosecond-precision timestamp.
Timestamp uint64
Nonce uint64
// TransactionHashes is a slice of hashes of proposed transactions in the current block.
TransactionHashes []H
// MissingTransactions is a slice of hashes containing missing transactions for the current block.
MissingTransactions []H
// Transactions is a map containing actual transactions for the current block.
Transactions map[H]Transaction[H]
// PreparationPayloads stores consensus Prepare* payloads for the current epoch.
PreparationPayloads []ConsensusPayload[H]
// PreCommitPayloads stores consensus PreCommit payloads sent through all epochs
// as a part of anti-MEV dBFT extension. It is assumed that valid PreCommit
// payloads can only be sent once by a single node per the whole set of consensus
// epochs for particular block. Invalid PreCommit payloads are kicked off this
// list immediately (if PrepareRequest was received for the current round, so
// it's possible to verify PreCommit against PreBlock built on PrepareRequest)
// or stored till the corresponding PrepareRequest receiving.
PreCommitPayloads []ConsensusPayload[H]
// CommitPayloads stores consensus Commit payloads sent throughout all epochs. It
// is assumed that valid Commit payload can only be sent once by a single node per
// the whole set of consensus epochs for particular block. Invalid commit payloads
// are kicked off this list immediately (if PrepareRequest was received for the
// current round, so it's possible to verify Commit against it) or stored till
// the corresponding PrepareRequest receiving.
CommitPayloads []ConsensusPayload[H]
// ChangeViewPayloads stores consensus ChangeView payloads for the current epoch.
ChangeViewPayloads []ConsensusPayload[H]
// LastChangeViewPayloads stores consensus ChangeView payloads for the last epoch.
LastChangeViewPayloads []ConsensusPayload[H]
// LastSeenMessage array stores the height and view of the last seen message, for each validator.
// If this node never heard a thing from validator i, LastSeenMessage[i] will be nil.
LastSeenMessage []*HeightView
lastBlockTimestamp uint64 // ns-precision timestamp from the last header (used for the next block timestamp calculations).
lastBlockTime time.Time // Wall clock time of when we started (as in PrepareRequest) creating the last block (used for timer adjustments).
lastBlockIndex uint32
lastBlockView byte
timePerBlock time.Duration // minimum amount of time that needs to pass before the pending block will be accepted if there are some transactions in the proposal.
maxTimePerBlock time.Duration // maximum amount of time that is allowed to pass before the pending block will be accepted even if there are no transactions in the proposal.
txSubscriptionOn bool
prepareSentTime time.Time
rttEstimates rtt
}
// N returns total number of validators.
func (c *Context[H]) N() int { return len(c.Validators) }
// F returns number of validators which can be faulty.
func (c *Context[H]) F() int { return (len(c.Validators) - 1) / 3 }
// M returns number of validators which must function correctly.
func (c *Context[H]) M() int { return len(c.Validators) - c.F() }
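// Worked example (illustrative): with N == 7 validators, F == (7-1)/3 == 2 faults
// are tolerated and M == 7-2 == 5 correctly functioning nodes are required; with
// N == 4, F == 1 and M == 3, i.e. the classic 3F+1 BFT bound.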
// GetPrimaryIndex returns index of a primary node for the specified view.
func (c *Context[H]) GetPrimaryIndex(viewNumber byte) uint {
p := (int(c.BlockIndex) - int(viewNumber)) % len(c.Validators)
if p >= 0 {
return uint(p)
}
return uint(p + len(c.Validators))
}
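// Worked example (illustrative): with 7 validators and BlockIndex == 100,
// GetPrimaryIndex(0) == 100%7 == 2, GetPrimaryIndex(1) == 99%7 == 1 and
// GetPrimaryIndex(3) == 97%7 == 6, so each view change shifts the primary to the
// previous validator index, wrapping around the list.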
// IsPrimary returns true iff node is primary for current height and view.
func (c *Context[H]) IsPrimary() bool { return c.MyIndex == int(c.PrimaryIndex) }
// IsBackup returns true iff node is backup for current height and view.
func (c *Context[H]) IsBackup() bool {
return c.MyIndex >= 0 && !c.IsPrimary()
}
// WatchOnly returns true iff node takes no active part in consensus.
func (c *Context[H]) WatchOnly() bool { return c.MyIndex < 0 || c.Config.WatchOnly() }
// CountCommitted returns number of received Commit (or PreCommit for anti-MEV
// extension) messages not only for the current epoch but also for any other epoch.
func (c *Context[H]) CountCommitted() (count int) {
for i := range c.CommitPayloads {
// Consider both Commit and PreCommit payloads since both Commit and PreCommit
// phases are one-directional (do not impose view change).
if c.CommitPayloads[i] != nil || c.PreCommitPayloads[i] != nil {
count++
}
}
return
}
// CountFailed returns the number of nodes with which no communication was performed
// for this view and that haven't sent the Commit message in previous views.
func (c *Context[H]) CountFailed() (count int) {
for i, hv := range c.LastSeenMessage {
if (c.CommitPayloads[i] == nil && c.PreCommitPayloads[i] == nil) &&
(hv == nil || hv.Height < c.BlockIndex || hv.View < c.ViewNumber) {
count++
}
}
return
}
// RequestSentOrReceived returns true iff PrepareRequest
// was sent or received for the current epoch.
func (c *Context[H]) RequestSentOrReceived() bool {
return c.PreparationPayloads[c.PrimaryIndex] != nil
}
// ResponseSent returns true iff Prepare* message was sent for the current epoch.
func (c *Context[H]) ResponseSent() bool {
return !c.WatchOnly() && c.PreparationPayloads[c.MyIndex] != nil
}
// PreCommitSent returns true iff PreCommit message was sent for the current epoch
// assuming that the node can't go further than current epoch after PreCommit was sent.
func (c *Context[H]) PreCommitSent() bool {
return !c.WatchOnly() && c.PreCommitPayloads[c.MyIndex] != nil
}
// CommitSent returns true iff Commit message was sent for the current epoch
// assuming that the node can't go further than current epoch after commit was sent.
func (c *Context[H]) CommitSent() bool {
return !c.WatchOnly() && c.CommitPayloads[c.MyIndex] != nil
}
// BlockSent returns true iff block was formed AND sent for the current height.
// Once the block is sent, the consensus stops processing new transactions and
// messages, as well as handling timeouts.
//
// Implementation note: the implementation of BlockSent differs from the C# one.
// The C# algorithm checks ConsensusContext's Block.Transactions for null to define
// whether the block was formed, and the only place where the block can be formed is
// the ConsensusContext's CreateBlock function, right after enough Commits have been
// received. In our implementation, on the contrary, we don't have access to the
// block's Transactions field and can't rely on a block nil check, because the call
// to CreateBlock happens in several places (one of them is right after PrepareRequest
// receiving). Thus, we have a separate Context.blockProcessed field
// for the described purpose.
func (c *Context[H]) BlockSent() bool { return c.blockProcessed }
// ViewChanging returns true iff node is in a process of changing view.
func (c *Context[H]) ViewChanging() bool {
if c.WatchOnly() {
return false
}
cv := c.ChangeViewPayloads[c.MyIndex]
return cv != nil && cv.GetChangeView().NewViewNumber() > c.ViewNumber
}
// NotAcceptingPayloadsDueToViewChanging returns true if node should not accept new payloads.
func (c *Context[H]) NotAcceptingPayloadsDueToViewChanging() bool {
return c.ViewChanging() && !c.MoreThanFNodesCommittedOrLost()
}
// MoreThanFNodesCommittedOrLost returns true iff a number of nodes which either committed
// or are faulty is more than maximum amount of allowed faulty nodes.
// A possible attack can happen if the last node to commit is malicious and either sends change view after his
// commit to stall nodes in a higher view, or if he refuses to send recovery messages. In addition, if a node
// asking change views loses network or crashes and comes back when nodes are committed in more than one higher
// numbered view, it is possible for the node accepting recovery to commit in any of the higher views, thus
// potentially splitting nodes among views and stalling the network.
func (c *Context[H]) MoreThanFNodesCommittedOrLost() bool {
return c.CountCommitted()+c.CountFailed() > c.F()
}
// Header returns current header from context. May be nil if no header has been
// constructed yet. Do not change the resulting header.
func (c *Context[H]) Header() Block[H] {
return c.header
}
// PreHeader returns current preHeader from context. May be nil if no preHeader has
// been constructed yet. Do not change the resulting preHeader.
func (c *Context[H]) PreHeader() PreBlock[H] {
return c.preHeader
}
// PreBlock returns current PreBlock from context. May be nil if no PreBlock has
// been constructed yet (even if PreHeader is already constructed).
// External changes in the PreBlock will be seen by dBFT.
func (c *Context[H]) PreBlock() PreBlock[H] {
return c.preBlock
}
func (c *Context[H]) reset(view byte, ts uint64) {
c.MyIndex = -1
c.prepareSentTime = time.Time{}
c.lastBlockTimestamp = ts
c.unsubscribeFromTransactions()
if view == 0 {
c.PrevHash = c.Config.CurrentBlockHash()
c.BlockIndex = c.Config.CurrentHeight() + 1
c.Validators = c.Config.GetValidators()
c.timePerBlock = c.Config.TimePerBlock()
if c.Config.MaxTimePerBlock != nil {
c.maxTimePerBlock = c.Config.MaxTimePerBlock()
}
n := len(c.Validators)
c.LastChangeViewPayloads = emptyReusableSlice(c.LastChangeViewPayloads, n)
c.LastSeenMessage = emptyReusableSlice(c.LastSeenMessage, n)
c.blockProcessed = false
c.preBlockProcessed = false
} else {
for i := range c.Validators {
m := c.ChangeViewPayloads[i]
if m != nil && m.GetChangeView().NewViewNumber() >= view {
c.LastChangeViewPayloads[i] = m
} else {
c.LastChangeViewPayloads[i] = nil
}
}
}
c.MyIndex, c.Priv, c.Pub = c.Config.GetKeyPair(c.Validators)
c.block = nil
c.preBlock = nil
c.header = nil
c.preHeader = nil
n := len(c.Validators)
c.ChangeViewPayloads = emptyReusableSlice(c.ChangeViewPayloads, n)
if view == 0 {
c.PreCommitPayloads = emptyReusableSlice(c.PreCommitPayloads, n)
c.CommitPayloads = emptyReusableSlice(c.CommitPayloads, n)
}
c.PreparationPayloads = emptyReusableSlice(c.PreparationPayloads, n)
if c.Transactions == nil { // Init.
c.Transactions = make(map[H]Transaction[H])
} else { // Regular use.
clear(c.Transactions)
}
c.TransactionHashes = nil
if c.MissingTransactions != nil {
c.MissingTransactions = c.MissingTransactions[:0]
}
c.PrimaryIndex = c.GetPrimaryIndex(view)
c.ViewNumber = view
if c.MyIndex >= 0 {
c.LastSeenMessage[c.MyIndex] = &HeightView{c.BlockIndex, c.ViewNumber}
}
}
func emptyReusableSlice[E any](s []E, n int) []E {
if len(s) == n {
clear(s)
return s
}
return make([]E, n)
}
// Fill initializes consensus when node is a speaker. It doesn't perform any
// context modifications if MaxTimePerBlock extension is enabled and there are
// no transactions in the memory pool and force is not set.
func (c *Context[H]) Fill(force bool) bool {
txx := c.Config.GetVerified()
if c.Config.MaxTimePerBlock != nil && !force && len(txx) == 0 {
return false
}
b := make([]byte, 8)
_, _ = rand.Read(b)
c.Nonce = binary.LittleEndian.Uint64(b)
c.TransactionHashes = make([]H, len(txx))
for i := range txx {
h := txx[i].Hash()
c.TransactionHashes[i] = h
c.Transactions[h] = txx[i]
}
c.Timestamp = c.lastBlockTimestamp + c.Config.TimestampIncrement
if now := c.getTimestamp(); now > c.Timestamp {
c.Timestamp = now
}
return true
}
// getTimestamp returns a nanosecond-precision timestamp truncated to a multiple of
// TimestampIncrement using the current context config.
func (c *Context[H]) getTimestamp() uint64 {
return uint64(c.Config.Timer.Now().UnixNano()) / c.Config.TimestampIncrement * c.Config.TimestampIncrement
}
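// Worked example (illustrative): with the default TimestampIncrement of
// 1_000_000 ns (one millisecond), a wall-clock reading of
// 1_700_000_000_123_456_789 ns is truncated to 1_700_000_000_123_000_000 ns, so
// proposal timestamps advance in whole milliseconds, and Fill above additionally
// guarantees at least one increment over the previous block's timestamp.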
// CreateBlock returns resulting block for the current epoch.
func (c *Context[H]) CreateBlock() Block[H] {
if c.block == nil {
if c.block = c.MakeHeader(); c.block == nil {
return nil
}
txx := make([]Transaction[H], len(c.TransactionHashes))
for i, h := range c.TransactionHashes {
txx[i] = c.Transactions[h]
}
// Anti-MEV extension properly sets PreBlock transactions once during PreBlock
// construction and then never updates these transactions in the dBFT context.
// Thus, user must not reuse txx if anti-MEV extension is enabled. However,
// we don't skip a call to Block.SetTransactions since it may be used as a
// signal to the user's code to finalize the block.
c.block.SetTransactions(txx)
}
return c.block
}
// CreatePreBlock returns PreBlock for the current epoch.
func (c *Context[H]) CreatePreBlock() PreBlock[H] {
if c.preBlock == nil {
if c.preBlock = c.MakePreHeader(); c.preBlock == nil {
return nil
}
txx := make([]Transaction[H], len(c.TransactionHashes))
for i, h := range c.TransactionHashes {
txx[i] = c.Transactions[h]
}
c.preBlock.SetTransactions(txx)
}
return c.preBlock
}
// isAntiMEVExtensionEnabled returns whether Anti-MEV dBFT extension is enabled
// at the currently processing block height.
func (c *Context[H]) isAntiMEVExtensionEnabled() bool {
return c.Config.AntiMEVExtensionEnablingHeight >= 0 && uint32(c.Config.AntiMEVExtensionEnablingHeight) <= c.BlockIndex
}
// MakeHeader returns half-filled block for the current epoch.
// All hashable fields will be filled.
func (c *Context[H]) MakeHeader() Block[H] {
if c.header == nil {
if !c.RequestSentOrReceived() {
return nil
}
// For anti-MEV dBFT extension it's important to have PreBlock processed and
// all envelopes decrypted, because a single PrepareRequest is not enough to
// construct proper Block.
if c.isAntiMEVExtensionEnabled() {
if !c.preBlockProcessed {
return nil
}
}
c.header = c.Config.NewBlockFromContext(c)
}
return c.header
}
// MakePreHeader returns a half-filled PreBlock for the current epoch.
// All hashable fields will be filled.
func (c *Context[H]) MakePreHeader() PreBlock[H] {
if c.preHeader == nil {
if !c.RequestSentOrReceived() {
return nil
}
c.preHeader = c.Config.NewPreBlockFromContext(c)
}
return c.preHeader
}
// hasAllTransactions returns true iff all transactions were received
// for the proposed block.
func (c *Context[H]) hasAllTransactions() bool {
return len(c.TransactionHashes) == len(c.Transactions)
}
func (c *Context[H]) subscribeForTransactions() {
c.txSubscriptionOn = true
c.Config.SubscribeForTxs()
}
func (c *Context[H]) unsubscribeFromTransactions() {
c.txSubscriptionOn = false
}

1502
dbft.go

File diff suppressed because it is too large

View File

@ -7,10 +7,10 @@ import (
"testing"
"time"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/tutus-one/tutus-consensus/timer"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/timer"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)

View File

@ -1,9 +1,9 @@
# Project-specific labels
## Component
Currently only these are used, but the list can be extended in the future:
- tla+
Related to the TLA+ algorithm specification

View File

@ -1,64 +1,64 @@
# Release instructions
This document outlines the dbft release process. It can be used as a todo
list for a new release.
## Check the state
These should run successfully:
* build
* unit-tests
* lint
* simulation with default settings
## Update CHANGELOG and ROADMAP
Add an entry to the CHANGELOG.md following the style established there. Add a
codename, version and release date in the heading. Write a paragraph
describing the most significant changes done in this release. If the dBFT
configuration was changed, some API was marked as deprecated, any experimental
changes were made in the user-facing code and users' feedback is needed, or
if there's any other information that requires a user's response, write
another separate paragraph for those who use the dbft package. Then, add sections
with release content describing each change in detail and with a reference to
GitHub issues and/or PRs. Minor issues that don't affect the package end-user may
be grouped under a single label.
* "New features" section should include new abilities that were added to the
dBFT/API, are directly visible or available to the user and are large
enough to be treated as a feature. Do not include minor user-facing
improvements and changes that don't affect the user-facing functionality
even if they are new.
* "Behaviour changes" section should include any incompatible changes in default
settings or in API that are available to the user. Add a note about changes
a user needs to make if they use the affected code.
* "Improvements" section should include user-facing changes that are too
insignificant to be treated as a feature and are not directly visible to the
package end-user, such as performance optimizations, refactoring and internal
API changes.
* "Bugs fixed" section should include a set of bugs fixed since the previous
release with optional bug cause or consequences description.
Create a PR with CHANGELOG changes, review/merge it.
## Create a GitHub release and a tag
Use "Draft a new release" button in the "Releases" section. Create a new
`vX.Y.Z` tag for it following the semantic versioning standard. Put change log
for this release into the description. Do not attach any binaries.
Set the "Set as the latest release" checkbox if this is the latest stable
release or "Set as a pre-release" if this is an unstable pre-release.
Press the "Publish release" button.
## Close GitHub milestone
Close corresponding X.Y.Z GitHub milestone.
## Announcements
Copy the GitHub release page link to:
* Element channel
## Dependent projects update
Create an issue or PR to fetch the updated package version in the dependent
repositories.

View File

@ -1,111 +1,111 @@
<mxfile host="app.diagrams.net" modified="2024-06-24T11:45:32.984Z" agent="Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0" etag="L3zGyTC-LyAz5kXq654f" version="24.5.5" type="google">
<diagram name="Page-1" id="gx1AT7QsytIHyGW8taHa">
<mxGraphModel grid="1" page="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="zO6A_hVda2gypDU5CBnV-14" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=16;fontColor=#404040;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" target="zO6A_hVda2gypDU5CBnV-6">
<mxGeometry relative="1" as="geometry">
<mxPoint x="471" y="35" as="sourcePoint" />
<mxPoint x="590" y="160" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-15" value="&lt;div style=&quot;font-size: 14px;&quot;&gt;&lt;font style=&quot;font-size: 14px;&quot;&gt;send PReq (primary)&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;font-size: 14px;&quot;&gt;&lt;font style=&quot;font-size: 14px;&quot;&gt;or&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;font-size: 14px;&quot;&gt;&lt;font style=&quot;font-size: 14px;&quot;&gt;send PResp (backup)&lt;br&gt;&lt;/font&gt;&lt;/div&gt;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=16;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="zO6A_hVda2gypDU5CBnV-14">
<mxGeometry x="0.175" y="3" relative="1" as="geometry">
<mxPoint x="-3" y="23" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-3" value="&lt;font style=&quot;font-size: 16px;&quot; face=&quot;Comic Sans MS&quot;&gt;initialized&lt;/font&gt;" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;fontFamily=Georgia;fontSize=16;strokeColor=#EA6B66;" vertex="1" parent="1">
<mxGeometry x="340" y="10" width="130" height="50" as="geometry" />
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-23" style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=14;fontColor=#404040;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;" edge="1" parent="1" source="zO6A_hVda2gypDU5CBnV-6" target="zO6A_hVda2gypDU5CBnV-7">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-24" value="| PReq &lt;strong style=&quot;font-family: noto_regular; color: rgb(75, 75, 75); font-size: 10pt;&quot;&gt; &lt;/strong&gt;PResp | ≥ M&lt;span style=&quot;font-family: noto_regular; color: rgb(75, 75, 75); font-size: 10pt;&quot;&gt;&lt;/span&gt;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=14;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="zO6A_hVda2gypDU5CBnV-23">
<mxGeometry x="-0.24" y="-1" relative="1" as="geometry">
<mxPoint x="4" y="8" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-6" value="prepareSent" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;fontFamily=Georgia;fontSize=16;strokeColor=#EA6B66;" vertex="1" parent="1">
<mxGeometry x="520" y="160" width="130" height="50" as="geometry" />
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-25" style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=14;fontColor=#404040;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;" edge="1" parent="1" source="zO6A_hVda2gypDU5CBnV-7" target="zO6A_hVda2gypDU5CBnV-8">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-26" value="| Commit | ≥ M" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=14;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="zO6A_hVda2gypDU5CBnV-25">
<mxGeometry x="-0.1818" y="-2" relative="1" as="geometry">
<mxPoint y="6" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-7" value="commitSent" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;fontFamily=Georgia;fontSize=16;strokeColor=#EA6B66;" vertex="1" parent="1">
<mxGeometry x="520" y="310" width="130" height="50" as="geometry" />
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-8" value="commitAckSent" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;fontFamily=Georgia;fontSize=16;strokeColor=#EA6B66;" vertex="1" parent="1">
<mxGeometry x="520" y="470" width="130" height="50" as="geometry" />
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-11" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;fontFamily=Comic Sans MS;fontSize=16;entryX=0;entryY=0.5;entryDx=0;entryDy=0;entryPerimeter=0;endSize=4;startSize=4;jumpSize=8;strokeWidth=2;startArrow=classicThin;startFill=1;endArrow=none;endFill=0;" edge="1" parent="1" source="zO6A_hVda2gypDU5CBnV-9" target="zO6A_hVda2gypDU5CBnV-3">
<mxGeometry relative="1" as="geometry">
<mxPoint x="275" y="85" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-12" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;t/o&lt;/font&gt;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=16;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="zO6A_hVda2gypDU5CBnV-11">
<mxGeometry x="-0.2125" y="1" relative="1" as="geometry">
<mxPoint as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-16" value="&amp;nbsp;t/o&amp;nbsp; " style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;entryPerimeter=0;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=14;fontColor=#404040;startArrow=classicThin;startFill=1;endArrow=none;endFill=0;startSize=4;endSize=4;" edge="1" parent="1" target="zO6A_hVda2gypDU5CBnV-6">
<mxGeometry relative="1" as="geometry">
<mxPoint x="340" y="185" as="sourcePoint" />
<mxPoint x="470" y="185" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-17" style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=14;fontColor=#404040;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;" edge="1" parent="1" source="zO6A_hVda2gypDU5CBnV-9" target="zO6A_hVda2gypDU5CBnV-6">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="275" y="240" />
<mxPoint x="480" y="240" />
<mxPoint x="480" y="140" />
<mxPoint x="585" y="140" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-22" value="&amp;nbsp;| Commit | &amp;gt; F&amp;nbsp; " style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=14;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="zO6A_hVda2gypDU5CBnV-17">
<mxGeometry x="-0.4" y="1" relative="1" as="geometry">
<mxPoint x="-8" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-27" style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=14;fontColor=#404040;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;" edge="1" parent="1" source="zO6A_hVda2gypDU5CBnV-9" target="zO6A_hVda2gypDU5CBnV-3">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="160" y="185" />
<mxPoint x="160" y="-20" />
<mxPoint x="405" y="-20" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-28" value="&amp;nbsp;| CV | ≥ M, init at next view&amp;nbsp; " style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=14;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="zO6A_hVda2gypDU5CBnV-27">
<mxGeometry x="0.2286" y="-3" relative="1" as="geometry">
<mxPoint x="47" y="-4" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zO6A_hVda2gypDU5CBnV-9" value="cv" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;fontFamily=Georgia;fontSize=16;strokeColor=#EA6B66;" vertex="1" parent="1">
<mxGeometry x="210" y="160" width="130" height="50" as="geometry" />
</mxCell>
<mxCell id="bSvnLd7m7FiDBpnTT2Ld-1" style="edgeStyle=orthogonalEdgeStyle;rounded=1;jumpSize=8;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;strokeWidth=2;fontFamily=Comic Sans MS;fontSize=14;fontColor=#404040;startArrow=none;startFill=0;endArrow=classicThin;endFill=1;startSize=4;endSize=4;" edge="1" parent="1">
<mxGeometry relative="1" as="geometry">
<mxPoint x="584.5" y="520" as="sourcePoint" />
<mxPoint x="584.5" y="630" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="bSvnLd7m7FiDBpnTT2Ld-2" value="| Ack | ≥ M" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontSize=14;fontFamily=Comic Sans MS;fontColor=#404040;" connectable="0" vertex="1" parent="bSvnLd7m7FiDBpnTT2Ld-1">
<mxGeometry x="-0.1818" y="-2" relative="1" as="geometry">
<mxPoint x="3" y="6" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="bSvnLd7m7FiDBpnTT2Ld-3" value="blockAccepted" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;fontFamily=Georgia;fontSize=16;strokeColor=#EA6B66;" vertex="1" parent="1">
<mxGeometry x="520" y="630" width="130" height="50" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

View File

@ -1,456 +1,456 @@
# dBFT formal models
This section contains a set of dBFT's formal specifications written in the
[TLA⁺](https://lamport.azurewebsites.net/tla/tla.html) language. The models
describe the core algorithm logic at a high level and can be used to illustrate
some basic dBFT concepts and to validate the algorithm in terms of liveness and
fairness. Note that the presented models do not precisely follow the dBFT
implementation in this repository and may omit some implementation details in
favor of specification simplicity and the fundamental philosophy of TLA⁺.
However, the models directly reflect the liveness problems dBFT 2.0 has; they
can be (and are intended to be) used for dBFT 2.0 liveness evaluation and
further algorithm improvements. Any contributions, questions and discussions on
the presented models are highly appreciated.
## dBFT 2.0 models
### Basic dBFT 2.0 model
This specification is the basis taken for further algorithm investigation.
We recommend starting your acquaintance with the dBFT models from this one.
The specification describes the process of accepting a single block: a set of
resource managers `RM` (effectively a set of consensus nodes) communicates via
the shared consensus message pool `msgs` and takes the decision within a few
consensus rounds (views). Each consensus node has its own state at every step of
the behaviour. A consensus node may send a consensus message by adding it to the
`msgs` pool. To perform a transition between states, the node must either send a
consensus message or observe a particular set of consensus messages in the shared
message pool required for that transition.
Here's the scheme of transitions between consensus node states:
![Basic dBFT model transitions scheme](./.github/dbft.png)
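For readers who prefer code to TLA⁺, here is a minimal Go sketch of the entities
the basic model operates on (it is not taken from this repository's
implementation; all type, constant and field names are illustrative): per-node
states, consensus message types and the shared `msgs` pool.

```go
package main

import "fmt"

// State mirrors the per-node states used by the basic model
// (see the transition scheme above); names are illustrative.
type State int

const (
	Initialized State = iota
	PrepareSent
	CommitSent
	CV // view change requested
	BlockAccepted
)

// MsgType mirrors the consensus message types of the model.
type MsgType int

const (
	PrepareRequest MsgType = iota
	PrepareResponse
	Commit
	ChangeView
)

// Msg is a single consensus message in the shared pool.
type Msg struct {
	Type MsgType
	RM   int // sending resource manager (consensus node)
	View int
}

func main() {
	// The shared message pool `msgs`: every node sees every message.
	msgs := []Msg{}

	// A node "sends" a message by adding it to the pool...
	msgs = append(msgs, Msg{Type: PrepareRequest, RM: 0, View: 0})

	// ...and transitions between states either by sending a message or
	// by observing a required set of messages in the pool.
	state := map[int]State{0: PrepareSent, 1: Initialized}
	fmt.Println(len(msgs), state)
}
```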
The specification also describes two kinds of malicious node behaviour that can
be combined, i.e. enabled or disabled independently for each particular node:
1. "Dead" nodes. A "dead" node is completely excluded from the consensus process:
it can neither send consensus messages nor perform state transitions.
A node may become "dead" at any step in the middle of the consensus process.
Once a node becomes "dead", there is no way for it to rejoin the consensus
process.
2. "Faulty" nodes. A "faulty" node is allowed to send consensus messages of *any*
type at *any* step and to change its view without following the dBFT view
changing rules. A node may become "faulty" at any step in the middle of the
consensus process. Once a node becomes "faulty", there is no way for it to
become "good" again.
The specification contains several invariants and liveness properties that must
be checked by the TLC Model Checker. These formulas mostly describe two basic
guarantees expected from the dBFT algorithm (a code-level analogy is sketched
after this list):
1. No fork must happen. There must be no situation in which two different
blocks are accepted at two different consensus rounds (views).
2. A block must always be accepted. There must be no situation in which nodes
are stuck in the middle of the consensus process and cannot take any further steps.
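As a rough code-level analogy (the actual checks are TLA⁺ formulas; the
predicates and names below are made up for illustration), the two guarantees can
be thought of as follows:

```go
package main

import "fmt"

// Block is an accepted block as seen by one node (illustrative).
type Block struct {
	View int
	Hash string
}

// noFork is the safety analogy: all accepted blocks must be the same,
// regardless of the view they were accepted in.
func noFork(accepted []Block) bool {
	for i := 1; i < len(accepted); i++ {
		if accepted[i].Hash != accepted[0].Hash {
			return false
		}
	}
	return true
}

// blockAccepted is the liveness analogy: eventually every honest node
// reports an accepted block; here it is just "all of them have one".
func blockAccepted(accepted []Block, nodes int) bool {
	return len(accepted) == nodes
}

func main() {
	accepted := []Block{{View: 0, Hash: "h1"}, {View: 1, Hash: "h1"}}
	fmt.Println(noFork(accepted), blockAccepted(accepted, 2))
}
```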
The specification is written and works under several assumptions:
1. All consensus messages are valid. In real life this is guaranteed by verifiable
message signatures. If a malicious or corrupted message is received, it
won't be handled by the node.
2. Exact timeouts (e.g. a timeout while waiting for a particular consensus message)
are not included in the model. However, the model covers timeouts in
general, i.e. a timeout is just the possibility to perform a particular
state transition.
3. All consensus messages must eventually be delivered to all nodes, but the
exact order of delivery isn't guaranteed.
4. The maximum number of consensus rounds (views) is restricted. This constraint
was introduced to reduce the number of possible model states to be checked.
The threshold may be specified via the model configuration, and it is highly
recommended to keep this setting less than or equal to the number of consensus
nodes.
Here you can find the specification file and the TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbft/dbft.tla)
* [TLC Model Checker configuration](./dbft/dbft___AllGoodModel.launch)
### Extended dBFT 2.0 model
This is an experimental dBFT 2.0 specification that extends the
[basic model](#basic-dbft-20-model) in the following way: besides the shared pool
of consensus messages `msgs`, each consensus node has its own local pool of
received and handled messages. Decisions on transitions between node states
are taken by the node based on the state of its local message pool. This approach
allows creating a more accurate, lower-level model which is extremely close to the
dBFT implementation presented in this repository. At the same time, this approach
*significantly* increases the number of considered model states, which leads to
abnormally long TLC Model Checker runs. Thus, we do not recommend using this
model in development and place it here as an example of an alternative (and more
detailed) dBFT specification. The two models are expected to be equivalent in
terms of the liveness locks that can be discovered by either of them, and, speaking
the TLA⁺ language, the Extended dBFT specification implements the
[basic one](#basic-dbft-20-model) (which can be proven and written in TLA⁺, but
stays out of the scope of this task).
Except for this remark and a couple of minor differences, the whole
[basic model](#basic-dbft-20-model) description, constraints and assumptions are
valid for the Extended specification as well. Thus, we highly recommend
considering the [basic model](#basic-dbft-20-model) before going to the Extended
one.
Here you can find the specification file and the TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbftMultipool/dbftMultipool.tla)
* [TLC Model Checker configuration](./dbftMultipool/dbftMultipool___AllGoodModel.launch)
## Proposed dBFT 2.1 models
Based on the liveness lock scenarios found by the TLC Model Checker in the
[basic dBFT 2.0 model](#basic-dbft-20-model), we've developed two extensions of
the dBFT 2.0 protocol that avoid the liveness lock problem and preserve
the safety properties of the algorithm. The extensions currently don't have
any code-level implementation and are presented as TLA⁺ specifications ready to be
reviewed and discussed. The improved protocol presented in the extensions will
be referred to below as dBFT 2.1.
We've checked both dBFT 2.1 models with the TLC Model Checker against the same
set of launch configurations that was used to reveal the liveness problems of the
[basic dBFT 2.0 model](#basic-dbft-20-model). The improved models have a larger
set of states, thus the TLC Model Checker wasn't able to finish the liveness
requirement checks for *all* possible states. However, the checks have passed for
a state graph diameter large enough to believe that the presented models
solve the dBFT 2.0 liveness lock problems.
### Common `Commit` message improvement note
Here and below we assume that `Commit` messages bear preparation hashes
from all nodes that have sent a preparation message (>= `M` of them, but not
including a whole `PrepareRequest`). This quickly synchronizes nodes still at the
preparation stage when someone else collects enough preparations. At the same
time, it prevents malicious/byzantine nodes from sending spoofed `Commit` messages.
The `Commit` message size becomes a little bigger, but since it's just hashes it
still fits into a single packet in the vast majority of cases, so it doesn't
really matter.
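A minimal sketch of what such an extended `Commit` payload could look like,
assuming hypothetical field names (this is not the repository's actual message
format):

```go
package main

import "fmt"

// Commit sketches a dBFT 2.1 Commit payload that, besides the block
// signature, carries the hashes of the preparation messages (>= M of them)
// the sender has collected. Field names are illustrative.
type Commit struct {
	View              int
	BlockSignature    []byte
	PreparationHashes [][]byte // hashes of PrepareRequest/PrepareResponse payloads seen
}

func main() {
	c := Commit{
		View:           0,
		BlockSignature: []byte("sig"),
		PreparationHashes: [][]byte{
			[]byte("hash-of-preq"),
			[]byte("hash-of-presp-1"),
			[]byte("hash-of-presp-2"),
		},
	}
	// A receiver still at the preparation stage can use these hashes to
	// request and verify the preparations it has missed.
	fmt.Println(len(c.PreparationHashes))
}
```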
### dBFT 2.1 stages-based model
The basic idea of this model is to split the consensus process into three subsequent
stages marked as `I`, `II` and `III` on the scheme. To perform a transition between
two subsequent stages, each consensus node should wait for messages from
at least `M` consensus nodes to be received, so that it's possible to build a full
picture of the neighbours' decisions in the current consensus round. In other words,
no transition can happen unless we have messages from `M` nodes; timers are only
set up after we have this number of messages, just to wait for (potentially) the
whole set of them. At the same time, each of the stages has
its own `ChangeView[1,2,3]` message to exit to the next consensus round (view) if
something goes wrong in the current one and there's definitely no way to
continue the consensus process in the current view. A short description of each
stage is given below; please refer to the model scheme and specification for
further details.
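Throughout the models, `N` stands for the total number of consensus nodes, `F`
for the maximum number of nodes allowed to misbehave and `M` for the quorum
size. A quick Go sketch of how these thresholds are usually computed in dBFT
(treat it as a reminder rather than a definition; the exact constants live in
the specifications and the implementation):

```go
package main

import "fmt"

// thresholds returns the standard dBFT fault and quorum thresholds for a
// committee of n nodes: F is the maximum tolerated number of faulty nodes,
// M is the number of nodes required to agree (quorum).
func thresholds(n int) (f, m int) {
	f = (n - 1) / 3
	m = n - f
	return
}

func main() {
	for _, n := range []int{4, 7} {
		f, m := thresholds(n)
		fmt.Printf("N=%d F=%d M=%d\n", n, f, m) // N=4 F=1 M=3; N=7 F=2 M=5
	}
}
```

With the standard four-node committee this gives `F = 1` and `M = 3`, the values
behind the four-node scenarios mentioned throughout this document.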
#### Stage I
Once initialized, a consensus node has two options:
1. Send its `PrepareRequest`/`PrepareResponse` message (and transition to the
`prepareSent` state).
2. Decide to go to the next view on timeout or for any other valid reason (like
a transaction missing from the node's mempool or a wrong proposal) by sending
a `ChangeView1` message (and transition to the `cv1` state).
This scheme is quite similar to the basic dBFT 2.0 model except for the new type of
`ChangeView` message. After that the node enters stage `I` and waits for consensus
messages of stage `I` (`PrepareRequest`, `PrepareResponse` or `ChangeView1`)
from at least `M` neighbours, which it needs to decide on its next actions.
The set of received messages can be arranged in the following ways (a counting
sketch follows the list):
* `M` messages of the `ChangeView1` type denote that `M` nodes have decided to change
their view directly after initialization due to an invalid/missing `PrepareRequest`,
which leads to immediate view changing. This is a "fail fast" route that is the
same as in dBFT 2.0 for the widespread case of a missing primary. No additional
delay is added, everything works as usual.
* `M` preparation messages (of type `PrepareRequest` or `PrepareResponse`) with
no `ChangeView3` seen denote that the majority of nodes have decided to commit, which
means the safe transition to stage `II` can be performed and the `Commit` message
can safely be sent even if there's a `ChangeView1` message in the network. Notice
that the `ChangeView3` check is just a protection against a node seriously lagging
behind.
* `M` messages each of type `PrepareRequest`, `PrepareResponse` or `ChangeView1`,
where at least one message is of type `ChangeView1`, denote that at least `M`
nodes have reached stage `I` and the node can safely take further steps.
The additional `| Commit | ≤ F | CV3 | > 0` condition requires that the majority of
nodes have not sent their `Commit` message, so that it's still possible to
collect enough `ChangeView[2,3]` messages to change the view in further stages.
If so, then the safe transition to stage `II` can be performed and a `ChangeView2`
message can safely be sent.
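The following Go sketch summarizes the stage `I` exit rule described above. It
is purely illustrative (the TLA⁺ specification is the authoritative definition),
and the `| Commit | ≤ F | CV3 | > 0` condition from the scheme is simplified
here to the prose reading "at most `F` `Commit` messages seen":

```go
package main

import "fmt"

// Counts of stage I (and related) messages received from distinct nodes
// in the current view. Names are illustrative.
type Counts struct {
	Prep, CV1, CV3, Commit int
}

// stageIDecision sketches the stage I exit rule: given M and F and the
// message counts, it returns the action the node takes, or "" if it has
// to keep waiting.
func stageIDecision(c Counts, m, f int) string {
	switch {
	case c.CV1 >= m:
		// Fail-fast route: the majority wants a new view right away.
		return "change view"
	case c.Prep >= m && c.CV3 == 0:
		// The majority accepted the proposal: safe to commit.
		return "send Commit"
	case c.Prep+c.CV1 >= m && c.CV1 > 0 && c.Commit <= f:
		// Mixed picture, but not enough commits to finish this view:
		// move to stage II with a ChangeView2.
		return "send ChangeView2"
	default:
		return "" // keep waiting for more stage I messages
	}
}

func main() {
	fmt.Println(stageIDecision(Counts{Prep: 2, CV1: 1}, 3, 1)) // send ChangeView2
}
```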
#### Stage II
Once the node has sent a `Commit` or `ChangeView2` message, it enters stage `II`
of the consensus process and waits for at least `M` messages of stage `II`
(`Commit` or `ChangeView2`) to perform the transition to the next stage. The set
of accepted messages can be arranged in the following ways:
* `M` messages of the `ChangeView2` type denote that `M` nodes have decided to change
their view directly after entering stage `II` due to a timeout while waiting
for the `Commit` messages, which leads to immediate view changing.
* `M` messages of type `Commit` denote that the majority of nodes have decided to
commit, which means the block can be accepted immediately without entering
stage `III`. Notice that this is the regular flow of normal dBFT 2.0 consensus;
it hasn't been changed and proceeds the way it did before.
* `M` messages each of type `Commit` or `ChangeView2`, where not more than `F`
messages are of type `Commit`, denote that the majority of nodes decided to
change their view after entering stage `II` and there are not enough `Commit`
messages to create the block (and produce a fork); thus, the safe transition
to stage `III` can be performed and a `ChangeView3` message can safely be sent
even if there's a `Commit` message in the network.
In addition, a direct transition from the `cv2` state to the `commitSent` state is
added for the case when it's clear that more than `F` nodes have decided to
commit and no `ChangeView3` message has been received, which means that it's
possible to produce a block in the current view (see the decision sketch below).
This path handles a corner case of missing stage `I` messages: `Commit` messages
prove that at least `M` preparation messages exist, but the node went the `cv2`
path just because it missed some of them.
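A similar illustrative sketch of the stage `II` exit rule, again with the TLA⁺
specification as the authoritative source and with hypothetical names:

```go
package main

import "fmt"

// StageII holds stage II message counts from distinct nodes in the current
// view (illustrative names).
type StageII struct {
	Commit, CV2, CV3 int
}

// stageIIDecision sketches the stage II exit rule described above.
func stageIIDecision(c StageII, m, f int) string {
	switch {
	case c.Commit >= m:
		// Regular dBFT 2.0 flow: enough commits, accept the block.
		return "accept block"
	case c.CV2 >= m:
		// The majority timed out waiting for commits: change view.
		return "change view"
	case c.Commit > f && c.CV3 == 0:
		// cv2 -> commitSent shortcut (for a node in the cv2 state):
		// more than F commits prove that at least M preparations exist,
		// so the block can still be produced in this view.
		return "send Commit"
	case c.Commit+c.CV2 >= m && c.Commit <= f:
		// Not enough commits to ever produce a block in this view:
		// safe to move to stage III.
		return "send ChangeView3"
	default:
		return "" // keep waiting
	}
}

func main() {
	fmt.Println(stageIIDecision(StageII{Commit: 1, CV2: 2}, 3, 1)) // send ChangeView3
}
```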
#### Stage III
Unlike the basic dBFT 2.0 model, where a consensus node locks in the commit phase,
stage `III` gives the ability to escape from the commit phase by collecting a set
of `M` `ChangeView3` messages. This phase is reachable as soon as at least `M` nodes
have reached phase `II` (have a `ChangeView2` or `Commit` message sent) and there
are not enough `Commit` messages (<=`F`) to accept the block. This stage is
added to avoid the situation when a node is locked in the `commitSent` state
whereas the rest of the nodes (>`F`) are willing to go to the next view.
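The stage `III` escape condition can be summarized by a one-line predicate
(illustrative only):

```go
package main

import "fmt"

// canEscapeCommit sketches the stage III rule: a node locked in commitSent
// may leave the view once M ChangeView3 messages are collected, which is
// only reachable while commits stay at or below F.
func canEscapeCommit(cv3, commits, m, f int) bool {
	return cv3 >= m && commits <= f
}

func main() {
	fmt.Println(canEscapeCommit(3, 1, 3, 1)) // true: go to the next view
}
```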
Here's the scheme of transitions between consensus node states for the improved
dBFT 2.1 stages-based model:
![dBFT 2.1 stages-based model](./.github/dbft2.1_threeStagedCV.png)
Here you can find the specification file and the basic TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbft2.1_threeStagedCV/dbftCV3.tla)
* [TLC Model Checker configuration](./dbft2.1_threeStagedCV/dbftCV3___AllGoodModel.launch)
### dBFT 2.1 model with the centralized view changes
The improvement taken as a base for this model comes from the pBFT
algorithm and is as follows.
The consensus process is split into two stages with the following meaning:
* Stage `I` holds the node states from which it's allowed to transition to the subsequent
view under the assumption that a *new* proposal will be generated (as the basic dBFT 2.0
model does).
* Stage `II` holds the node states from which it's allowed to perform a view change
*preserving* the proposal from the previous view.
Another vital difference from the basic dBFT 2.0 model is that view changes are
performed by a node on the `DoCV[1,2]` command (consensus message) sent by the
leader of the target view specified via the `DoCV[1,2]` parameters. Aside from the
target view parameter, the `DoCV[1,2]` message contains the set of all related
pre-received consensus messages, so that the receivers of `DoCV[1,2]` are able to
check its validity before the subsequent view change.
A short description of the proposed consensus process is presented below (a sketch
of the `DoCV` message follows). Please refer to the model scheme and specification
for further details.
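A minimal sketch of what a `DoCV[1,2]` message could carry, using hypothetical
type and field names (the TLA⁺ specification defines the actual structure):

```go
package main

import "fmt"

// ConsensusMsg is a placeholder for any pre-received consensus message.
type ConsensusMsg struct {
	Type string
	From int
	View int
}

// DoCV sketches the leader-issued "do change view" command of the
// centralized dBFT 2.1 model. Stage is 1 or 2 (DoCV1/DoCV2), TargetView is
// the view everyone should switch to, and Proof carries the pre-received
// ChangeView1/ChangeView2 messages that justify the command, so receivers
// can validate it before changing their view.
type DoCV struct {
	Stage      int
	TargetView int
	Proof      []ConsensusMsg
}

func main() {
	cmd := DoCV{
		Stage:      1,
		TargetView: 1,
		Proof: []ConsensusMsg{
			{Type: "ChangeView1", From: 1, View: 1},
			{Type: "ChangeView1", From: 2, View: 1},
			{Type: "ChangeView1", From: 3, View: 1},
		},
	}
	// A backup accepts the view change only if the proof contains at least
	// M matching ChangeView messages for the target view (M = 3 here).
	fmt.Println(len(cmd.Proof) >= 3)
}
```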
#### Stage I
Once initialized at view `v`, the consensus node has two options:
1. Send its `PrepareRequest`/`PrepareResponse` message (and transition to the
`prepareSent` state).
2. Decide to go to the next view `v+1` on timeout or for any other valid reason, like
a transaction missing from the node's mempool, by sending a `ChangeView1(v+1)` message
(and transitioning to the `cv1 to v'=v+1` state).
After that, the node enters stage `I` and proceeds as follows:
* If the node has sent its `PrepareRequest` or `PrepareResponse`:
  * If at least `M` preparation messages (including its own) are collected, then
  it's clear that the majority has accepted the proposal for view `v` as valid,
  and the node can safely send the `Commit` message and transition to
  phase `II` (the `commitSent` state).
  * If not enough preparation payloads are received from the neighbours for a
  long time, then the node is allowed to transition to stage `II` by sending
  its `ChangeView2` message (and changing its state to `cv2 to v'=v+1`). This
  denotes the node's desire to change the view to the next one with the current
  proposal preserved.
* If the node entered the `cv1 to v'=v+1` state (see the sketch after this list):
  * If there's a majority (>=`M`) of `ChangeView1(v+1)` messages and the node is the
  primary of view `v+1`, then it should send the signal (a `DoCV1(v+1)` message)
  to the rest of the group to change their view to `v+1` with a new proposal
  generated. The rest of the group (backups of view `v+1` that have sent their
  `ChangeView1(v+1)` messages) should change their view on receiving `DoCV1(v+1)`.
  * If a majority (>=`M`) of `ChangeView1(v+1)` messages is collected, but
  `DoCV1(v+1)` is missing for a long time, then the node is able to "skip" view
  `v+1` and send a `ChangeView1(v+2)` message, hoping that the primary of `v+2`
  will be fast enough to send the `DoCV1(v+2)` signal. The process can be repeated
  on timeout for view `v+3`, etc.
  * If more than `F` nodes have sent their preparation messages
  (and, consequently, announced their desire to transition to stage `II` of the
  current view rather than to change the view), then it's clear that there won't be
  more than `F` messages of type `ChangeView1` to perform the transition to the next
  view from stage `II`. Thus, the node is allowed to send its `ChangeView2` message
  (and change its state to `cv2 to v'=v+1`). Such a situation may happen if the node
  hasn't received the (otherwise valid) proposal in time.
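An illustrative Go sketch of the decisions available to a node waiting in the
`cv1 to v'=v+1` state, under the assumptions and hypothetical names stated in
the comments:

```go
package main

import "fmt"

// cv1Action sketches what a node in the "cv1 to v'=v+1" state of the
// centralized model does, given the number of ChangeView1(v+1) messages it
// has seen, whether it is the primary of v+1, whether DoCV1(v+1) arrived and
// whether the local timer expired. Names and structure are illustrative.
func cv1Action(cv1Next, m int, isNextPrimary, gotDoCV1, timedOut bool) string {
	switch {
	case cv1Next >= m && isNextPrimary:
		return "send DoCV1(v+1)" // lead the group into the next view
	case gotDoCV1:
		return "change view to v+1"
	case cv1Next >= m && timedOut:
		return "send ChangeView1(v+2)" // skip v+1, hope its primary is alive
	default:
		return "" // keep waiting
	}
}

func main() {
	fmt.Println(cv1Action(3, 3, false, false, true)) // send ChangeView1(v+2)
}
```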
#### Stage II
Once the node has entered stage `II`, the proposal of the current round is
considered valid. Depending on the node's state, the following decisions are
possible:
* If the node has sent its `Commit` and is in the `commitSent` state:
  * If a majority (>=`M`) of `Commit` messages has been received, then the
  block may be safely accepted for the current proposal.
  * If there are not enough `Commit` messages for a long time, then it's legal to
  send the `ChangeView2(v+1)` message, transition to the `cv2 to v'=v+1` state
  and decide to go to the next view `v+1`, preserving the current proposal
  and hoping that it will be possible to collect enough `Commit` messages
  for it in view `v+1`.
* If the node is in the `cv2 to v'=v+1` state:
  * If there's a majority (>=`M`) of `ChangeView2(v+1)` messages and the node is the
  primary of view `v+1`, then it should send the signal (a `DoCV2(v+1)` message)
  to the rest of the group to change their view to `v+1` with the old proposal of view `v`
  preserved. The rest of the group (backups of view `v+1` that have sent their
  `ChangeView2(v+1)` messages) should change their view on receiving `DoCV2(v+1)`.
  * If a majority (>=`M`) of `ChangeView2(v+1)` messages is collected, but
  `DoCV2(v+1)` is missing for a long time, then the node is able to "skip" view
  `v+1` and send a `ChangeView2(v+2)` message, hoping that the primary of `v+2`
  will be fast enough to send the `DoCV2(v+2)` signal. The process can be repeated
  on timeout for view `v+3`, etc.
* Finally, if the node receives at least `M` messages from stage `I`, at most
`F` of which are preparations (the blue dotted arrow from the `cv2 to v'=v+1` to the
`cv1 to v'=v+1` state), it is able to go back to the `cv1` state and start
a new consensus round with a new proposal (see the sketch after this list). This
case is somewhat special: it allows escaping from the deadlock situation when the
node is locked in the `cv2` state, unable to perform any further steps, whereas the
rest of the network is waiting in the `cv1` state. Consider the case of a four-node
network where the first node is permanently "dead", the primary has sent its
`PrepareRequest` and went to the `cv2` state on timeout, and the remaining two nodes
are waiting in the `cv1` state, unable to move further.
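The special `cv2` → `cv1` escape can be summarized by a small predicate
(illustrative only; the TLA⁺ specification is authoritative):

```go
package main

import "fmt"

// backToCV1 sketches the special cv2 -> cv1 escape of the centralized
// model: with at least M stage I messages, of which at most F are
// preparations, the node may restart the round with a new proposal.
func backToCV1(stageIMsgs, preparations, m, f int) bool {
	return stageIMsgs >= m && preparations <= f
}

func main() {
	// Four-node example (M=3, F=1): one dead node, the primary stuck in cv2
	// after sending PrepareRequest, two backups waiting in cv1.
	fmt.Println(backToCV1(3, 1, 3, 1)) // true: go back to cv1
}
```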
It should be noted that "preserving the proposal of view `v` in view `v+1`" means
that the primary of view `v+1` broadcasts the `PrepareRequest` message at view `v+1`
that contains the same set of block's fields (transactions, timestamp, primary, etc) as
the `PrepareRequest` proposed in the view `v` has.
Here's the scheme of transitions between consensus node states for the improved
dBFT 2.1 model with the centralized view changes process:
![dBFT 2.1 model with the centralized view changes](./.github/dbft2.1_centralizedCV.png)
Here you can find the specification file and the basic TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbft2.1_centralizedCV/dbftCentralizedCV.tla)
* [TLC Model Checker configuration](./dbft2.1_centralizedCV/dbftCentralizedCV___AllGoodModel.launch)
## MEV-resistant dBFT models
[Neo X chain](https://docs.banelabs.org/) uses the dBFT 2.0 algorithm as a consensus engine. As a part of
the Neo X anti-MEV feature implementation, a dBFT 2.0 extension was designed to
provide single-block finality for encrypted transactions (a.k.a. envelope
transactions). Compared to dBFT 2.0, the MEV-resistant dBFT algorithm includes an
additional `post-Commit` phase that consensus nodes are required to pass through
before every block acceptance. This phase allows consensus nodes to exchange
some additional data related to encrypted transactions and to the final state of
the block being accepted using a new type of consensus messages. The improved
protocol based on dBFT 2.0 with the additional phase will be referred to below as
MEV-resistant dBFT.
We've checked the MEV-resistant dBFT model with the TLC Model Checker against the same
set of launch configurations that was used to reveal the liveness problems of the
[basic dBFT 2.0 model](#basic-dbft-20-model). The MEV-resistant dBFT model brings no
extra problems to the protocol, but it has been proven that this model has exactly
the same [liveness bug](https://github.com/neo-project/neo-modules/issues/792) that
the original dBFT 2.0 model has, which is expected.
### Basic MEV-resistant dBFT model
This specification is an extension of the
[basic dBFT 2.0 model](#basic-dbft-20-model). Compared to the base model, the
MEV-resistant dBFT specification additionally includes (the new steps are sketched
after this list):
1. A new message type `CommitAck` reflecting an additional protocol
message that should be sent by a resource manager once at least `M` `Commit`
messages have been collected by the node (which confirms final acceptance of the
so-called "PreBlock").
2. A new resource manager state `commitAckSent` reflecting the additional phase
of the protocol needed for consensus nodes to exchange some data that was not
available at the time of the first commit. This RM state represents a consensus
node that has sent this additional post-commit data but has not accepted
the final block yet.
3. A new specification step `RMSendCommitAck` describing the transition between
the `commitSent` and `commitAckSent` phases of the protocol, or, which is the same,
the corresponding resource manager states. This step allows the resource manager to
send a `CommitAck` message if at least `M` valid `Commit` messages are collected.
4. Adjusted behaviour of the `RMAcceptBlock` step: block acceptance is possible iff the
node has sent the `CommitAck` message and there are at least `M` `CommitAck`
messages collected by the node.
5. Adjusted behaviour of "faulty" resource managers: malicious nodes are allowed to
send a `CommitAck` message via the `RMFaultySendCommitAck` step.
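The two new/adjusted steps can be summarized by their guards; the following Go
sketch is illustrative and uses made-up function names, with the TLA⁺
specification remaining the authoritative definition:

```go
package main

import "fmt"

// canSendCommitAck mirrors the guard of RMSendCommitAck: a node that has sent
// its Commit may send CommitAck once at least M valid Commit messages are
// collected.
func canSendCommitAck(commits, m int) bool {
	return commits >= m
}

// canAcceptBlock mirrors the adjusted RMAcceptBlock: the block is accepted
// only after the node has sent its own CommitAck and has collected at least
// M CommitAck messages.
func canAcceptBlock(sentCommitAck bool, commitAcks, m int) bool {
	return sentCommitAck && commitAcks >= m
}

func main() {
	fmt.Println(canSendCommitAck(3, 3), canAcceptBlock(true, 3, 3))
}
```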
It should be noted that, in contrast with the dBFT 2.0 protocol, where the node is
locked in the `commitSent` state until block acceptance, MEV-resistant dBFT
does not allow accepting the block right after the `commitSent` state. Instead, it
allows the node to move from the `commitSent` phase further to the `commitAckSent`
state and locks the node in this state until block acceptance. No view change may
be initiated or accepted by a node that has entered the `commitAckSent` state.
Here's the scheme of transitions between consensus node states for MEV-resistant dBFT
algorithm:
![Basic MEV-resistant dBFT model transitions scheme](./.github/dbft_antiMEV.png)
Here you can find the specification file and the basic MEV-resistant dBFT TLC Model
Checker launch configuration for the four "honest" consensus nodes scenario:
* [TLA⁺ specification](dbft_antiMEV/dbft.tla)
* [TLC Model Checker configuration](dbft_antiMEV/dbft___AllGoodModel.launch)
## How to run/check the TLA⁺ specification
### Prerequisites
1. Download and install the TLA⁺ Toolbox following the
[official guide](http://lamport.azurewebsites.net/tla/toolbox.html).
2. Read the brief introduction to the TLA⁺ language and TLC Model Checker at the
[official site](http://lamport.azurewebsites.net/tla/high-level-view.html).
3. Download and take a look at the
[TLA⁺ cheat sheet](https://lamport.azurewebsites.net/tla/summary-standalone.pdf).
4. For more thorough learning, watch the
[TLA⁺ Video Course](https://lamport.azurewebsites.net/video/videos.html) and
read the [Specifying Systems book](http://lamport.azurewebsites.net/tla/book.html?back-link=tools.html#documentation).
### Running the TLC model checker
1. Clone the [repository](https://github.com/nspcc-dev/dbft.git).
2. Open the TLA⁺ Toolbox, open a new specification and provide the path to the desired
`*.tla` file that contains the specification.
3. Create the model named `AllGoodModel` in the TLA⁺ Toolbox.
4. Copy the corresponding `*___AllGoodModel.launch` file to the `*.toolbox`
folder. Reload/refresh the model in the TLA⁺ Toolbox.
5. Open the `Model Overview` window in the TLA⁺ Toolbox and check that the behaviour
specification, declared constants, invariants and properties of the model are
filled in with some values.
6. Press the `Run TLC on the model` button to start the model checking process and
follow the progress in the `Model Checking Results` window.
### Model checking note
Note that all TLA⁺ specifications provided in this repo can be fully checked
with the `MaxView` model constraint set to 1 for the four-node network scenario. Larger
`MaxView` values produce too many behaviours to be checked, so the TLC Model Checker is
likely to fail with OOM during the checking process. However, `MaxView` set to 1 is enough
to check the model liveness properties for the four-node scenario, as there are two views
to be checked.
# dBFT formal models
This section contains a set of dBFT's formal specifications written in
[TLA⁺](https://lamport.azurewebsites.net/tla/tla.html) language. The models
describe the core algorithm logic represented in a high-level way and can be used
to illustrate some basic dBFT concepts and to validate the algorithm in terms of
liveness and fairness. It should be noted that presented models do not precisely
follow the dBFT implementation presented in the repository and may omit some
implementation details in favor of the specification simplicity and the
fundamental philosophy of the TLA⁺. However, the presented models directly
reflect some liveness problems dBFT 2.0 has; the models can and are aimed to be
used for the dBFT 2.0 liveness evaluation and further algorithm improvements.
Any contributions, questions and discussions on the presented models are highly
appreciated.
## dBFT 2.0 models
### Basic dBFT 2.0 model
This specification is a basis that was taken for the further algorithm
investigation. We recommend to begin acquaintance with the dBFT models from this
one.
The specification describes the process of a single block acceptance: the set of
resource managers `RM` (which is effectively a set of consensus nodes)
communicating via the shared consensus message pool `msgs` and taking the
decision in a few consensus rounds (views). Each consensus node has its own state
at each step of the behaviour. Consensus node may send a consensus message by
adding it to the `msgs` pool. To perform the transition between states the
consensus node must send a consensus message or there must be a particular set of
consensus messages in the shared message pool required for a particular
transition.
Here's the scheme of transitions between consensus node states:
![Basic dBFT model transitions scheme](./.github/dbft.png)
The specification also describes two kinds of malicious nodes behaviour that can
be combined, i.e. enabled or disabled independently for each particular node:
1. "Dead" nodes. "Dead" node is completely excluded from the consensus process
and not able to send the consensus messages and to perform state transitions.
The node may become "dead" at any step in the middle of the consensus process.
Once the node becomes "dead" there's no way for it to rejoin the consensus
process.
2. "Faulty" nodes. "Faulty" node is allowed to send consensus messages of *any*
type at *any* step and to change its view without regarding the dBFT view
changing rules. The node may become "faulty" at any step in the middle of the
consensus process. Once the node becomes "faulty" there's no way for it to
become "good" again.
The specification contains several invariants and liveness properties that must
be checked by the TLC Model Checker. These formulas mostly describe two basic
concepts that dBFT algorithm expected to guarantee:
1. No fork must happen. There must be no situation such that two different
blocks are accepted at two different consensus rounds (views).
2. The block must always be accepted. There must be no situation such that nodes
are stuck in the middle of consensus process and can't take any further steps.
The specification is written and working under several assumptions:
1. All consensus messages are valid. In real life it is guaranteed by verifiable
message signatures. In case if malicious or corrupted message is received it
won't be handled by the node.
2. The exact timeouts (e.g. t/o on waiting a particular consensus message, etc.)
are not included into the model. However, the model covers timeouts in
general, i.e. the timeout is just the possibility to perform a particular
state transition.
3. All consensus messages must eventually be delivered to all nodes, but the
exact order of delivering isn't guaranteed.
4. The maximum number of consensus rounds (views) is restricted. This constraint
was introduced to reduce the number of possible model states to be checked.
The threshold may be specified via model configuration, and it is highly
recommended to keep this setting less or equal to the number of consensus
nodes.
Here you can find the specification file and the TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbft/dbft.tla)
* [TLC Model Checker configuration](./dbft/dbft___AllGoodModel.launch)
### Extended dBFT 2.0 model
This is an experimental dBFT 2.0 specification that extends the
[basic model](#basic-dbft-20-model) in the following way: besides the shared pool
of consensus messages `msgs` each consensus node has its own local pool of
received and handled messages. Decisions on transmission between the node states
are taken by the node based on the state of the local message pool. This approach
allows to create more accurate low-leveled model which is extremely close to the
dBFT implementation presented in this repository. At the same time such approach
*significantly* increases the number of considered model states which leads to
abnormally long TLC Model Checker runs. Thus, we do not recommend to use this
model in development and place it here as an example of alternative (and more
detailed) dBFT specification. These two models are expected to be equivalent in
terms of the liveness locks that can be discovered by both of them, and, speaking
the TLA⁺ language, the Extended dBFT specification implements the
[basic one](#basic-dbft-20-model) (which can be proven and written in TLA⁺, but
stays out of the task scope).
Except for this remark and a couple of minor differences all the
[basic model](#basic-dbft-20-model) description, constraints and assumptions are
valid for the Extended specification as far. Thus, we highly recommend to
consider the [basic model](#basic-dbft-20-model) before going to the Extended
one.
Here you can find the specification file and the TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbftMultipool/dbftMultipool.tla)
* [TLC Model Checker configuration](./dbftMultipool/dbftMultipool___AllGoodModel.launch)
## Proposed dBFT 2.1 models
Based on the liveness locks scenarios found by the TLC model checker in the
[basic dBFT 2.0 model](#basic-dbft-20-model) we've developed two extensions of
dBFT 2.0 protocol that allow to avoid the liveness lock problem and to preserve
the safety properties of the algorithm. The extensions currently don't have
any code-level implementation and presented as a TLA⁺ specifications ready to be
reviewed and discussed. The improved protocol presented in the extensions will
be referred below as dBFT 2.1.
We've checked both dBFT 2.1 models with the TLC Model Checker against the same
set of launch configurations that was used to reveal the liveness problems of the
[basic dBFT 2.0 model](#basic-dbft-20-model). The improved models have larger
set of states, thus, the TLC Model Checker wasn't able to finish the liveness
requirements checks for *all* possible states. However, the checks have passed for
a state graph diameter that was large enough to believe the presented models
solve the dBFT 2.0 liveness lock problems.
### Common `Commit` message improvement note
Here and below we assume that `Commit` messages should bear preparation hashes
from all nodes that have sent the preparation message (>= `M` but not including
a whole `PrepareRequest`). This quickly synchronizes nodes still at the preparation
stage when someone else collects enough preparations. It at the same time prevents
malicious/byzantine nodes from sending spoofed `Commit` messages. The `Commit`
message size becomes a little bigger, but since it's just hashes it still fits
into a single packet in the vast majority of the cases, so it doesn't really matter.
### dBFT 2.1 stages-based model
The basic idea of this model is to split the consensus process into three subsequent
stages marked as `I`, `II` and `III` at the scheme. To perform a transition between
two subsequent stages each consensus node should wait for a set of messages from
at least `M` consensus nodes to be received so that it's possible to complete a full
picture of the neighbours' decisions in the current consensus round. In other words,
no transition can happen unless we have `M` number of messages from the subsequent round,
timers are only set up after we have this number of messages, just to wait for
(potentially) a whole set of them. At the same time, each of the stages has
its own `ChangeView[1,2,3]` message to exit to the next consensus round (view) if
something goes wrong in the current one and there's definitely no ability to
continue consensus process in the current view. Below there's a short description
of each stage. Please, refer to the model scheme and specification for further
details.
#### Stage I
Once initialized, consensus node has two ways:
1. Send its `PrepareRequest`/`PrepareResponse` message (and transmit to the
`prepareSent` state).
2. Decide to go to the next view on timeout or any other valid reason (like
transaction missing in the node's mempool or wrong proposal) via sending
`ChangeView1` message (and transmit to the `cv1` state).
This scheme is quite similar to the basic dBFT 2.0 model except the new type of
`ChangeView` message. After that the node enters stage `I` and waits for consensus
messages of stage `I` (`PrepareRequest` or `PrepareResponse` or `ChangeView1`)
from at least `M` neighbours which is needed to decide about the next actions.
The set of received messages can be arranged in the following way:
* `M` messages of `ChangeView1` type denote that `M` nodes have decided to change
their view directly after initialization due to invalid/missing `PrepareRequest`
which leads to immediate view changing. This is a "fail fast" route that is the
same as with dBFT 2.0 for the widespread case of missing primary. No additional
delay is added, everything works as usual.
* `M` preparation messages (of type `PrepareRequest` or `PrepareResponse`) with
missing `ChangeView3` denote that the majority of nodes have decided to commit which
denotes the safe transition to stage `II` can be performed and `Commit` message
can safely be sent even if there's `ChangeView1` message in the network. Notice
that `ChangeView3` check is just a protection against node seriously lagging
behind.
* `M` messages each of the type `PrepareRequest` or `PrepareResponse` or `ChangeView1`
where at least one message is of the type `ChangeView1` denote that at least `M`
nodes have reached the stage `I` and the node can safely take further steps.
The additional `| Commit | ≤ F | CV3 | > 0` condition requires the majority of
nodes not to have the `Commit` message to be sent so that it's still possible to
collect enough `ChangeView[2,3]` messages to change the view in further stages.
If so, then the safe transition to stage `II` can be performed and `ChangeView2`
message can safely be sent.
#### Stage II
Once the node has `Commit` or `ChangeView2` message sent, it enters the stage `II`
of the consensus process and waits for at least `M` messages of stage `II`
(`Commit` or `ChangeView2`) to perform the transition to the next stage. The set
of accepted messages can be arranged in the following way:
* `M` messages of `ChangeView2` type denote that `M` nodes have decided to change
their view directly after entering the stage `II` due to timeout while waiting
for the `Commit` messages which leads to immediate view changing.
* `M` messages of type `Commit` denote that the majority of nodes have decided to
commit which denotes the block can be accepted immediately without entering the
stage `III`. Notice that this is the regular flow of normal dBFT 2.0 consensus,
it also hasn't been changed and proceeds the way it was before.
* `M` messages each of the type `Commit` or `ChangeView2` where not more than `F`
messages are of the type `Commit` denotes that the majority of nodes decided to
change their view after entering the stage `II` and there's not enough `Commit`
messages to create the block (and produce the fork), thus, the safe transition
to stage `III` can be performed and `ChangeView3` message can safely be sent
even if there's `Commit` message in the network.
In addition, the direct transition from `cv2` state to the `commitSent` state is
added in case if it's clear that there's more than `F` nodes have decided to
commit and no `ChangeView3` message has been received which means that it's possible
to produce block in the current view. This path handles a corner case of missing
stage `I` messages, in fact, because `Commit` messages prove that there are at
least `M` preparation messages exist, but the node went `cv2` path just because
it missed some of them.
#### Stage III
Unlike the basic dBFT 2.0 model where consensus node locks on the commit phase,
stage `III` gives the ability to escape from the commit phase via collecting the set
of `M` `ChangeView3` messages. This phase is reachable as soon as at least `M` nodes
has reached the phase `II` (have `ChangeView2` or `Commit` messages sent) and if
there's not enough `Commit` messages (<=`F`) to accept the block. This stage is
added to avoid situation when the node is being locked on the `commitSent` state
whereas the rest of the nodes (>`F`) is willing to go to the next view.
Here's the scheme of transitions between consensus node states for the improved
dBFT 2.1 stages-based model:
![dBFT 2.1stages-based model](./.github/dbft2.1_threeStagedCV.png)
Here you can find the specification file and the basic TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbft2.1_threeStagedCV/dbftCV3.tla)
* [TLC Model Checker configuration](./dbft2.1_threeStagedCV/dbftCV3___AllGoodModel.launch)
### dBFT 2.1 model with the centralized view changes
The improvement which was taken as a base for this model is taken from the pBFT
algorithm and is as follows.
The consensus process is split into two stages with the following meaning:
* Stage `I` holds the node states from which it's allowed to transmit to the subsequent
view under assumption that the *new* proposal will be generated (as the basic dBFT 2.0
model does).
* Stage `II` holds the node states from which it's allowed to perform view change
*preserving* the proposal from the previous view.
Another vital difference from the basic dBFT 2.0 model is that view changes are
being performed by the node on the `DoCV[1,2]` command (consensus message) sent by the
leader of the target view specified via `DoCV[1,2]` parameters. Aside from the target view
parameter, `DoCV[1,2]` message contains the set of all related pre-received consensus messages
so that the receivers of `DoCV[1,2]` are able to check its validness before the subsequent
view change.
Below presented the short description of the proposed consensus process. Please, refer to the
model scheme and specification for further details.
#### Stage I
Once initialized at view `v`, the consensus node has two options:
1. Send its `PrepareRequest`/`PrepareResponse` message (and transition to the
`prepareSent` state).
2. Decide to go to the next view `v+1` on timeout, or for any other valid reason such as
a transaction missing from the node's mempool, by sending the `ChangeView1(v+1)` message
(and transitioning to the `cv1 to v'=v+1` state).
After that, the node enters stage `I` and proceeds as follows:
* If the node has its `PrepareRequest` or `PrepareResponse` sent:
* If at least `M` preparation messages (including its own) have been collected, then
it's clear that the majority has accepted the proposal for view `v` as valid, and the
node can safely send the `Commit` message and transition to stage `II` (the
`commitSent` state).
* If not enough preparation payloads have been received from the neighbours for a
long time, then the node is allowed to move to stage `II` by sending its
`ChangeView2` message (and changing its state to `cv2 to v'=v+1`). This
denotes the node's desire to change to the next view with the current proposal
preserved.
* If the node entered the `cv1 to v'=v+1` state:
* If there's a majority (>=`M`) of `ChangeView1(v+1)` messages and the node is
primary in view `v+1`, then it should send the signal (the `DoCV1(v+1)` message)
to the rest of the group to change their view to `v+1` with a new proposal
generated. The rest of the group (backups of view `v+1` that have sent their
`ChangeView1(v+1)` messages) should change their view on receiving `DoCV1(v+1)`.
* If there's a majority (>=`M`) of `ChangeView1(v+1)` messages collected, but
`DoCV1(v+1)` is missing for a long time, then the node is able to "skip" view
`v+1` and send the `ChangeView1(v+2)` message, hoping that the primary of `v+2`
will be fast enough to send the `DoCV1(v+2)` signal. The process can be repeated
on timeout for view `v+3`, etc.
* If more than `F` nodes have sent their preparation messages
(and, consequently, announced their desire to move to stage `II` of the
current view rather than to change view), then it's clear that there won't be more
than `F` messages of type `ChangeView1` available to perform the transition to the
next view from stage `II`. Thus, the node is allowed to send its `ChangeView2`
message (and change its state to `cv2 to v'=v+1`). Such a situation may happen if
the node hasn't received the (valid) proposal in time. (The `DoCV1`-related
decisions above are sketched right after this list.)
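The two `ChangeView1`-related decisions from the list above can be sketched in
Go as follows. The helper names, parameters and the way timeouts are modelled
are assumptions of this sketch, not part of the model or of the dBFT library.

```go
package sketch

// shouldSendDoCV1 reports whether node r, being the primary of targetView in a
// network of n nodes, has collected enough ChangeView1(targetView) messages
// (at least M) to broadcast the DoCV1(targetView) signal to the rest of the group.
func shouldSendDoCV1(r, n, m, targetView, changeView1Count int) bool {
	isPrimary := targetView%n == r
	return isPrimary && changeView1Count >= m
}

// nextChangeViewTarget returns the view a waiting backup should ask for next:
// if the DoCV1 signal for the current target view is missing on timeout, the
// node "skips" that view and sends ChangeView1 for the following one.
func nextChangeViewTarget(currentTarget int, doCV1Received, timedOut bool) int {
	if !doCV1Received && timedOut {
		return currentTarget + 1
	}
	return currentTarget
}
```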
#### Stage II
Once the node has entered stage `II`, the proposal of the current round is
considered valid. Depending on the node's state, the following decisions are
possible:
* If the node has its `Commit` sent and is in the `commitSent` state:
* If a majority (>=`M`) of the `Commit` messages has been received, then the
block may be safely accepted for the current proposal.
* If there are not enough `Commit` messages for a long time, then it's legal to
send the `ChangeView2(v+1)` message, transition to the `cv2 to v'=v+1` state
and decide to go to the next view `v+1`, preserving the current proposal
and hoping that it will be possible to collect enough `Commit` messages
for it in view `v+1`.
* If the node is in the `cv2 to v'=v+1` state, then:
* If there's a majority (>=`M`) of `ChangeView2(v+1)` messages and the node is
primary in view `v+1`, then it should send the signal (the `DoCV2(v+1)` message)
to the rest of the group to change their view to `v+1` with the old proposal of view `v`
preserved. The rest of the group (backups of view `v+1` that have sent their
`ChangeView2(v+1)` messages) should change their view on receiving `DoCV2(v+1)`.
* If there's a majority (>=`M`) of `ChangeView2(v+1)` messages collected, but
`DoCV2(v+1)` is missing for a long time, then the node is able to "skip" view
`v+1` and send the `ChangeView2(v+2)` message, hoping that the primary of `v+2`
will be fast enough to send the `DoCV2(v+2)` signal. The process can be repeated
on timeout for view `v+3`, etc.
* Finally, if the node receives at least `M` messages from stage `I`, at most
`F` of which are preparations (the blue dotted arrow from the `cv2 to v'=v+1` state to
the `cv1 to v'=v+1` state), it has the ability to go back to the `cv1` state and start
a new consensus round with a new proposal. This case is somewhat special: it allows
the node to escape from the deadlock situation when it is locked in the `cv2` state,
unable to perform any further steps, whereas the rest of the network is waiting in
the `cv1` state. Consider a four-node network where the first node is permanently
"dead", the primary has sent its `PrepareRequest` and went to the `cv2` state on
timeout, and the remaining two nodes are waiting in the `cv1` state, unable to move
further. A sketch of this escape condition is given after this list.
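Here is a hedged Go sketch of the deadlock-escape rule from the last item of the
list above; the function and parameter names are invented for this example.

```go
package sketch

// canFallBackToCV1 reports whether a node stuck in the cv2 state may go back
// to cv1 and start a new round with a new proposal: it has received at least M
// stage I messages in total (preparations plus ChangeView1), and at most F of
// them are preparations (PrepareRequest/PrepareResponse).
func canFallBackToCV1(preparationCount, changeView1Count, m, f int) bool {
	return preparationCount+changeView1Count >= m && preparationCount <= f
}
```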
It should be noted that "preserving the proposal of view `v` in view `v+1`" means
that the primary of view `v+1` broadcasts the `PrepareRequest` message at view `v+1`
that contains the same set of block's fields (transactions, timestamp, primary, etc) as
the `PrepareRequest` proposed in the view `v` has.
Here's the scheme of transitions between consensus node states for the improved
dBFT 2.1 model with the centralized view change process:
![dBFT 2.1 model with the centralized view changes](./.github/dbft2.1_centralizedCV.png)
Here you can find the specification file and the basic TLC Model Checker launch
configuration:
* [TLA⁺ specification](./dbft2.1_centralizedCV/dbftCentralizedCV.tla)
* [TLC Model Checker configuration](./dbft2.1_centralizedCV/dbftCentralizedCV___AllGoodModel.launch)
## MEV-resistant dBFT models
[Neo X chain](https://docs.banelabs.org/) uses the dBFT 2.0 algorithm as its consensus engine. As part of
the Neo X anti-MEV feature implementation, a dBFT 2.0 extension was designed to
provide single-block finality for encrypted transactions (a.k.a. envelope
transactions). Compared to dBFT 2.0, the MEV-resistant dBFT algorithm includes an
additional `post-Commit` phase that consensus nodes must pass through before every
block acceptance. This phase allows consensus nodes to exchange some additional
data related to encrypted transactions and to the final state of the accepted block
using a new type of consensus messages. The improved protocol based on dBFT 2.0
with the additional phase is referred to below as MEV-resistant dBFT.
We've checked the MEV-resistant dBFT model with the TLC Model Checker against the same
set of launch configurations that was used to reveal the liveness problems of the
[basic dBFT 2.0 model](#basic-dbft-20-model). The MEV-resistant dBFT model brings no extra problems to the
protocol, but it has been proved that this model has exactly the same
[liveness bug](https://github.com/neo-project/neo-modules/issues/792) as the
original dBFT 2.0 model, which is expected.
### Basic MEV-resistant dBFT model
This specification is an extension of the
[basic dBFT 2.0 model](#basic-dbft-20-model). Compared to the base model, the
MEV-resistant dBFT specification additionally includes:
1. New message type `CommitAck` aimed to reflect an additional protocol
message that should be sent by a resource manager once at least `M` `Commit`
messages have been collected by the node (which confirms the so-called "PreBlock"
final acceptance).
2. New resource manager state `commitAckSent` aimed to reflect the additional phase
of the protocol needed for consensus nodes to exchange some data that was not
available at the time of the first commit. This RM state represents a consensus
node that has sent this additional post-commit data but has not yet accepted
the final block.
3. New specification step `RMSendCommitAck` describing the transition between
the `commitSent` and `commitAckSent` phases of the protocol, or, which is the same,
the corresponding resource manager states. This step allows the resource manager to
send the `CommitAck` message if at least `M` valid `Commit` messages are collected.
4. Adjusted behaviour of `RMAcceptBlock` step: block acceptance is possible iff the
node has sent the `CommitAck` message and there are at least `M` `CommitAck`
messages collected by the node.
5. Adjusted behaviour of "faulty" resource managers: allow malicious nodes to send an
`CommitAck` message via `RMFaultySendCommitAck` step.
It should be noted that, in comparison with the dBFT 2.0 protocol, where the node is
locked in the `commitSent` state until block acceptance, MEV-resistant dBFT
does not allow the block to be accepted right after the `commitSent` state. Instead, it
allows the node to move from the `commitSent` phase further to the `commitAckSent` state
and locks the node in that state until block acceptance. No view change may be
initiated or accepted by a node that has entered the `commitAckSent` state. A sketch
of the resulting conditions is given below.
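As a rough illustration of points 3 and 4 above, the Go sketch below mirrors the
adjusted commit flow: a node needs `M` `Commit` messages to send its `CommitAck`,
and may accept the final block only after it has sent `CommitAck` itself and has
collected `M` `CommitAck` messages. The function names are assumptions of this
sketch and are not the specification operators themselves.

```go
package sketch

// canSendCommitAck mirrors the RMSendCommitAck description: a node that has
// already sent its Commit may send CommitAck once at least M valid Commit
// messages for its view have been collected.
func canSendCommitAck(commitSent bool, commitCount, m int) bool {
	return commitSent && commitCount >= m
}

// canAcceptBlock mirrors the adjusted RMAcceptBlock description: the final
// block is accepted only if the node has sent its own CommitAck and at least
// M CommitAck messages have been collected.
func canAcceptBlock(commitAckSent bool, commitAckCount, m int) bool {
	return commitAckSent && commitAckCount >= m
}
```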
Here's the scheme of transitions between consensus node states for the MEV-resistant dBFT
algorithm:
![Basic MEV-resistant dBFT model transitions scheme](./.github/dbft_antiMEV.png)
Here you can find the specification file and the basic MEV-resistant dBFT TLC Model
Checker launch configuration for the four "honest" consensus nodes scenario:
* [TLA⁺ specification](dbft_antiMEV/dbft.tla)
* [TLC Model Checker configuration](dbft_antiMEV/dbft___AllGoodModel.launch)
## How to run/check the TLA⁺ specification
### Prerequisites
1. Download and install the TLA⁺ Toolbox following the
[official guide](http://lamport.azurewebsites.net/tla/toolbox.html).
2. Read the brief introduction to the TLA⁺ language and TLC Model Checker at the
[official site](http://lamport.azurewebsites.net/tla/high-level-view.html).
3. Download and take a look at the
[TLA⁺ cheat sheet](https://lamport.azurewebsites.net/tla/summary-standalone.pdf).
4. For more in-depth learning, watch the
[TLA⁺ Video Course](https://lamport.azurewebsites.net/video/videos.html) and
read the [Specifying Systems book](http://lamport.azurewebsites.net/tla/book.html?back-link=tools.html#documentation).
### Running the TLC model checker
1. Clone the [repository](https://github.com/nspcc-dev/dbft.git).
2. Open the TLA⁺ Toolbox, open a new specification and provide the path to the desired
`*.tla` file that contains the specification description.
3. Create the model named `AllGoodModel` in the TLA⁺ Toolbox.
4. Copy the corresponding `*___AllGoodModel.launch` file to the `*.toolbox`
folder. Reload/refresh the model in the TLA⁺ Toolbox.
5. Open the `Model Overview` window in the TLA⁺ Toolbox and check that the behaviour
specification, declared constants, invariants and properties of the model are
filled in.
6. Press the `Run TLC on the model` button to start the model checking process and
follow the progress in the `Model Checking Results` window.
### Model checking note
It should be noted that all TLA⁺ specifications provided in this repo can be properly checked
with the `MaxView` model constraint set to 1 for the four-node network scenario. Larger
`MaxView` values produce too many behaviours to be checked, so the TLC Model Checker is likely
to fail with OOM during the checking process. However, `MaxView` set to 1 is enough to check
the model liveness properties for the four-node scenario, as there are two views to be checked
in this case (0 and 1).

View File

@ -1,388 +1,388 @@
-------------------------------- MODULE dbft --------------------------------
EXTENDS
Integers,
FiniteSets
CONSTANTS
\* RM is the set of consensus node indexes starting from 0.
\* Example: {0, 1, 2, 3}
RM,
\* RMFault is a set of consensus node indexes that are allowed to become
\* FAULT in the middle of every considered behavior and to send any
\* consensus message afterwards. RMFault must be a subset of RM. An empty
\* set means that all nodes are good in every possible behaviour.
\* Examples: {0}
\* {1, 3}
\* {}
RMFault,
\* RMDead is a set of consensus node indexes that are allowed to die in the
\* middle of every behaviour and do not send any message afterwards. RMDead
\* must be a subset of RM. An empty set means that all nodes are alive and
\* responding in every possible behaviour. RMDead may intersect the
\* RMFault set, which means that a node that is in both RMDead and RMFault
\* may become FAULT and send any message starting from some step of the
\* particular behaviour and may also die in the same behaviour which will
\* prevent it from sending any message.
\* Examples: {0}
\* {3, 2}
\* {}
RMDead,
\* MaxView is the maximum allowed view to be considered (starting from 0,
\* including the MaxView itself). This constraint was introduced to reduce
\* the number of possible model states to be checked. It is recommended to
\* keep this setting not too high (< N is highly recommended).
\* Example: 2
MaxView
VARIABLES
\* rmState is a set of consensus node states. It is represented by the
\* mapping (function) with domain RM and range RMStates. I.e. rmState[r] is
\* the state of the r-th consensus node at the current step.
rmState,
\* msgs is the shared pool of messages sent to the network by consensus nodes.
\* It is represented by a subset of Messages set.
msgs
\* vars is a tuple of all variables used in the specification. It is needed to
\* simplify fairness conditions definition.
vars == <<rmState, msgs>>
\* N is the number of validators.
N == Cardinality(RM)
\* F is the number of validators that are allowed to be malicious.
F == (N - 1) \div 3
\* M is the number of validators that must function correctly.
M == N - F
\* These assumptions are checked by the TLC model checker once at the start of
\* the model checking process. All the input data (declared constants) specified
\* in the "Model Overview" section must satisfy these constraints.
ASSUME
/\ RM \subseteq Nat
/\ N >= 4
/\ 0 \in RM
/\ RMFault \subseteq RM
/\ RMDead \subseteq RM
/\ Cardinality(RMFault) <= F
/\ Cardinality(RMDead) <= F
/\ Cardinality(RMFault \cup RMDead) <= F
/\ MaxView \in Nat
/\ MaxView <= 2
\* RMStates is a set of records where each record holds the node state and
\* the node current view.
RMStates == [
type: {"initialized", "prepareSent", "commitSent", "cv", "blockAccepted", "bad", "dead"},
view : Nat
]
\* Messages is a set of records where each record holds the message type,
\* the message sender and sender's view by the moment when message was sent.
Messages == [type : {"PrepareRequest", "PrepareResponse", "Commit", "ChangeView"}, rm : RM, view : Nat]
\* -------------- Useful operators --------------
\* IsPrimary is an operator defining whether provided node r is primary
\* for the current round from the r's point of view. It is a mapping
\* from RM to the set of {TRUE, FALSE}.
IsPrimary(r) == rmState[r].view % N = r
\* GetPrimary is an operator defining mapping from round index to the RM that
\* is primary in this round.
GetPrimary(view) == CHOOSE r \in RM : view % N = r
\* GetNewView returns the new view number based on the previous node view value.
\* The current specification only allows incrementing the view.
GetNewView(oldView) == oldView + 1
\* CountCommitted returns the number of nodes that have sent the Commit message
\* in the current round or in some other round.
CountCommitted(r) == Cardinality({rm \in RM : Cardinality({msg \in msgs : msg.rm = rm /\ msg.type = "Commit"}) /= 0})
\* MoreThanFNodesCommitted returns whether more than F nodes have been committed
\* in the current round (as the node r sees it).
\*
\* IMPORTANT NOTE: we intentionally do not add the "lost" nodes calculation to the specification, and here's
\* the reason: from the node's point of view we can't reliably check that some neighbour is completely
\* out of the network. It is possible that the node doesn't receive consensus messages from some other member
\* due to network delays. On the other hand, real nodes can go down at any time. The absence of the
\* member's message doesn't mean that the member is out of the network, we never can be sure about
\* that, thus, this information is unreliable and can't be trusted during the consensus process.
\* What can be trusted is whether a Commit message from some member was received by the node.
MoreThanFNodesCommitted(r) == CountCommitted(r) > F
\* PrepareRequestSentOrReceived denotes whether there's a PrepareRequest
\* message received from the current round's speaker (as the node r sees it).
PrepareRequestSentOrReceived(r) == [type |-> "PrepareRequest", rm |-> GetPrimary(rmState[r].view), view |-> rmState[r].view] \in msgs
\* -------------- Safety temporal formula --------------
\* Init is the initial predicate initializing values at the start of every
\* behaviour.
Init ==
/\ rmState = [r \in RM |-> [type |-> "initialized", view |-> 0]]
/\ msgs = {}
\* RMSendPrepareRequest describes the primary node r broadcasting PrepareRequest.
RMSendPrepareRequest(r) ==
/\ rmState[r].type = "initialized"
/\ IsPrimary(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendPrepareResponse describes non-primary node r receiving PrepareRequest from
\* the primary node of the current round (view) and broadcasting PrepareResponse.
\* This step assumes that PrepareRequest always contains valid transactions and
\* signatures.
RMSendPrepareResponse(r) ==
/\ \/ rmState[r].type = "initialized"
\* We do allow the transition from the "cv" state to the "prepareSent" or "commitSent" stage
\* as it is done in the code-level dBFT implementation by checking the NotAcceptingPayloadsDueToViewChanging
\* condition (see
\* https://github.com/nspcc-dev/dbft/blob/31c1bbdc74f2faa32ec9025062e3a4e2ccfd4214/dbft.go#L419
\* and
\* https://github.com/neo-project/neo-modules/blob/d00d90b9c27b3d0c3c57e9ca1f560a09975df241/src/DBFTPlugin/Consensus/ConsensusService.OnMessage.cs#L79).
\* However, we can't easily count the number of "lost" nodes in this specification to match precisely
\* the implementation. Moreover, we don't need it to be counted as the RMSendPrepareResponse enabling
\* condition specifies only the thing that may happen given some particular set of enabling conditions.
\* Thus, we've extended the NotAcceptingPayloadsDueToViewChanging condition to consider only MoreThanFNodesCommitted.
\* It should be noted that the logic of MoreThanFNodesCommittedOrLost can't be reliable in detecting lost nodes
\* (even with neo-project/neo#2057), because real nodes can go down at any time. See the comment above the MoreThanFNodesCommitted.
\/ /\ rmState[r].type = "cv"
/\ MoreThanFNodesCommitted(r)
/\ \neg IsPrimary(r)
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendCommit describes node r sending Commit if there's enough PrepareResponse
\* messages.
RMSendCommit(r) ==
/\ \/ rmState[r].type = "prepareSent"
\* We do allow the transition from the "cv" state to the "prepareSent" or "commitSent" stage,
\* see the related comment inside the RMSendPrepareResponse definition.
\/ /\ rmState[r].type = "cv"
/\ MoreThanFNodesCommitted(r)
/\ Cardinality({
msg \in msgs : /\ (msg.type = "PrepareResponse" \/ msg.type = "PrepareRequest")
/\ msg.view = rmState[r].view
}) >= M
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "commitSent"]
/\ msgs' = msgs \cup {[type |-> "Commit", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMAcceptBlock describes node r collecting enough Commit messages and accepting
\* the block.
RMAcceptBlock(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ PrepareRequestSentOrReceived(r)
/\ Cardinality({msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "blockAccepted"]
/\ UNCHANGED <<msgs>>
\* RMSendChangeView describes node r sending ChangeView message on timeout.
RMSendChangeView(r) ==
/\ \/ (rmState[r].type = "initialized" /\ \neg IsPrimary(r))
\/ rmState[r].type = "prepareSent"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ rmState' = [rmState EXCEPT ![r].type = "cv"]
/\ msgs' = msgs \cup {[type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]}
\* RMReceiveChangeView describes node r receiving enough ChangeView messages for
\* view changing.
RMReceiveChangeView(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ rmState[r].type /= "commitSent"
/\ Cardinality({
rm \in RM : Cardinality({
msg \in msgs : /\ msg.type = "ChangeView"
/\ msg.rm = rm
/\ GetNewView(msg.view) >= GetNewView(rmState[r].view)
}) /= 0
}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "initialized", ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMBeBad describes the faulty node r that will send any kind of consensus message starting
\* from the step it's gone wild. This step is enabled only when RMFault is non-empty set.
RMBeBad(r) ==
/\ r \in RMFault
/\ Cardinality({rm \in RM : rmState[rm].type = "bad"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "bad"]
/\ UNCHANGED <<msgs>>
\* RMFaultySendCV describes sending CV message by the faulty node r.
RMFaultySendCV(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultyDoCV describes view changing by the faulty node r.
RMFaultyDoCV(r) ==
/\ rmState[r].type = "bad"
/\ rmState' = [rmState EXCEPT ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMFaultySendPReq describes sending PrepareRequest message by the primary faulty node r.
RMFaultySendPReq(r) ==
/\ rmState[r].type = "bad"
/\ IsPrimary(r)
/\ LET pReq == [type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]
IN /\ pReq \notin msgs
/\ msgs' = msgs \cup {pReq}
/\ UNCHANGED <<rmState>>
\* RMFaultySendPResp describes sending PrepareResponse message by the non-primary faulty node r.
RMFaultySendPResp(r) ==
/\ rmState[r].type = "bad"
/\ \neg IsPrimary(r)
/\ LET pResp == [type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]
IN /\ pResp \notin msgs
/\ msgs' = msgs \cup {pResp}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCommit describes sending Commit message by the faulty node r.
RMFaultySendCommit(r) ==
/\ rmState[r].type = "bad"
/\ LET commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ commit \notin msgs
/\ msgs' = msgs \cup {commit}
/\ UNCHANGED <<rmState>>
\* RMDie describes node r that was removed from the network at the particular step
\* of the behaviour. After this node r can't change its state and accept/send messages.
RMDie(r) ==
/\ r \in RMDead
/\ Cardinality({rm \in RM : rmState[rm].type = "dead"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "dead"]
/\ UNCHANGED <<msgs>>
\* Terminating is an action that allows infinite stuttering to prevent deadlock on
\* behaviour termination. We consider termination to be valid if at least M nodes
\* have accepted the block.
Terminating ==
/\ Cardinality({rm \in RM : rmState[rm].type = "blockAccepted"}) >= M
/\ UNCHANGED <<msgs, rmState>>
\* Next is the next-state action describing the transition from the current state
\* to the next state of the behaviour.
Next ==
\/ Terminating
\/ \E r \in RM:
RMSendPrepareRequest(r) \/ RMSendPrepareResponse(r) \/ RMSendCommit(r)
\/ RMAcceptBlock(r) \/ RMSendChangeView(r) \/ RMReceiveChangeView(r)
\/ RMDie(r) \/ RMBeBad(r)
\/ RMFaultySendCV(r) \/ RMFaultyDoCV(r) \/ RMFaultySendCommit(r) \/ RMFaultySendPReq(r) \/ RMFaultySendPResp(r)
\* Safety is a temporal formula that describes the whole set of allowed
\* behaviours. It specifies only what the system MAY do (i.e. the set of
\* possible allowed behaviours for the system). It asserts only what may
\* happen; any behaviour that violates it does so at some point and
\* nothing past that point makes a difference.
\*
\* E.g. this safety formula (applied standalone) allows the behaviour to end
\* with an infinite set of stuttering steps (those steps that DO NOT change
\* neither msgs nor rmState) and never reach the state where at least one
\* node is committed or accepted the block.
\*
\* To forbid such behaviours we must specify what the system MUST
\* do. It will be specified below with the help of fairness conditions in
\* the Fairness formula.
Safety == Init /\ [][Next]_vars
\* -------------- Fairness temporal formula --------------
\* Fairness describes the temporal assumptions under which the model is working.
\* Usually it specifies different kinds of assumptions for each/some
\* subactions of the Next state action, but the only thing that bothers
\* us is preventing infinite stuttering at those steps where some of Next's
\* subactions are enabled. Thus, the only thing that we require from the
\* system is to keep taking steps until it's impossible to take them.
\* That's exactly how the weak fairness condition works: if some action
\* remains continuously enabled, it must eventually happen.
Fairness == WF_vars(Next)
\* -------------- Specification --------------
\* The complete specification of the protocol written as a temporal formula.
Spec == Safety /\ Fairness
\* -------------- Liveness temporal formula --------------
\* For every possible behaviour it's true that eventually (i.e. at least once
\* through the behaviour) block will be accepted. It is something that dBFT
\* must guarantee (and in practice this condition is violated).
TerminationRequirement == <>(Cardinality({r \in RM : rmState[r].type = "blockAccepted"}) >= M)
\* A liveness temporal formula asserts only what must happen (i.e. specifies
\* what the system MUST do). Any behaviour can NOT violate it at ANY point;
\* there's always the rest of the behaviour that can always make the liveness
\* formula true; if there's no such behaviour then the liveness formula is
\* violated. The liveness formula is supposed to be checked as a property
\* by the TLC model checker.
Liveness == TerminationRequirement
\* -------------- ModelConstraints --------------
\* MaxViewConstraint is a state predicate restricting the number of possible
\* behaviour states. It is needed to reduce model checking time and prevent
\* the model graph size explosion. This formula must be specified in the
\* "State constraint" section of the "Additional Spec Options" section inside
\* the model overview.
MaxViewConstraint == /\ \A r \in RM : rmState[r].view <= MaxView
/\ \A msg \in msgs : msg.view <= MaxView
\* -------------- Invariants of the specification --------------
\* Model invariant is a state predicate (statement) that must be true for
\* every step of every reachable behaviour. Model invariant is supposed to
\* be checked as an Invariant by the TLC Model Checker.
\* TypeOK is a type-correctness invariant. It states that all elements of
\* specification variables must have the proper type throughout the behaviour.
TypeOK ==
/\ rmState \in [RM -> RMStates]
/\ msgs \subseteq Messages
\* InvTwoBlocksAccepted states that there can't be two different blocks accepted in
\* the two different views, i.e. dBFT must not allow forks.
InvTwoBlocksAccepted == \A r1 \in RM:
\A r2 \in RM \ {r1}:
\/ rmState[r1].type /= "blockAccepted"
\/ rmState[r2].type /= "blockAccepted"
\/ rmState[r1].view = rmState[r2].view
\* InvFaultNodesCount states that there can be F faulty or dead nodes at max.
InvFaultNodesCount == Cardinality({
r \in RM : rmState[r].type = "bad" \/ rmState[r].type = "dead"
}) <= F
\* This theorem asserts the truth of the temporal formula whose meaning is that
\* the state predicates TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount are
\* the invariants of the specification Spec. This theorem is not supposed to be
\* checked by the TLC model checker, it's here for the reader's understanding of
\* the purpose of TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount.
THEOREM Spec => [](TypeOK /\ InvTwoBlocksAccepted /\ InvFaultNodesCount)
=============================================================================
\* Modification History
\* Last modified Mon Mar 06 15:36:57 MSK 2023 by root
\* Last modified Fri Feb 17 15:47:41 MSK 2023 by anna
\* Last modified Sat Jan 21 01:26:16 MSK 2023 by rik
\* Created Thu Dec 15 16:06:17 MSK 2022 by anna
-------------------------------- MODULE dbft --------------------------------
EXTENDS
Integers,
FiniteSets
CONSTANTS
\* RM is the set of consensus node indexes starting from 0.
\* Example: {0, 1, 2, 3}
RM,
\* RMFault is a set of consensus node indexes that are allowed to become
\* FAULT in the middle of every considered behavior and to send any
\* consensus message afterwards. RMFault must be a subset of RM. An empty
\* set means that all nodes are good in every possible behaviour.
\* Examples: {0}
\* {1, 3}
\* {}
RMFault,
\* RMDead is a set of consensus node indexes that are allowed to die in the
\* middle of every behaviour and do not send any message afterwards. RMDead
\* must be a subset of RM. An empty set means that all nodes are alive and
\* responding in every possible behaviour. RMDead may intersect the
\* RMFault set, which means that a node that is in both RMDead and RMFault
\* may become FAULT and send any message starting from some step of the
\* particular behaviour and may also die in the same behaviour which will
\* prevent it from sending any message.
\* Examples: {0}
\* {3, 2}
\* {}
RMDead,
\* MaxView is the maximum allowed view to be considered (starting from 0,
\* including the MaxView itself). This constraint was introduced to reduce
\* the number of possible model states to be checked. It is recommended to
\* keep this setting not too high (< N is highly recommended).
\* Example: 2
MaxView
VARIABLES
\* rmState is a set of consensus node states. It is represented by the
\* mapping (function) with domain RM and range RMStates. I.e. rmState[r] is
\* the state of the r-th consensus node at the current step.
rmState,
\* msgs is the shared pool of messages sent to the network by consensus nodes.
\* It is represented by a subset of Messages set.
msgs
\* vars is a tuple of all variables used in the specification. It is needed to
\* simplify fairness conditions definition.
vars == <<rmState, msgs>>
\* N is the number of validators.
N == Cardinality(RM)
\* F is the number of validators that are allowed to be malicious.
F == (N - 1) \div 3
\* M is the number of validators that must function correctly.
M == N - F
\* These assumptions are checked by the TLC model checker once at the start of
\* the model checking process. All the input data (declared constants) specified
\* in the "Model Overview" section must satisfy these constraints.
ASSUME
/\ RM \subseteq Nat
/\ N >= 4
/\ 0 \in RM
/\ RMFault \subseteq RM
/\ RMDead \subseteq RM
/\ Cardinality(RMFault) <= F
/\ Cardinality(RMDead) <= F
/\ Cardinality(RMFault \cup RMDead) <= F
/\ MaxView \in Nat
/\ MaxView <= 2
\* RMStates is a set of records where each record holds the node state and
\* the node current view.
RMStates == [
type: {"initialized", "prepareSent", "commitSent", "cv", "blockAccepted", "bad", "dead"},
view : Nat
]
\* Messages is a set of records where each record holds the message type,
\* the message sender and sender's view by the moment when message was sent.
Messages == [type : {"PrepareRequest", "PrepareResponse", "Commit", "ChangeView"}, rm : RM, view : Nat]
\* -------------- Useful operators --------------
\* IsPrimary is an operator defining whether provided node r is primary
\* for the current round from the r's point of view. It is a mapping
\* from RM to the set of {TRUE, FALSE}.
IsPrimary(r) == rmState[r].view % N = r
\* GetPrimary is an operator defining mapping from round index to the RM that
\* is primary in this round.
GetPrimary(view) == CHOOSE r \in RM : view % N = r
\* GetNewView returns the new view number based on the previous node view value.
\* The current specification only allows incrementing the view.
GetNewView(oldView) == oldView + 1
\* CountCommitted returns the number of nodes that have sent the Commit message
\* in the current round or in some other round.
CountCommitted(r) == Cardinality({rm \in RM : Cardinality({msg \in msgs : msg.rm = rm /\ msg.type = "Commit"}) /= 0})
\* MoreThanFNodesCommitted returns whether more than F nodes have been committed
\* in the current round (as the node r sees it).
\*
\* IMPORTANT NOTE: we intentionally do not add the "lost" nodes calculation to the specification, and here's
\* the reason: from the node's point of view we can't reliably check that some neighbour is completely
\* out of the network. It is possible that the node doesn't receive consensus messages from some other member
\* due to network delays. On the other hand, real nodes can go down at any time. The absence of the
\* member's message doesn't mean that the member is out of the network, we never can be sure about
\* that, thus, this information is unreliable and can't be trusted during the consensus process.
\* What can be trusted is whether a Commit message from some member was received by the node.
MoreThanFNodesCommitted(r) == CountCommitted(r) > F
\* PrepareRequestSentOrReceived denotes whether there's a PrepareRequest
\* message received from the current round's speaker (as the node r sees it).
PrepareRequestSentOrReceived(r) == [type |-> "PrepareRequest", rm |-> GetPrimary(rmState[r].view), view |-> rmState[r].view] \in msgs
\* -------------- Safety temporal formula --------------
\* Init is the initial predicate initializing values at the start of every
\* behaviour.
Init ==
/\ rmState = [r \in RM |-> [type |-> "initialized", view |-> 0]]
/\ msgs = {}
\* RMSendPrepareRequest describes the primary node r broadcasting PrepareRequest.
RMSendPrepareRequest(r) ==
/\ rmState[r].type = "initialized"
/\ IsPrimary(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendPrepareResponse describes non-primary node r receiving PrepareRequest from
\* the primary node of the current round (view) and broadcasting PrepareResponse.
\* This step assumes that PrepareRequest always contains valid transactions and
\* signatures.
RMSendPrepareResponse(r) ==
/\ \/ rmState[r].type = "initialized"
\* We do allow the transition from the "cv" state to the "prepareSent" or "commitSent" stage
\* as it is done in the code-level dBFT implementation by checking the NotAcceptingPayloadsDueToViewChanging
\* condition (see
\* https://github.com/nspcc-dev/dbft/blob/31c1bbdc74f2faa32ec9025062e3a4e2ccfd4214/dbft.go#L419
\* and
\* https://github.com/neo-project/neo-modules/blob/d00d90b9c27b3d0c3c57e9ca1f560a09975df241/src/DBFTPlugin/Consensus/ConsensusService.OnMessage.cs#L79).
\* However, we can't easily count the number of "lost" nodes in this specification to match precisely
\* the implementation. Moreover, we don't need it to be counted as the RMSendPrepareResponse enabling
\* condition specifies only the thing that may happen given some particular set of enabling conditions.
\* Thus, we've extended the NotAcceptingPayloadsDueToViewChanging condition to consider only MoreThanFNodesCommitted.
\* It should be noted that the logic of MoreThanFNodesCommittedOrLost can't be reliable in detecting lost nodes
\* (even with neo-project/neo#2057), because real nodes can go down at any time. See the comment above the MoreThanFNodesCommitted.
\/ /\ rmState[r].type = "cv"
/\ MoreThanFNodesCommitted(r)
/\ \neg IsPrimary(r)
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendCommit describes node r sending Commit if there's enough PrepareResponse
\* messages.
RMSendCommit(r) ==
/\ \/ rmState[r].type = "prepareSent"
\* We do allow the transition from the "cv" state to the "prepareSent" or "commitSent" stage,
\* see the related comment inside the RMSendPrepareResponse definition.
\/ /\ rmState[r].type = "cv"
/\ MoreThanFNodesCommitted(r)
/\ Cardinality({
msg \in msgs : /\ (msg.type = "PrepareResponse" \/ msg.type = "PrepareRequest")
/\ msg.view = rmState[r].view
}) >= M
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "commitSent"]
/\ msgs' = msgs \cup {[type |-> "Commit", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMAcceptBlock describes node r collecting enough Commit messages and accepting
\* the block.
RMAcceptBlock(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ PrepareRequestSentOrReceived(r)
/\ Cardinality({msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "blockAccepted"]
/\ UNCHANGED <<msgs>>
\* RMSendChangeView describes node r sending ChangeView message on timeout.
RMSendChangeView(r) ==
/\ \/ (rmState[r].type = "initialized" /\ \neg IsPrimary(r))
\/ rmState[r].type = "prepareSent"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ rmState' = [rmState EXCEPT ![r].type = "cv"]
/\ msgs' = msgs \cup {[type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]}
\* RMReceiveChangeView describes node r receiving enough ChangeView messages for
\* view changing.
RMReceiveChangeView(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ rmState[r].type /= "commitSent"
/\ Cardinality({
rm \in RM : Cardinality({
msg \in msgs : /\ msg.type = "ChangeView"
/\ msg.rm = rm
/\ GetNewView(msg.view) >= GetNewView(rmState[r].view)
}) /= 0
}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "initialized", ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMBeBad describes the faulty node r that will send any kind of consensus message starting
\* from the step it's gone wild. This step is enabled only when RMFault is non-empty set.
RMBeBad(r) ==
/\ r \in RMFault
/\ Cardinality({rm \in RM : rmState[rm].type = "bad"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "bad"]
/\ UNCHANGED <<msgs>>
\* RMFaultySendCV describes sending CV message by the faulty node r.
RMFaultySendCV(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultyDoCV describes view changing by the faulty node r.
RMFaultyDoCV(r) ==
/\ rmState[r].type = "bad"
/\ rmState' = [rmState EXCEPT ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMFaultySendPReq describes sending PrepareRequest message by the primary faulty node r.
RMFaultySendPReq(r) ==
/\ rmState[r].type = "bad"
/\ IsPrimary(r)
/\ LET pReq == [type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]
IN /\ pReq \notin msgs
/\ msgs' = msgs \cup {pReq}
/\ UNCHANGED <<rmState>>
\* RMFaultySendPResp describes sending PrepareResponse message by the non-primary faulty node r.
RMFaultySendPResp(r) ==
/\ rmState[r].type = "bad"
/\ \neg IsPrimary(r)
/\ LET pResp == [type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]
IN /\ pResp \notin msgs
/\ msgs' = msgs \cup {pResp}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCommit describes sending Commit message by the faulty node r.
RMFaultySendCommit(r) ==
/\ rmState[r].type = "bad"
/\ LET commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ commit \notin msgs
/\ msgs' = msgs \cup {commit}
/\ UNCHANGED <<rmState>>
\* RMDie describes node r that was removed from the network at the particular step
\* of the behaviour. After this node r can't change its state and accept/send messages.
RMDie(r) ==
/\ r \in RMDead
/\ Cardinality({rm \in RM : rmState[rm].type = "dead"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "dead"]
/\ UNCHANGED <<msgs>>
\* Terminating is an action that allows infinite stuttering to prevent deadlock on
\* behaviour termination. We consider termination to be valid if at least M nodes
\* have accepted the block.
Terminating ==
/\ Cardinality({rm \in RM : rmState[rm].type = "blockAccepted"}) >= M
/\ UNCHANGED <<msgs, rmState>>
\* Next is the next-state action describing the transition from the current state
\* to the next state of the behaviour.
Next ==
\/ Terminating
\/ \E r \in RM:
RMSendPrepareRequest(r) \/ RMSendPrepareResponse(r) \/ RMSendCommit(r)
\/ RMAcceptBlock(r) \/ RMSendChangeView(r) \/ RMReceiveChangeView(r)
\/ RMDie(r) \/ RMBeBad(r)
\/ RMFaultySendCV(r) \/ RMFaultyDoCV(r) \/ RMFaultySendCommit(r) \/ RMFaultySendPReq(r) \/ RMFaultySendPResp(r)
\* Safety is a temporal formula that describes the whole set of allowed
\* behaviours. It specifies only what the system MAY do (i.e. the set of
\* possible allowed behaviours for the system). It asserts only what may
\* happen; any behaviour that violates it does so at some point and
\* nothing past that point makes a difference.
\*
\* E.g. this safety formula (applied standalone) allows the behaviour to end
\* with an infinite set of stuttering steps (those steps that DO NOT change
\* neither msgs nor rmState) and never reach the state where at least one
\* node is committed or accepted the block.
\*
\* To forbid such behaviours we must specify what the system MUST
\* do. It will be specified below with the help of fairness conditions in
\* the Fairness formula.
Safety == Init /\ [][Next]_vars
\* -------------- Fairness temporal formula --------------
\* Fairness describes the temporal assumptions under which the model is working.
\* Usually it specifies different kinds of assumptions for each/some
\* subactions of the Next state action, but the only thing that bothers
\* us is preventing infinite stuttering at those steps where some of Next's
\* subactions are enabled. Thus, the only thing that we require from the
\* system is to keep taking steps until it's impossible to take them.
\* That's exactly how the weak fairness condition works: if some action
\* remains continuously enabled, it must eventually happen.
Fairness == WF_vars(Next)
\* -------------- Specification --------------
\* The complete specification of the protocol written as a temporal formula.
Spec == Safety /\ Fairness
\* -------------- Liveness temporal formula --------------
\* For every possible behaviour it's true that eventually (i.e. at least once
\* through the behaviour) block will be accepted. It is something that dBFT
\* must guarantee (and in practice this condition is violated).
TerminationRequirement == <>(Cardinality({r \in RM : rmState[r].type = "blockAccepted"}) >= M)
\* A liveness temporal formula asserts only what must happen (i.e. specifies
\* what the system MUST do). Any behaviour can NOT violate it at ANY point;
\* there's always the rest of the behaviour that can always make the liveness
\* formula true; if there's no such behaviour then the liveness formula is
\* violated. The liveness formula is supposed to be checked as a property
\* by the TLC model checker.
Liveness == TerminationRequirement
\* -------------- ModelConstraints --------------
\* MaxViewConstraint is a state predicate restricting the number of possible
\* behaviour states. It is needed to reduce model checking time and prevent
\* the model graph size explosion. This formula must be specified in the
\* "State constraint" section of the "Additional Spec Options" section inside
\* the model overview.
MaxViewConstraint == /\ \A r \in RM : rmState[r].view <= MaxView
/\ \A msg \in msgs : msg.view <= MaxView
\* -------------- Invariants of the specification --------------
\* Model invariant is a state predicate (statement) that must be true for
\* every step of every reachable behaviour. Model invariant is supposed to
\* be checked as an Invariant by the TLC Model Checker.
\* TypeOK is a type-correctness invariant. It states that all elements of
\* specification variables must have the proper type throughout the behaviour.
TypeOK ==
/\ rmState \in [RM -> RMStates]
/\ msgs \subseteq Messages
\* InvTwoBlocksAccepted states that there can't be two different blocks accepted in
\* the two different views, i.e. dBFT must not allow forks.
InvTwoBlocksAccepted == \A r1 \in RM:
\A r2 \in RM \ {r1}:
\/ rmState[r1].type /= "blockAccepted"
\/ rmState[r2].type /= "blockAccepted"
\/ rmState[r1].view = rmState[r2].view
\* InvFaultNodesCount states that there can be F faulty or dead nodes at max.
InvFaultNodesCount == Cardinality({
r \in RM : rmState[r].type = "bad" \/ rmState[r].type = "dead"
}) <= F
\* This theorem asserts the truth of the temporal formula whose meaning is that
\* the state predicates TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount are
\* the invariants of the specification Spec. This theorem is not supposed to be
\* checked by the TLC model checker, it's here for the reader's understanding of
\* the purpose of TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount.
THEOREM Spec => [](TypeOK /\ InvTwoBlocksAccepted /\ InvFaultNodesCount)
=============================================================================
\* Modification History
\* Last modified Mon Mar 06 15:36:57 MSK 2023 by root
\* Last modified Fri Feb 17 15:47:41 MSK 2023 by anna
\* Last modified Sat Jan 21 01:26:16 MSK 2023 by rik
\* Created Thu Dec 15 16:06:17 MSK 2022 by anna

View File

@ -1,42 +1,42 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="47"/>
<intAttribute key="maxHeapSize" value="50"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAccepted"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="8"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbft"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="47"/>
<intAttribute key="maxHeapSize" value="50"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAccepted"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="8"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbft"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>

File diff suppressed because it is too large Load Diff

View File

@ -1,42 +1,42 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="105"/>
<intAttribute key="maxHeapSize" value="25"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState, blockAccepted"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAcceptedAdvanced"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="4"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbftCentralizedCV"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="105"/>
<intAttribute key="maxHeapSize" value="25"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState, blockAccepted"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAcceptedAdvanced"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="4"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbftCentralizedCV"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>

View File

@ -1,427 +1,427 @@
-------------------------------- MODULE dbftCV3 --------------------------------
EXTENDS
Integers,
FiniteSets
CONSTANTS
\* RM is the set of consensus node indexes starting from 0.
\* Example: {0, 1, 2, 3}
RM,
\* RMFault is a set of consensus node indexes that are allowed to become
\* FAULT in the middle of every considered behavior and to send any
\* consensus message afterwards. RMFault must be a subset of RM. An empty
\* set means that all nodes are good in every possible behaviour.
\* Examples: {0}
\* {1, 3}
\* {}
RMFault,
\* RMDead is a set of consensus node indexes that are allowed to die in the
\* middle of every behaviour and do not send any message afterwards. RMDead
\* must be a subset of RM. An empty set means that all nodes are alive and
\* responding in every possible behaviour. RMDead may intersect the
\* RMFault set, which means that a node that is in both RMDead and RMFault
\* may become FAULT and send any message starting from some step of the
\* particular behaviour and may also die in the same behaviour which will
\* prevent it from sending any message.
\* Examples: {0}
\* {3, 2}
\* {}
RMDead,
\* MaxView is the maximum allowed view to be considered (starting from 0,
\* including the MaxView itself). This constraint was introduced to reduce
\* the number of possible model states to be checked. It is recommended to
\* keep this setting not too high (< N is highly recommended).
\* Example: 2
MaxView
VARIABLES
\* rmState is a set of consensus node states. It is represented by the
\* mapping (function) with domain RM and range RMStates. I.e. rmState[r] is
\* the state of the r-th consensus node at the current step.
rmState,
\* msgs is the shared pool of messages sent to the network by consensus nodes.
\* It is represented by a subset of Messages set.
msgs
\* vars is a tuple of all variables used in the specification. It is needed to
\* simplify fairness conditions definition.
vars == <<rmState, msgs>>
\* N is the number of validators.
N == Cardinality(RM)
\* F is the number of validators that are allowed to be malicious.
F == (N - 1) \div 3
\* M is the number of validators that must function correctly.
M == N - F
\* These assumptions are checked by the TLC model checker once at the start of
\* the model checking process. All the input data (declared constants) specified
\* in the "Model Overview" section must satisfy these constraints.
ASSUME
/\ RM \subseteq Nat
/\ N >= 4
/\ 0 \in RM
/\ RMFault \subseteq RM
/\ RMDead \subseteq RM
/\ Cardinality(RMFault) <= F
/\ Cardinality(RMDead) <= F
/\ Cardinality(RMFault \cup RMDead) <= F
/\ MaxView \in Nat
/\ MaxView <= 2
\* RMStates is a set of records where each record holds the node state and
\* the node current view.
RMStates == [
type: {"initialized", "prepareSent", "commitSent", "blockAccepted", "cv1", "cv2", "cv3", "bad", "dead"},
view : Nat
]
\* Messages is a set of records where each record holds the message type,
\* the message sender and sender's view by the moment when message was sent.
Messages == [type : {"PrepareRequest", "PrepareResponse", "Commit", "ChangeView1", "ChangeView2", "ChangeView3"}, rm : RM, view : Nat]
\* -------------- Useful operators --------------
\* IsPrimary is an operator defining whether provided node r is primary
\* for the current round from the r's point of view. It is a mapping
\* from RM to the set of {TRUE, FALSE}.
IsPrimary(r) == rmState[r].view % N = r
\* GetPrimary is an operator defining mapping from round index to the RM that
\* is primary in this round.
GetPrimary(view) == CHOOSE r \in RM : view % N = r
\* GetNewView returns the new view number based on the previous node view value.
\* The current specification only allows incrementing the view.
GetNewView(oldView) == oldView + 1
\* PrepareRequestSentOrReceived denotes whether there's a PrepareRequest
\* message received from the current round's speaker (as the node r sees it).
PrepareRequestSentOrReceived(r) == [type |-> "PrepareRequest", rm |-> GetPrimary(rmState[r].view), view |-> rmState[r].view] \in msgs
\* -------------- Safety temporal formula --------------
\* Init is the initial predicate initializing values at the start of every
\* behaviour.
Init ==
/\ rmState = [r \in RM |-> [type |-> "initialized", view |-> 0]]
/\ msgs = {}
\* RMSendPrepareRequest describes the primary node r broadcasting PrepareRequest.
RMSendPrepareRequest(r) ==
/\ rmState[r].type = "initialized"
/\ IsPrimary(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendPrepareResponse describes non-primary node r receiving PrepareRequest from
\* the primary node of the current round (view) and broadcasting PrepareResponse.
\* This step assumes that PrepareRequest always contains valid transactions and
\* signatures.
RMSendPrepareResponse(r) ==
/\ rmState[r].type = "initialized"
/\ \neg IsPrimary(r)
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendCommit describes node r sending Commit if there's enough PrepareRequest/PrepareResponse
\* messages and no node has sent the ChangeView3 message. It is possible to send the Commit after
\* the ChangeView1 or ChangeView2 message was sent with additional constraints.
RMSendCommit(r) ==
/\ \/ rmState[r].type = "prepareSent"
\/ rmState[r].type = "cv1"
\/ /\ rmState[r].type = "cv2"
/\ Cardinality({
msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view
}) > F
/\ Cardinality({
msg \in msgs : (msg.type = "PrepareResponse" \/ msg.type = "PrepareRequest") /\ msg.view = rmState[r].view
}) >= M
/\ Cardinality({
msg \in msgs : msg.type = "ChangeView3" /\ msg.view = rmState[r].view
}) = 0
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "commitSent"]
/\ msgs' = msgs \cup {[type |-> "Commit", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
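\* For illustration only (not part of the specification): with N = 4 (M = 3,
\* F = 1) a node that has already sent ChangeView2 may still send Commit only
\* if it sees at least two Commit messages (> F) for its view, at least three
\* PrepareRequest/PrepareResponse messages (>= M) and no ChangeView3 message,
\* so committing becomes impossible once the CV3 escalation has started.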
\* RMAcceptBlock describes node r collecting enough Commit messages and accepting
\* the block.
RMAcceptBlock(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ PrepareRequestSentOrReceived(r)
/\ Cardinality({
msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view
}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "blockAccepted"]
/\ UNCHANGED <<msgs>>
\* RMFetchBlock describes node r fetching the accepted block from some other node.
RMFetchBlock(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ \E rmAccepted \in RM : /\ rmState[rmAccepted].type = "blockAccepted"
/\ rmState' = [rmState EXCEPT ![r].type = "blockAccepted", ![r].view = rmState[rmAccepted].view]
/\ UNCHANGED <<msgs>>
\* RMSendChangeView1 describes node r sending ChangeView1 message on timeout.
\* Only non-primary node is allowed to send ChangeView1 message, as the primary
\* must send the PrepareRequest if the timer fires.
RMSendChangeView1(r) ==
/\ rmState[r].type = "initialized"
/\ \neg IsPrimary(r)
/\ rmState' = [rmState EXCEPT ![r].type = "cv1"]
/\ msgs' = msgs \cup {[type |-> "ChangeView1", rm |-> r, view |-> rmState[r].view]}
\* RMSendChangeView2 describes node r sending ChangeView2 message on timeout either from
\* "cv1" state or after the node has sent the PrepareRequest or PrepareResponse message.
RMSendChangeView2(r) ==
/\ \/ /\ rmState[r].type = "prepareSent"
/\ Cardinality({
msg \in msgs : msg.type = "ChangeView1" /\ msg.view = rmState[r].view
}) > 0
\/ rmState[r].type = "cv1"
/\ Cardinality({
msg \in msgs : (msg.type = "ChangeView1" \/ msg.type = "PrepareRequest" \/ msg.type = "PrepareResponse") /\ msg.view = rmState[r].view
}) >= M
/\ \/ Cardinality({
msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view
}) <= F
\/ Cardinality({
msg \in msgs : msg.type = "ChangeView3" /\ msg.view = rmState[r].view
}) > 0
/\ rmState' = [rmState EXCEPT ![r].type = "cv2"]
/\ msgs' = msgs \cup {[type |-> "ChangeView2", rm |-> r, view |-> rmState[r].view]}
\* RMSendChangeView3 describes node r sending ChangeView3 message on timeout either from
\* "cv2" state or after the node has sent the Commit message.
RMSendChangeView3(r) ==
/\ \/ rmState[r].type = "cv2"
\/ rmState[r].type = "commitSent"
/\ Cardinality({msg \in msgs : (msg.type = "ChangeView2" \/ msg.type = "Commit") /\ msg.view = rmState[r].view}) >= M
/\ Cardinality({msg \in msgs : (msg.type = "ChangeView2") /\ msg.view = rmState[r].view}) > 0
/\ Cardinality({msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view}) <= F
/\ rmState' = [rmState EXCEPT ![r].type = "cv3"]
/\ msgs' = msgs \cup {[type |-> "ChangeView3", rm |-> r, view |-> rmState[r].view]}
\* RMReceiveChangeView describes node r receiving enough ChangeView[1,2,3] messages for
\* view changing.
RMReceiveChangeView(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ \/ Cardinality({rm \in RM : Cardinality({msg \in msgs : /\ msg.rm = rm
/\ msg.type = "ChangeView1"
/\ GetNewView(msg.view) >= GetNewView(rmState[r].view)
}) # 0
}) >= M
\/ Cardinality({rm \in RM : Cardinality({msg \in msgs : /\ msg.rm = rm
/\ msg.type = "ChangeView2"
/\ GetNewView(msg.view) >= GetNewView(rmState[r].view)
}) # 0
}) >= M
\/ Cardinality({rm \in RM : Cardinality({msg \in msgs : /\ msg.rm = rm
/\ msg.type = "ChangeView3"
/\ GetNewView(msg.view) >= GetNewView(rmState[r].view)
}) # 0
}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "initialized", ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMBeBad describes the faulty node r that will send any kind of consensus message starting
\* from the step it's gone wild. This step is enabled only when RMFault is a non-empty set.
RMBeBad(r) ==
/\ r \in RMFault
/\ Cardinality({rm \in RM : rmState[rm].type = "bad"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "bad"]
/\ UNCHANGED <<msgs>>
\* RMFaultySendCV1 describes sending a CV1 message by the faulty node r.
RMFaultySendCV1(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView1", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCV2 describes sending CV2 message by the faulty node r.
RMFaultySendCV2(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView2", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCV3 describes sending CV3 message by the faulty node r.
RMFaultySendCV3(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView3", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultyDoCV describes view changing by the faulty node r.
RMFaultyDoCV(r) ==
/\ rmState[r].type = "bad"
/\ rmState' = [rmState EXCEPT ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMFaultySendPReq describes sending PrepareRequest message by the primary faulty node r.
RMFaultySendPReq(r) ==
/\ rmState[r].type = "bad"
/\ IsPrimary(r)
/\ LET pReq == [type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]
IN /\ pReq \notin msgs
/\ msgs' = msgs \cup {pReq}
/\ UNCHANGED <<rmState>>
\* RMFaultySendPResp describes sending PrepareResponse message by the non-primary faulty node r.
RMFaultySendPResp(r) ==
/\ rmState[r].type = "bad"
/\ \neg IsPrimary(r)
/\ LET pResp == [type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]
IN /\ pResp \notin msgs
/\ msgs' = msgs \cup {pResp}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCommit describes sending Commit message by the faulty node r.
RMFaultySendCommit(r) ==
/\ rmState[r].type = "bad"
/\ LET commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ commit \notin msgs
/\ msgs' = msgs \cup {commit}
/\ UNCHANGED <<rmState>>
\* RMDie describes node r being removed from the network at a particular step
\* of the behaviour. After this, node r can't change its state or accept/send messages.
RMDie(r) ==
/\ r \in RMDead
/\ Cardinality({rm \in RM : rmState[rm].type = "dead"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "dead"]
/\ UNCHANGED <<msgs>>
\* Terminating is an action that allows infinite stuttering to prevent deadlock on
\* behaviour termination. We consider termination to be valid if at least M nodes
\* have accepted the block.
Terminating ==
/\ Cardinality({rm \in RM : rmState[rm].type = "blockAccepted"}) >= M
/\ UNCHANGED <<msgs, rmState>>
\* The next-state action.
Next ==
\/ Terminating
\/ \E r \in RM:
RMSendPrepareRequest(r) \/ RMSendPrepareResponse(r) \/ RMSendCommit(r)
\/ RMAcceptBlock(r) \/ RMSendChangeView1(r) \/ RMReceiveChangeView(r) \/ RMBeBad(r) \/ RMSendChangeView2(r) \/ RMSendChangeView3(r)
\/ RMFaultySendCV1(r) \/ RMFaultyDoCV(r) \/ RMFaultySendCommit(r) \/ RMFaultySendPReq(r) \/ RMFaultySendPResp(r) \/ RMFaultySendCV2(r) \/ RMFaultySendCV3(r)
\/ RMDie(r) \/ RMFetchBlock(r)
\* Safety is a temporal formula that describes the whole set of allowed
\* behaviours. It specifies only what the system MAY do (i.e. the set of
\* possible allowed behaviours for the system). It asserts only what may
\* happen; any behaviour that violates it does so at some point, and
\* nothing past that point makes a difference.
\*
\* E.g. this safety formula (applied standalone) allows the behaviour to end
\* with an infinite sequence of stuttering steps (steps that change neither
\* msgs nor rmState) and never reach a state where at least one node has
\* committed or accepted the block.
\*
\* To forbid such behaviours we must specify what the system MUST
\* do. It will be specified below with the help of fairness conditions in
\* the Fairness formula.
Safety == Init /\ [][Next]_vars
\* -------------- Fairness temporal formula --------------
\* Fairness is a temporal assumption under which the model is working.
\* Usually it specifies different kinds of assumptions for each/some
\* subactions of the Next state action, but the only thing that bothers
\* us is preventing infinite stuttering at those steps where some of Next's
\* subactions are enabled. Thus, the only thing that we require from the
\* system is to keep taking steps until it's impossible to take them.
\* That's exactly how the weak fairness condition works: if some action
\* remains continuously enabled, it must eventually happen.
Fairness == WF_vars(Next)
\* -------------- Specification --------------
\* The complete specification of the protocol written as a temporal formula.
Spec == Safety /\ Fairness
\* -------------- Liveness temporal formula --------------
\* For every possible behaviour it's true that eventually (i.e. at least once
\* through the behaviour) the block will be accepted. It is something that dBFT
\* must guarantee (and in practice this condition is violated).
TerminationRequirement == <>(Cardinality({r \in RM : rmState[r].type = "blockAccepted"}) >= M)
\* A liveness temporal formula asserts only what must happen (i.e. specifies
\* what the system MUST do). No behaviour can violate it at any finite point;
\* there's always a continuation of the behaviour that can make the liveness
\* formula true; if there's no such continuation, then the liveness formula is
\* violated. The liveness formula is supposed to be checked as a property
\* by the TLC model checker.
Liveness == TerminationRequirement
\* -------------- ModelConstraints --------------
\* MaxViewConstraint is a state predicate restricting the number of possible
\* behaviour states. It is needed to reduce model checking time and prevent
\* the model graph size explosion. This formula must be specified in the
\* "State constraint" section of the "Additional Spec Options" section inside
\* the model overview.
MaxViewConstraint == /\ \A r \in RM : rmState[r].view <= MaxView
/\ \A msg \in msgs : msg.view <= MaxView
\* -------------- Invariants of the specification --------------
\* Model invariant is a state predicate (statement) that must be true for
\* every step of every reachable behaviour. Model invariant is supposed to
\* be checked as an Invariant by the TLC Model Checker.
\* TypeOK is a type-correctness invariant. It states that all elements of
\* specification variables must have the proper type throughout the behaviour.
TypeOK ==
/\ rmState \in [RM -> RMStates]
/\ msgs \subseteq Messages
\* InvTwoBlocksAccepted states that there can't be two different blocks accepted in
\* two different views, i.e. dBFT must not allow forks.
InvTwoBlocksAccepted == \A r1 \in RM:
\A r2 \in RM \ {r1}:
\/ rmState[r1].type /= "blockAccepted"
\/ rmState[r2].type /= "blockAccepted"
\/ rmState[r1].view = rmState[r2].view
\* InvFaultNodesCount states that there can be at most F faulty or dead nodes.
InvFaultNodesCount == Cardinality({
r \in RM : rmState[r].type = "bad" \/ rmState[r].type = "dead"
}) <= F
\* This theorem asserts the truth of the temporal formula whose meaning is that
\* the state predicates TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount are
\* the invariants of the specification Spec. This theorem is not supposed to be
\* checked by the TLC model checker, it's here for the reader's understanding of
\* the purpose of TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount.
THEOREM Spec => [](TypeOK /\ InvTwoBlocksAccepted /\ InvFaultNodesCount)
=============================================================================
\* Modification History
\* Last modified Wed Mar 01 12:11:07 MSK 2023 by root
\* Last modified Tue Feb 07 23:11:19 MSK 2023 by rik
\* Last modified Fri Feb 03 18:09:33 MSK 2023 by anna
\* Created Thu Dec 15 16:06:17 MSK 2022 by anna

View File

@ -1,42 +1,42 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="96"/>
<intAttribute key="maxHeapSize" value="25"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAccepted"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="4"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbftCV3"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>

View File

@ -1,466 +1,466 @@
---------------------------- MODULE dbftMultipool ----------------------------
EXTENDS
Integers,
FiniteSets
CONSTANTS
\* RM is the set of consensus node indexes starting from 0.
\* Example: {0, 1, 2, 3}
RM,
\* RMFault is a set of consensus node indexes that are allowed to become
\* FAULT in the middle of every considered behavior and to send any
\* consensus message afterwards. RMFault must be a subset of RM. An empty
\* set means that all nodes are good in every possible behaviour.
\* Examples: {0}
\* {1, 3}
\* {}
RMFault,
\* RMDead is a set of consensus node indexes that are allowed to die in the
\* middle of every behaviour and do not send any message afterwards. RMDead
\* must be a subset of RM. An empty set means that all nodes are alive and
\* responding in every possible behaviour. RMDead may intersect the
\* RMFault set, which means that a node present in both RMDead and RMFault
\* may become FAULT and send any message starting from some step of a
\* particular behaviour and may also die later in the same behaviour, which
\* prevents it from sending any further messages.
\* Examples: {0}
\* {3, 2}
\* {}
RMDead,
\* MaxView is the maximum allowed view to be considered (starting from 0,
\* including the MaxView itself). This constraint was introduced to reduce
\* the number of possible model states to be checked. It is recommended to
\* keep this setting not too high (< N is highly recommended).
\* Example: 2
MaxView,
\* MaxUndeliveredMessages is the maximum number of messages in the common
\* messages pool (msgs) that were not received and handled by all consensus
\* nodes. It must not be too small (>= 3) in order to allow the model to take
\* any steps at all. At the same time it must not be too high (<= 6 is
\* recommended) in order to avoid state graph size explosion.
MaxUndeliveredMessages
VARIABLES
\* rmState is a set of consensus node states. It is represented by the
\* mapping (function) with domain RM and range RMStates. I.e. rmState[r] is
\* the state of the r-th consensus node at the current step.
rmState,
\* msgs is the shared pool of messages sent to the network by consensus nodes.
\* It is represented by a subset of Messages set.
msgs
\* vars is a tuple of all variables used in the specification. It is needed to
\* simplify fairness conditions definition.
vars == <<rmState, msgs>>
\* N is the number of validators.
N == Cardinality(RM)
\* F is the number of validators that are allowed to be malicious.
F == (N - 1) \div 3
\* M is the number of validators that must function correctly.
M == N - F
\* These assumptions are checked by the TLC model checker once at the start of
\* the model checking process. All the input data (declared constants) specified
\* in the "Model Overview" section must satisfy these constraints.
ASSUME
/\ RM \subseteq Nat
/\ N >= 4
/\ 0 \in RM
/\ RMFault \subseteq RM
/\ RMDead \subseteq RM
/\ Cardinality(RMFault) <= F
/\ Cardinality(RMDead) <= F
/\ Cardinality(RMFault \cup RMDead) <= F
/\ MaxView \in Nat
/\ MaxView <= 2
/\ MaxUndeliveredMessages \in Nat
/\ MaxUndeliveredMessages >= 3 \* First value when block can be accepted in some behaviours.
\* Messages is a set of records where each record holds the message type,
\* the message sender and the sender's view at the moment the message was sent.
Messages == [type : {"PrepareRequest", "PrepareResponse", "Commit", "ChangeView"}, rm : RM, view : Nat]
\* RMStates is a set of records where each record holds the node state, the node current view
\* and the pool of messages the node has sent or received and handled.
RMStates == [
type: {"initialized", "prepareSent", "commitSent", "blockAccepted", "bad", "dead"},
view : Nat,
pool : SUBSET Messages
]
\* -------------- Useful operators --------------
\* IsPrimary is an operator defining whether provided node r is primary
\* for the current round from r's point of view. It is a mapping
\* from RM to the set of {TRUE, FALSE}.
IsPrimary(r) == rmState[r].view % N = r
\* GetPrimary is an operator defining mapping from round index to the RM that
\* is primary in this round.
GetPrimary(view) == CHOOSE r \in RM : view % N = r
\* GetNewView returns the new view number based on the previous node view value.
\* The current specification only allows incrementing the view.
GetNewView(oldView) == oldView + 1
\* IsViewChanging denotes whether node r has sent a ChangeView message for the
\* current (or later) round.
IsViewChanging(r) == Cardinality({msg \in rmState[r].pool : msg.type = "ChangeView" /\ msg.view >= rmState[r].view /\ msg.rm = r}) /= 0
\* CountCommitted returns the number of nodes that have sent the Commit message
\* in the current round (as the node r sees it).
CountCommitted(r) == Cardinality({rm \in RM : Cardinality({msg \in rmState[r].pool : msg.rm = rm /\ msg.type = "Commit"}) /= 0})
\* CountFailed returns the number of nodes that haven't sent any message since
\* the last round (as the node r sees it from the point of its pool).
CountFailed(r) == Cardinality({rm \in RM : Cardinality({msg \in rmState[r].pool : msg.rm = rm /\ msg.view >= rmState[r].view}) = 0 })
\* MoreThanFNodesCommittedOrLost denotes whether more than F nodes committed or
\* failed to communicate in the current round.
MoreThanFNodesCommittedOrLost(r) == CountCommitted(r) + CountFailed(r) > F
\* NotAcceptingPayloadsDueToViewChanging returns whether the node doesn't accept
\* payloads in the current step.
NotAcceptingPayloadsDueToViewChanging(r) ==
/\ IsViewChanging(r)
/\ \neg MoreThanFNodesCommittedOrLost(r)
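\* For illustration only (not part of the specification): with N = 4 (F = 1),
\* if a view-changing node r has one Commit in its pool and one node has been
\* silent since the last round, then CountCommitted(r) + CountFailed(r) = 2 > F,
\* MoreThanFNodesCommittedOrLost(r) holds and r keeps accepting payloads even
\* though its own ChangeView is in flight.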
\* PrepareRequestSentOrReceived denotes whether there's a PrepareRequest
\* message received from the current round's speaker (as the node r sees it).
PrepareRequestSentOrReceived(r) == [type |-> "PrepareRequest", rm |-> GetPrimary(rmState[r].view), view |-> rmState[r].view] \in rmState[r].pool
\* CommitSent returns whether the node has its commit sent for the current block.
CommitSent(r) == Cardinality({msg \in rmState[r].pool : msg.rm = r /\ msg.type = "Commit"}) > 0
\* -------------- Safety temporal formula --------------
\* Init is the initial predicate initializing values at the start of every
\* behaviour.
Init ==
/\ rmState = [r \in RM |-> [type |-> "initialized", view |-> 1, pool |-> {}]]
/\ msgs = {}
\* RMSendPrepareRequest describes the primary node r broadcasting PrepareRequest.
RMSendPrepareRequest(r) ==
/\ rmState[r].type = "initialized"
/\ IsPrimary(r)
/\ LET pReq == [type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]
commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ pReq \notin msgs
/\ IF Cardinality({m \in rmState[r].pool : m.type = "PrepareResponse" /\ m.view = rmState[r].view}) < M - 1 \* -1 is for the current PrepareRequest.
THEN /\ rmState' = [rmState EXCEPT ![r].type = "prepareSent", ![r].pool = rmState[r].pool \cup {pReq}]
/\ msgs' = msgs \cup {pReq}
ELSE /\ msgs' = msgs \cup {pReq, commit}
/\ IF Cardinality({m \in rmState[r].pool : m.type = "Commit" /\ m.view = rmState[r].view}) < M-1 \* -1 is for the current Commit
THEN rmState' = [rmState EXCEPT ![r].type = "commitSent", ![r].pool = rmState[r].pool \cup {pReq, commit}]
ELSE rmState' = [rmState EXCEPT ![r].type = "blockAccepted", ![r].pool = rmState[r].pool \cup {pReq, commit}]
/\ UNCHANGED <<>>
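\* For illustration only (not part of the specification): with M = 3 the
\* primary above attaches a Commit to its PrepareRequest as soon as its pool
\* already holds at least two PrepareResponses for the current view (M - 1,
\* the missing prepare being its own PrepareRequest), and accepts the block
\* right away if it also holds at least two Commits.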
\* RMSendChangeView describes node r sending ChangeView message on timeout.
RMSendChangeView(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ \/ (IsPrimary(r) /\ PrepareRequestSentOrReceived(r))
\/ (\neg IsPrimary(r) /\ \neg CommitSent(r))
/\ LET msg == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ msg \notin msgs
/\ msgs' = msgs \cup {msg}
/\ IF Cardinality({m \in rmState[r].pool : m.type = "ChangeView" /\ GetNewView(m.view) >= GetNewView(msg.view)}) >= M-1 \* -1 is for the currently sent CV
THEN rmState' = [rmState EXCEPT ![r].type = "initialized", ![r].view = GetNewView(msg.view), ![r].pool = rmState[r].pool \cup {msg}]
ELSE rmState' = [rmState EXCEPT ![r].pool = rmState[r].pool \cup {msg}]
\* OnTimeout describes two actions the node can take on timeout for waiting any event.
OnTimeout(r) ==
\/ RMSendPrepareRequest(r)
\/ RMSendChangeView(r)
\* RMOnPrepareRequest describes non-primary node r receiving PrepareRequest from the
\* primary node of the current round (view) and broadcasting PrepareResponse.
\* This step assumes that PrepareRequest always contains valid transactions and
\* signatures.
RMOnPrepareRequest(r) ==
/\ rmState[r].type = "initialized"
/\ \E msg \in msgs \ rmState[r].pool:
/\ msg.rm /= r
/\ msg.type = "PrepareRequest"
/\ msg.view = rmState[r].view
/\ \neg IsPrimary(r)
/\ \neg NotAcceptingPayloadsDueToViewChanging(r) \* dbft.go -L296, in C# node, but not in ours
/\ LET pResp == [type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]
commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN IF Cardinality({m \in rmState[r].pool : m.type = "PrepareResponse" /\ m.view = rmState[r].view}) < M - 1 - 1 \* -1 is for reveived PrepareRequest; -1 is for current PrepareResponse
THEN /\ rmState' = [rmState EXCEPT ![r].type = "prepareSent", ![r].pool = rmState[r].pool \cup {msg, pResp}]
/\ msgs' = msgs \cup {pResp}
ELSE /\ msgs' = msgs \cup {msg, pResp, commit}
/\ IF Cardinality({m \in rmState[r].pool : m.type = "Commit" /\ m.view = rmState[r].view}) < M-1 \* -1 is for the current Commit
THEN rmState' = [rmState EXCEPT ![r].type = "commitSent", ![r].pool = rmState[r].pool \cup {msg, pResp, commit}]
ELSE rmState' = [rmState EXCEPT ![r].type = "blockAccepted", ![r].pool = rmState[r].pool \cup {msg, pResp, commit}]
/\ UNCHANGED <<>>
\* RMOnPrepareResponse describes node r accepting PrepareResponse message and handling it.
\* If enough PrepareResponses have been collected, it will send the Commit; if there are
\* enough Commits, it will accept the block.
RMOnPrepareResponse(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ \E msg \in msgs \ rmState[r].pool:
/\ msg.rm /= r
/\ msg.type = "PrepareResponse"
/\ msg.view = rmState[r].view
/\ \neg NotAcceptingPayloadsDueToViewChanging(r)
/\ IF \/ Cardinality({m \in rmState[r].pool : (m.type = "PrepareRequest" \/ m.type = "PrepareResponse") /\ m.view = rmState[r].view}) < M - 1 \* -1 is for the currently received PrepareResponse.
\/ CommitSent(r)
\/ \neg PrepareRequestSentOrReceived(r)
THEN /\ rmState' = [rmState EXCEPT ![r].pool = rmState[r].pool \cup {msg}]
/\ UNCHANGED <<msgs>>
ELSE LET commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ msgs' = msgs \cup {msg, commit}
/\ IF Cardinality({m \in rmState[r].pool : m.type = "Commit" /\ m.view = rmState[r].view}) < M-1 \* -1 is for the current Commit
THEN rmState' = [rmState EXCEPT ![r].type = "commitSent", ![r].pool = rmState[r].pool \cup {msg, commit}]
ELSE rmState' = [rmState EXCEPT ![r].type = "blockAccepted", ![r].pool = rmState[r].pool \cup {msg, commit}]
\* RMOnCommit describes node r accepting a Commit message and (in case there are enough
\* Commits) accepting the block.
RMOnCommit(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ \E msg \in msgs \ rmState[r].pool:
/\ msg.rm /= r
/\ msg.type = "Commit"
/\ msg.view = rmState[r].view
/\ IF Cardinality({m \in rmState[r].pool : m.type = "Commit" /\ m.view = rmState[r].view}) < M-1 \* -1 is for the currently accepting commit
THEN rmState' = [rmState EXCEPT ![r].pool = rmState[r].pool \cup {msg}]
ELSE rmState' = [rmState EXCEPT ![r].type = "blockAccepted", ![r].pool = rmState[r].pool \cup {msg}]
/\ UNCHANGED <<msgs>>
\* RMOnChangeView describes node r receiving a ChangeView message and (in case enough
\* ChangeViews are collected) changing its view.
RMOnChangeView(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ \E msg \in msgs \ rmState[r].pool:
/\ msg.rm /= r
/\ msg.type = "ChangeView"
/\ msg.view = rmState[r].view
/\ \neg CommitSent(r)
/\ Cardinality({m \in rmState[r].pool : m.type = "ChangeView" /\ m.rm = msg.rm /\ m.view > msg.view}) = 0
/\ IF Cardinality({m \in rmState[r].pool : m.type = "ChangeView" /\ GetNewView(m.view) >= GetNewView(msg.view)}) < M-1 \* -1 is for the currently accepting CV
THEN rmState' = [rmState EXCEPT ![r].pool = rmState[r].pool \cup {msg}]
ELSE rmState' = [rmState EXCEPT ![r].type = "initialized", ![r].view = GetNewView(msg.view), ![r].pool = rmState[r].pool \cup {msg}]
/\ UNCHANGED <<msgs>>
\* RMBeBad describes the faulty node r that will send any kind of consensus message starting
\* from the step it's gone wild. This step is enabled only when RMFault is a non-empty set.
RMBeBad(r) ==
/\ r \in RMFault
/\ Cardinality({rm \in RM : rmState[rm].type = "bad"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "bad"]
/\ UNCHANGED <<msgs>>
\* RMFaultySendCV describes sending CV message by the faulty node r.
RMFaultySendCV(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultyDoCV describes view changing by the faulty node r.
RMFaultyDoCV(r) ==
/\ rmState[r].type = "bad"
/\ rmState' = [rmState EXCEPT ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMFaultySendPReq describes sending PrepareRequest message by the primary faulty node r.
RMFaultySendPReq(r) ==
/\ rmState[r].type = "bad"
/\ IsPrimary(r)
/\ LET pReq == [type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]
IN /\ pReq \notin msgs
/\ msgs' = msgs \cup {pReq}
/\ UNCHANGED <<rmState>>
\* RMFaultySendPResp describes sending PrepareResponse message by the non-primary faulty node r.
RMFaultySendPResp(r) ==
/\ rmState[r].type = "bad"
/\ \neg IsPrimary(r)
/\ LET pResp == [type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]
IN /\ pResp \notin msgs
/\ msgs' = msgs \cup {pResp}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCommit describes sending Commit message by the faulty node r.
RMFaultySendCommit(r) ==
/\ rmState[r].type = "bad"
/\ LET commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ commit \notin msgs
/\ msgs' = msgs \cup {commit}
/\ UNCHANGED <<rmState>>
\* RMDie describes node r being removed from the network at a particular step
\* of the behaviour. After this, node r can't change its state or accept/send messages.
RMDie(r) ==
/\ r \in RMDead
/\ Cardinality({rm \in RM : rmState[rm].type = "dead"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "dead"]
/\ UNCHANGED <<msgs>>
\* Terminating is an action that allows infinite stuttering to prevent deadlock on
\* behaviour termination. We consider termination to be valid if at least one
\* node has accepted the block.
Terminating ==
/\ Cardinality({rm \in RM : rmState[rm].type = "blockAccepted"}) >=1
/\ UNCHANGED <<msgs, rmState>>
\* Next is the next-state action describing the transition from the current state
\* to the next state of the behaviour.
Next ==
\/ Terminating
\/ \E r \in RM :
\/ OnTimeout(r)
\/ RMOnPrepareRequest(r) \/ RMOnPrepareResponse(r) \/ RMOnCommit(r) \/ RMOnChangeView(r)
\/ RMDie(r) \/ RMBeBad(r)
\/ RMFaultySendCV(r) \/ RMFaultyDoCV(r) \/ RMFaultySendCommit(r) \/ RMFaultySendPReq(r) \/ RMFaultySendPResp(r)
\* Safety is a temporal formula that describes the whole set of allowed
\* behaviours. It specifies only what the system MAY do (i.e. the set of
\* possible allowed behaviours for the system). It asserts only what may
\* happen; any behaviour that violates it does so at some point, and
\* nothing past that point makes a difference.
\*
\* E.g. this safety formula (applied standalone) allows the behaviour to end
\* with an infinite sequence of stuttering steps (steps that change neither
\* msgs nor rmState) and never reach a state where at least one node has
\* committed or accepted the block.
\*
\* To forbid such behaviours we must specify what the system MUST
\* do. It will be specified below with the help of fairness conditions in
\* the Fairness formula.
Safety == Init /\ [][Next]_vars
\* -------------- Fairness temporal formula --------------
\* Fairness is a temporal assumption under which the model is working.
\* Usually it specifies different kinds of assumptions for each/some
\* subactions of the Next state action, but the only thing that bothers
\* us is preventing infinite stuttering at those steps where some of Next's
\* subactions are enabled. Thus, the only thing that we require from the
\* system is to keep taking steps until it's impossible to take them.
\* That's exactly how the weak fairness condition works: if some action
\* remains continuously enabled, it must eventually happen.
Fairness == WF_vars(Next)
\* -------------- Specification --------------
\* The complete specification of the protocol written as a temporal formula.
Spec == Safety /\ Fairness
\* -------------- Liveness temporal formula --------------
\* For every possible behaviour it's true that there's at least one PrepareRequest
\* message from the speaker, there's at least one PrepareResponse message and at
\* least one Commit message.
PrepareRequestSentRequirement == <>(\E msg \in msgs : msg.type = "PrepareRequest")
PrepareResponseSentRequirement == <>(\E msg \in msgs : msg.type = "PrepareResponse")
CommitSentRequirement == <>(\E msg \in msgs : msg.type = "Commit")
\* For every possible behaviour it's true that eventually (i.e. at least once
\* through the behaviour) the block will be accepted. It is something that dBFT
\* must guarantee (and in practice this condition is violated).
TerminationRequirement == <>(Cardinality({r \in RM : rmState[r].type = "blockAccepted"}) >= M)
\* A liveness temporal formula asserts only what must happen (i.e. specifies
\* what the system MUST do). No behaviour can violate it at any finite point;
\* there's always a continuation of the behaviour that can make the liveness
\* formula true; if there's no such continuation, then the liveness formula is
\* violated. The liveness formula is supposed to be checked as a property
\* by the TLC model checker.
Liveness == /\ PrepareRequestSentRequirement
/\ PrepareResponseSentRequirement
/\ CommitSentRequirement
/\ TerminationRequirement
\* -------------- Model constraints --------------
\* Model constraints are a set of state predicates restricting the number of possible
\* behaviour states. It is needed to reduce model checking time and prevent
\* the model graph size explosion. These formulas must be specified in the
\* "State constraint" section of the "Additional Spec Options" section inside
\* the model overview.
\* MaxViewConstraint is a state predicate restricting the maximum view of messages
\* and consensus nodes state.
MaxViewConstraint == /\ \A r \in RM : rmState[r].view <= MaxView
/\ \A msg \in msgs : msg.view <= MaxView
\* MaxUndeliveredMessageConstraint is a state predicate restricting the maximum
\* number of messages not yet delivered to every consensus node.
MaxUndeliveredMessageConstraint == Cardinality({msg \in msgs : \E rm \in RM : msg \notin rmState[rm].pool}) <= MaxUndeliveredMessages
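\* For illustration only (not part of the specification): a message stops
\* counting as undelivered once it is present in every node's pool, so with
\* MaxUndeliveredMessages = 3 TLC prunes every state in which more than three
\* sent messages are still missing from at least one node's pool.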
\* ModelConstraint is the overall model constraint rule.
ModelConstraint == MaxViewConstraint /\ MaxUndeliveredMessageConstraint
\* -------------- Invariants of the specification --------------
\* Model invariant is a state predicate (statement) that must be true for
\* every step of every reachable behaviour. Model invariant is supposed to
\* be checked as an Invariant by the TLC Model Checker.
\* TypeOK is a type-correctness invariant. It states that all elements of
\* specification variables must have the proper type throughout the behaviour.
TypeOK ==
/\ rmState \in [RM -> RMStates]
/\ msgs \subseteq Messages
\* InvTwoBlocksAccepted states that there can't be two different blocks accepted in
\* two different views, i.e. dBFT must not allow forks.
InvTwoBlocksAccepted == \A r1 \in RM:
\A r2 \in RM \ {r1}:
\/ rmState[r1].type /= "blockAccepted"
\/ rmState[r2].type /= "blockAccepted"
\/ rmState[r1].view = rmState[r2].view
\* InvDeadlock is a deadlock invariant; it states that the following situation is expected
\* never to happen: one node has committed in some view, two others have committed in
\* a larger view, and the last one is changing its view.
InvDeadlock == \A r1 \in RM :
\A r2 \in RM \ {r1} :
\A r3 \in RM \ {r1, r2} :
\A r4 \in RM \ {r1, r2, r3} :
\/ rmState[r1].type /= "commitSent"
\/ rmState[r2].type /= "commitSent"
\/ rmState[r3].type /= "commitSent"
\/ \neg IsViewChanging(r4)
\/ rmState[r1].view >= rmState[r2].view
\/ rmState[r2].view /= rmState[r3].view
\/ rmState[r3].view /= rmState[r4].view
\* InvFaultNodesCount states that there can be at most F faulty or dead nodes.
InvFaultNodesCount == Cardinality({
r \in RM : rmState[r].type = "bad" \/ rmState[r].type = "dead"
}) <= F
\* This theorem asserts the truth of the temporal formula whose meaning is that
\* the state predicates TypeOK, InvTwoBlocksAccepted, InvDeadlock and InvFaultNodesCount are
\* the invariants of the specification Spec. This theorem is not supposed to be
\* checked by the TLC model checker, it's here for the reader's understanding of
\* the purpose of TypeOK, InvTwoBlocksAccepted, InvDeadlock and InvFaultNodesCount.
THEOREM Spec => [](TypeOK /\ InvTwoBlocksAccepted /\ InvDeadlock /\ InvFaultNodesCount)
=============================================================================
\* Modification History
\* Last modified Fri Feb 17 15:51:19 MSK 2023 by anna
\* Created Tue Jan 10 12:28:45 MSK 2023 by anna

View File

@ -1,44 +1,44 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="70"/>
<intAttribute key="maxHeapSize" value="50"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAccepted"/>
<listEntry value="1InvDeadlock"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
<listEntry value="MaxUndeliveredMessages;;6;0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="ModelConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="8"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbftMultipool"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>

View File

@ -1,430 +1,430 @@
-------------------------------- MODULE dbft --------------------------------
EXTENDS
Integers,
FiniteSets
CONSTANTS
\* RM is the set of consensus node indexes starting from 0.
\* Example: {0, 1, 2, 3}
RM,
\* RMFault is a set of consensus node indexes that are allowed to become
\* FAULT in the middle of every considered behavior and to send any
\* consensus message afterwards. RMFault must be a subset of RM. An empty
\* set means that all nodes are good in every possible behaviour.
\* Examples: {0}
\* {1, 3}
\* {}
RMFault,
\* RMDead is a set of consensus node indexes that are allowed to die in the
\* middle of every behaviour and do not send any message afterwards. RMDead
\* must be a subset of RM. An empty set means that all nodes are alive and
\* responding in every possible behaviour. RMDead may intersect the
\* RMFault set, which means that a node that is in both RMDead and RMFault
\* may become FAULT and send any message starting from some step of the
\* particular behaviour, and may also die in the same behaviour, which will
\* prevent it from sending any further messages.
\* Examples: {0}
\* {3, 2}
\* {}
RMDead,
\* MaxView is the maximum allowed view to be considered (starting from 0,
\* including the MaxView itself). This constraint was introduced to reduce
\* the number of possible model states to be checked. It is recommended to
\* keep this setting not too high (< N is highly recommended).
\* Example: 2
MaxView
VARIABLES
\* rmState is a set of consensus node states. It is represented by the
\* mapping (function) with domain RM and range RMStates. I.e. rmState[r] is
\* the state of the r-th consensus node at the current step.
rmState,
\* msgs is the shared pool of messages sent to the network by consensus nodes.
\* It is represented by a subset of Messages set.
msgs
\* vars is a tuple of all variables used in the specification. It is needed to
\* simplify fairness conditions definition.
vars == <<rmState, msgs>>
\* N is the number of validators.
N == Cardinality(RM)
\* F is the number of validators that are allowed to be malicious.
F == (N - 1) \div 3
\* M is the number of validators that must function correctly.
M == N - F
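\* A worked instance of the thresholds above (assuming the example RM = {0, 1, 2, 3}):
\* N = 4, F = (4 - 1) \div 3 = 1 and M = 4 - 1 = 3; with seven validators N = 7,
\* F = 2 and M = 5.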
\* These assumptions are checked by the TLC model checker once at the start of
\* the model checking process. All the input data (declared constants) specified
\* in the "Model Overview" section must satisfy these constraints.
ASSUME
/\ RM \subseteq Nat
/\ N >= 4
/\ 0 \in RM
/\ RMFault \subseteq RM
/\ RMDead \subseteq RM
/\ Cardinality(RMFault) <= F
/\ Cardinality(RMDead) <= F
/\ Cardinality(RMFault \cup RMDead) <= F
/\ MaxView \in Nat
/\ MaxView <= 2
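\* For illustration (an assumed instantiation, not prescribed by the spec):
\* RM = {0, 1, 2, 3}, RMFault = {}, RMDead = {3}, MaxView = 1 satisfies every
\* assumption above, while RMDead = {2, 3} would be rejected because
\* Cardinality(RMDead) = 2 exceeds F = 1.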
\* RMStates is a set of records where each record holds the node state and
\* the node's current view.
RMStates == [
type: {"initialized", "prepareSent", "commitSent", "cv", "commitAckSent", "blockAccepted", "bad", "dead"},
view : Nat
]
\* Messages is a set of records where each record holds the message type,
\* the message sender and the sender's view at the moment the message was sent.
Messages == [type : {"PrepareRequest", "PrepareResponse", "Commit", "CommitAck", "ChangeView"}, rm : RM, view : Nat]
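\* For example, [type |-> "PrepareRequest", rm |-> 0, view |-> 0] is one element
\* of Messages (assuming RM = {0, 1, 2, 3}): the view-0 primary's prepare request.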
\* -------------- Useful operators --------------
\* IsPrimary is an operator defining whether the provided node r is primary
\* for the current round from r's point of view. It is a mapping
\* from RM to the set {TRUE, FALSE}.
IsPrimary(r) == rmState[r].view % N = r
\* GetPrimary is an operator defining a mapping from a round index to the RM that
\* is primary in that round.
GetPrimary(view) == CHOOSE r \in RM : view % N = r
\* GetNewView returns the new view number based on the node's previous view value.
\* The current specification only allows incrementing the view.
GetNewView(oldView) == oldView + 1
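\* A small worked example of the three operators above (assuming RM = {0, 1, 2, 3},
\* so N = 4): GetPrimary(0) = 0, GetPrimary(1) = 1 and GetPrimary(5) = 1 since
\* 5 % 4 = 1; IsPrimary(2) holds exactly when rmState[2].view % 4 = 2; and
\* GetNewView(1) = 2.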
\* CountCommitted returns the number of nodes that have sent the Commit message
\* in the current round or in some other round.
CountCommitted(r) == Cardinality({rm \in RM : Cardinality({msg \in msgs : msg.rm = rm /\ msg.type = "Commit"}) /= 0})
\* MoreThanFNodesCommitted returns whether more than F nodes have committed
\* in the current round (as the node r sees it).
\*
\* IMPORTANT NOTE: we intentionally do not add the "lost" nodes calculation to the specification, and here's
\* the reason: from the node's point of view we can't reliably check that some neighbour is completely
\* out of the network. It is possible that the node doesn't receive consensus messages from some other member
\* due to network delays. On the other hand, real nodes can go down at any time. The absence of a
\* member's message doesn't mean that the member is out of the network; we can never be sure about
\* that, thus this information is unreliable and can't be trusted during the consensus process.
\* What can be trusted is whether a Commit message from some member has been received by the node.
MoreThanFNodesCommitted(r) == CountCommitted(r) > F
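\* For example, with N = 4 and F = 1 this becomes TRUE as soon as Commit
\* messages from at least two distinct nodes are present in msgs.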
\* PrepareRequestSentOrReceived denotes whether there's a PrepareRequest
\* message received from the current round's speaker (as the node r sees it).
PrepareRequestSentOrReceived(r) == [type |-> "PrepareRequest", rm |-> GetPrimary(rmState[r].view), view |-> rmState[r].view] \in msgs
\* -------------- Safety temporal formula --------------
\* Init is the initial predicate initializing values at the start of every
\* behaviour.
Init ==
/\ rmState = [r \in RM |-> [type |-> "initialized", view |-> 0]]
/\ msgs = {}
\* RMSendPrepareRequest describes the primary node r broadcasting PrepareRequest.
RMSendPrepareRequest(r) ==
/\ rmState[r].type = "initialized"
/\ IsPrimary(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
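\* An illustrative step (assuming RM = {0, 1, 2, 3} and the initial state): every
\* node starts "initialized" in view 0, so only node 0 satisfies IsPrimary; taking
\* this action adds [type |-> "PrepareRequest", rm |-> 0, view |-> 0] to msgs and
\* moves node 0 to "prepareSent".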
\* RMSendPrepareResponse describes non-primary node r receiving PrepareRequest from
\* the primary node of the current round (view) and broadcasting PrepareResponse.
\* This step assumes that PrepareRequest always contains valid transactions and
\* signatures.
RMSendPrepareResponse(r) ==
/\ \/ rmState[r].type = "initialized"
\* We do allow the transition from the "cv" state to the "prepareSent" or "commitSent" stage
\* as it is done in the code-level dBFT implementation by checking the NotAcceptingPayloadsDueToViewChanging
\* condition (see
\* https://github.com/nspcc-dev/dbft/blob/31c1bbdc74f2faa32ec9025062e3a4e2ccfd4214/dbft.go#L419
\* and
\* https://github.com/neo-project/neo-modules/blob/d00d90b9c27b3d0c3c57e9ca1f560a09975df241/src/DBFTPlugin/Consensus/ConsensusService.OnMessage.cs#L79).
\* However, we can't easily count the number of "lost" nodes in this specification to match the
\* implementation precisely. Moreover, we don't need to count them, as the RMSendPrepareResponse enabling
\* condition only specifies what may happen under a particular set of enabling conditions.
\* Thus, we've extended the NotAcceptingPayloadsDueToViewChanging condition to consider only MoreThanFNodesCommitted.
\* It should be noted that the logic of MoreThanFNodesCommittedOrLost can't be reliable in detecting lost nodes
\* (even with neo-project/neo#2057), because real nodes can go down at any time. See the comment above the MoreThanFNodesCommitted.
\/ /\ rmState[r].type = "cv"
/\ MoreThanFNodesCommitted(r)
/\ \neg IsPrimary(r)
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "prepareSent"]
/\ msgs' = msgs \cup {[type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMSendCommit describes node r sending Commit if there are enough PrepareResponse
\* messages.
RMSendCommit(r) ==
/\ \/ rmState[r].type = "prepareSent"
\* We do allow the transition from the "cv" state to the "prepareSent" or "commitSent" stage,
\* see the related comment inside the RMSendPrepareResponse definition.
\/ /\ rmState[r].type = "cv"
/\ MoreThanFNodesCommitted(r)
/\ Cardinality({
msg \in msgs : /\ (msg.type = "PrepareResponse" \/ msg.type = "PrepareRequest")
/\ msg.view = rmState[r].view
}) >= M
/\ PrepareRequestSentOrReceived(r)
/\ rmState' = [rmState EXCEPT ![r].type = "commitSent"]
/\ msgs' = msgs \cup {[type |-> "Commit", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
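\* For example, with M = 3 the cardinality check above is satisfied once the
\* PrepareRequest plus at least two PrepareResponses for r's current view are
\* in msgs, at which point r may move to "commitSent" and broadcast its Commit.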
\* RMSendCommitAck describes node r collecting enough Commit messages and sending
\* the CommitAck message.
RMSendCommitAck(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "commitAckSent"
/\ rmState[r].type /= "blockAccepted"
/\ PrepareRequestSentOrReceived(r)
/\ Cardinality({msg \in msgs : msg.type = "Commit" /\ msg.view = rmState[r].view}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "commitAckSent"]
/\ msgs' = msgs \cup {[type |-> "CommitAck", rm |-> r, view |-> rmState[r].view]}
/\ UNCHANGED <<>>
\* RMAcceptBlock describes node r collecting enough CommitAck messages and accepting
\* the block.
RMAcceptBlock(r) ==
/\ rmState[r].type = "commitAckSent"
/\ Cardinality({msg \in msgs : msg.type = "CommitAck" /\ msg.view = rmState[r].view}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "blockAccepted"]
/\ UNCHANGED <<msgs>>
\* RMSendChangeView describes node r sending ChangeView message on timeout.
RMSendChangeView(r) ==
/\ \/ (rmState[r].type = "initialized" /\ \neg IsPrimary(r))
\/ rmState[r].type = "prepareSent"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ rmState' = [rmState EXCEPT ![r].type = "cv"]
/\ msgs' = msgs \cup {[type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]}
\* RMReceiveChangeView describes node r receiving enough ChangeView messages for
\* view changing.
RMReceiveChangeView(r) ==
/\ rmState[r].type /= "bad"
/\ rmState[r].type /= "dead"
/\ rmState[r].type /= "blockAccepted"
/\ rmState[r].type /= "commitSent"
/\ rmState[r].type /= "commitAckSent"
/\ Cardinality({
rm \in RM : Cardinality({
msg \in msgs : /\ msg.type = "ChangeView"
/\ msg.rm = rm
/\ GetNewView(msg.view) >= GetNewView(rmState[r].view)
}) /= 0
}) >= M
/\ rmState' = [rmState EXCEPT ![r].type = "initialized", ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
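\* For example, with M = 3: once ChangeView messages from three distinct nodes
\* for r's current (or a later) view are in msgs, node r returns to "initialized"
\* and moves from view v to view v + 1.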
\* RMBeBad describes the faulty node r that will send any kind of consensus message starting
\* from the step at which it's gone wild. This step is enabled only when RMFault is a non-empty set.
RMBeBad(r) ==
/\ r \in RMFault
/\ Cardinality({rm \in RM : rmState[rm].type = "bad"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "bad"]
/\ UNCHANGED <<msgs>>
\* RMFaultySendCV describes sending CV message by the faulty node r.
RMFaultySendCV(r) ==
/\ rmState[r].type = "bad"
/\ LET cv == [type |-> "ChangeView", rm |-> r, view |-> rmState[r].view]
IN /\ cv \notin msgs
/\ msgs' = msgs \cup {cv}
/\ UNCHANGED <<rmState>>
\* RMFaultyDoCV describes view changing by the faulty node r.
RMFaultyDoCV(r) ==
/\ rmState[r].type = "bad"
/\ rmState' = [rmState EXCEPT ![r].view = GetNewView(rmState[r].view)]
/\ UNCHANGED <<msgs>>
\* RMFaultySendPReq describes sending PrepareRequest message by the primary faulty node r.
RMFaultySendPReq(r) ==
/\ rmState[r].type = "bad"
/\ IsPrimary(r)
/\ LET pReq == [type |-> "PrepareRequest", rm |-> r, view |-> rmState[r].view]
IN /\ pReq \notin msgs
/\ msgs' = msgs \cup {pReq}
/\ UNCHANGED <<rmState>>
\* RMFaultySendPResp describes sending PrepareResponse message by the non-primary faulty node r.
RMFaultySendPResp(r) ==
/\ rmState[r].type = "bad"
/\ \neg IsPrimary(r)
/\ LET pResp == [type |-> "PrepareResponse", rm |-> r, view |-> rmState[r].view]
IN /\ pResp \notin msgs
/\ msgs' = msgs \cup {pResp}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCommit describes sending Commit message by the faulty node r.
RMFaultySendCommit(r) ==
/\ rmState[r].type = "bad"
/\ LET commit == [type |-> "Commit", rm |-> r, view |-> rmState[r].view]
IN /\ commit \notin msgs
/\ msgs' = msgs \cup {commit}
/\ UNCHANGED <<rmState>>
\* RMFaultySendCommitAck describes sending CommitAck message by the faulty node r.
RMFaultySendCommitAck(r) ==
/\ rmState[r].type = "bad"
/\ LET ack == [type |-> "CommitAck", rm |-> r, view |-> rmState[r].view]
IN /\ ack \notin msgs
/\ msgs' = msgs \cup {ack}
/\ UNCHANGED <<rmState>>
\* RMDie describes node r being removed from the network at a particular step
\* of the behaviour. After this step node r can't change its state or accept/send messages.
RMDie(r) ==
/\ r \in RMDead
/\ Cardinality({rm \in RM : rmState[rm].type = "dead"}) < F
/\ rmState' = [rmState EXCEPT ![r].type = "dead"]
/\ UNCHANGED <<msgs>>
\* Terminating is an action that allows infinite stuttering to prevent deadlock on
\* behaviour termination. We consider termination to be valid if at least M nodes
\* have accepted the block.
Terminating ==
/\ Cardinality({rm \in RM : rmState[rm].type = "blockAccepted"}) >= M
/\ UNCHANGED <<msgs, rmState>>
\* TerminatingFourNodesDeadlockSameView describes the state where dBFT gets stuck
\* in a four-node scenario with one dead node allowed and the other relevant nodes
\* being in the same view as node r. Allow infinite stuttering to prevent TLC
\* deadlock recognition.
\* Note that this step is unused in the current specification; however, it may be
\* used for further investigation of this deadlock.
TerminatingFourNodesDeadlockSameView(r) ==
/\ Cardinality({rm \in RM : rmState[rm].type = "dead" /\ rmState[rm].view = rmState[r].view}) = 1
/\ Cardinality({rm \in RM : rmState[rm].type = "cv" /\ rmState[rm].view = rmState[r].view}) = 2
/\ Cardinality({rm \in RM : rmState[rm].type = "commitSent" /\ rmState[rm].view = rmState[r].view}) = 1
/\ UNCHANGED <<msgs, rmState>>
\* TerminatingFourNodesDeadlock describes the state where dBFT gets stuck in a
\* four-node scenario with one dead node allowed, irrespective of the nodes' views.
\* Allow infinite stuttering to prevent TLC deadlock recognition.
\* Note that this step is unused in the current specification; however, it may be
\* used for further investigation of this deadlock.
TerminatingFourNodesDeadlock ==
/\ Cardinality({rm \in RM : rmState[rm].type = "dead"}) = 1
/\ Cardinality({rm \in RM : rmState[rm].type = "cv"}) = 2
/\ Cardinality({rm \in RM : rmState[rm].type = "commitSent"}) = 1
/\ UNCHANGED <<msgs, rmState>>
\* Next is the next-state action describing the transition from the current state
\* to the next state of the behaviour.
Next ==
\/ Terminating
\/ \E r \in RM:
RMSendPrepareRequest(r) \/ RMSendPrepareResponse(r) \/ RMSendCommit(r) \/ RMSendCommitAck(r)
\/ RMAcceptBlock(r) \/ RMSendChangeView(r) \/ RMReceiveChangeView(r)
\/ RMDie(r) \/ RMBeBad(r)
\/ RMFaultySendCV(r) \/ RMFaultyDoCV(r) \/ RMFaultySendCommit(r) \/ RMFaultySendCommitAck(r) \/ RMFaultySendPReq(r) \/ RMFaultySendPResp(r)
\* Safety is a temporal formula that describes the whole set of allowed
\* behaviours. It specifies only what the system MAY do (i.e. the set of
\* possible allowed behaviours for the system). It asserts only what may
\* happen; any behaviour that violates it does so at some point, and
\* nothing past that point makes a difference.
\*
\* E.g. this safety formula (applied standalone) allows a behaviour to end
\* with an infinite sequence of stuttering steps (steps that change neither
\* msgs nor rmState) and never reach a state where at least one node has
\* committed or accepted the block.
\*
\* To forbid such behaviours we must also specify what the system MUST
\* do. That is done below with the help of fairness conditions in the
\* Fairness formula.
Safety == Init /\ [][Next]_vars
\* -------------- Fairness temporal formula --------------
\* Fairness is the set of temporal assumptions under which the model operates.
\* Usually it specifies different kinds of assumptions for each (or some)
\* subaction of the Next state action, but the only thing that concerns us
\* is preventing infinite stuttering at those steps where some of Next's
\* subactions are enabled. Thus, the only thing that we require from the
\* system is to keep taking steps as long as it is possible to take them.
\* That's exactly how the weak fairness condition works: if some action
\* remains continuously enabled, it must eventually happen.
Fairness == WF_vars(Next)
\* -------------- Specification --------------
\* The complete specification of the protocol written as a temporal formula.
Spec == Safety /\ Fairness
\* -------------- Liveness temporal formula --------------
\* For every possible behaviour it's true that eventually (i.e. at least once
\* through the behaviour) the block will be accepted. This is something that dBFT
\* must guarantee (and in practice this condition can be violated).
TerminationRequirement == <>(Cardinality({r \in RM : rmState[r].type = "blockAccepted"}) >= M)
\* A liveness temporal formula asserts only what must happen (i.e. specifies
\* what the system MUST do). No finite prefix of a behaviour can violate it:
\* there is always a continuation of the behaviour that can make the liveness
\* formula true; if no such continuation exists, then the liveness formula is
\* violated. The liveness formula is supposed to be checked as a property
\* by the TLC model checker.
Liveness == TerminationRequirement
\* -------------- ModelConstraints --------------
\* MaxViewConstraint is a state predicate restricting the number of possible
\* behaviour states. It is needed to reduce model checking time and prevent
\* the model graph size from exploding. This formula must be specified in the
\* "State constraint" part of the "Additional Spec Options" section of
\* the model overview.
MaxViewConstraint == /\ \A r \in RM : rmState[r].view <= MaxView
/\ \A msg \in msgs : msg.view <= MaxView
\* -------------- Invariants of the specification --------------
\* Model invariant is a state predicate (statement) that must be true for
\* every step of every reachable behaviour. Model invariant is supposed to
\* be checked as an Invariant by the TLC Model Checker.
\* TypeOK is a type-correctness invariant. It states that all elements of
\* specification variables must have the proper type throughout the behaviour.
TypeOK ==
/\ rmState \in [RM -> RMStates]
/\ msgs \subseteq Messages
\* InvTwoBlocksAccepted states that there can't be two different blocks accepted in
\* two different views, i.e. dBFT must not allow forks.
InvTwoBlocksAccepted == \A r1 \in RM:
\A r2 \in RM \ {r1}:
\/ rmState[r1].type /= "blockAccepted"
\/ rmState[r2].type /= "blockAccepted"
\/ rmState[r1].view = rmState[r2].view
\* InvFaultNodesCount states that there can be at most F faulty or dead nodes.
InvFaultNodesCount == Cardinality({
r \in RM : rmState[r].type = "bad" \/ rmState[r].type = "dead"
}) <= F
\* This theorem asserts the truth of the temporal formula whose meaning is that
\* the state predicates TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount are
\* the invariants of the specification Spec. This theorem is not supposed to be
\* checked by the TLC model checker, it's here for the reader's understanding of
\* the purpose of TypeOK, InvTwoBlocksAccepted and InvFaultNodesCount.
THEOREM Spec => [](TypeOK /\ InvTwoBlocksAccepted /\ InvFaultNodesCount)
=============================================================================
\* Modification History
\* Last modified Wed Jun 19 17:51:15 MSK 2024 by anna
\* Last modified Mon Mar 06 15:36:57 MSK 2023 by root
\* Last modified Sat Jan 21 01:26:16 MSK 2023 by rik
\* Created Thu Dec 15 16:06:17 MSK 2022 by anna
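
The M and F constants used throughout this spec follow the usual dBFT sizing: for N consensus nodes at most F may be faulty or dead, and M matching messages are required to make progress. A standalone Go sketch of that arithmetic (illustration only, not part of this change; it assumes the common convention F = (N-1)/3, M = N - F):

package main

import "fmt"

// thresholds returns the maximum tolerated number of faulty/dead nodes (F)
// and the number of matching messages required for progress (M) for a
// committee of n validators, assuming the usual dBFT convention.
func thresholds(n int) (f, m int) {
	f = (n - 1) / 3
	return f, n - f
}

func main() {
	for _, n := range []int{4, 7, 10} {
		f, m := thresholds(n)
		fmt.Printf("N=%d: F=%d, M=%d\n", n, f, m) // N=4: F=1, M=3; N=7: F=2, M=5; N=10: F=3, M=7
	}
}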

View File

@ -1,42 +1,42 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="47"/>
<intAttribute key="maxHeapSize" value="50"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAccepted"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="8"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbft"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.lamport.tla.toolbox.tool.tlc.modelCheck">
<stringAttribute key="configurationName" value="AllGoodModel"/>
<intAttribute key="distributedFPSetCount" value="0"/>
<stringAttribute key="distributedNetworkInterface" value="172.200.0.254"/>
<intAttribute key="distributedNodesCount" value="1"/>
<stringAttribute key="distributedTLC" value="off"/>
<intAttribute key="fpIndex" value="47"/>
<intAttribute key="maxHeapSize" value="50"/>
<stringAttribute key="modelBehaviorInit" value=""/>
<stringAttribute key="modelBehaviorNext" value=""/>
<stringAttribute key="modelBehaviorSpec" value="Spec"/>
<intAttribute key="modelBehaviorSpecType" value="1"/>
<stringAttribute key="modelBehaviorVars" value="msgs, rmState"/>
<stringAttribute key="modelComments" value=""/>
<booleanAttribute key="modelCorrectnessCheckDeadlock" value="true"/>
<listAttribute key="modelCorrectnessInvariants">
<listEntry value="1TypeOK"/>
<listEntry value="1InvTwoBlocksAccepted"/>
<listEntry value="1InvFaultNodesCount"/>
</listAttribute>
<listAttribute key="modelCorrectnessProperties">
<listEntry value="1Liveness"/>
</listAttribute>
<intAttribute key="modelEditorOpenTabs" value="10"/>
<stringAttribute key="modelParameterActionConstraint" value=""/>
<listAttribute key="modelParameterConstants">
<listEntry value="RMFault;;{};0;0"/>
<listEntry value="MaxView;;1;0;0"/>
<listEntry value="RMDead;;{};0;0"/>
<listEntry value="RM;;{0, 1, 2, 3};0;0"/>
</listAttribute>
<stringAttribute key="modelParameterContraint" value="MaxViewConstraint"/>
<listAttribute key="modelParameterDefinitions"/>
<stringAttribute key="modelParameterModelValues" value="{}"/>
<stringAttribute key="modelParameterNewDefinitions" value=""/>
<intAttribute key="modelVersion" value="20191005"/>
<intAttribute key="numberOfWorkers" value="8"/>
<stringAttribute key="result.mail.address" value=""/>
<stringAttribute key="specName" value="dbft"/>
<stringAttribute key="tlcResourcesProfile" value="local custom"/>
</launchConfiguration>

2
go.mod
View File

@ -1,4 +1,4 @@
module github.com/tutus-one/tutus-consensus
module git.marketally.com/tutus-one/tutus-consensus
go 1.24

32
go.sum
View File

@ -1,16 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,63 +1,63 @@
package dbft
type (
// inbox is a structure storing messages from a single epoch.
inbox[H Hash] struct {
prepare map[uint16]ConsensusPayload[H]
chViews map[uint16]ConsensusPayload[H]
preCommit map[uint16]ConsensusPayload[H]
commit map[uint16]ConsensusPayload[H]
}
// cache is an auxiliary structure storing messages
// from future epochs.
cache[H Hash] struct {
mail map[uint32]*inbox[H]
}
)
func newInbox[H Hash]() *inbox[H] {
return &inbox[H]{
prepare: make(map[uint16]ConsensusPayload[H]),
chViews: make(map[uint16]ConsensusPayload[H]),
preCommit: make(map[uint16]ConsensusPayload[H]),
commit: make(map[uint16]ConsensusPayload[H]),
}
}
func newCache[H Hash]() cache[H] {
return cache[H]{
mail: make(map[uint32]*inbox[H]),
}
}
func (c *cache[H]) getHeight(h uint32) *inbox[H] {
if m, ok := c.mail[h]; ok {
delete(c.mail, h)
return m
}
return nil
}
func (c *cache[H]) addMessage(m ConsensusPayload[H]) {
msgs, ok := c.mail[m.Height()]
if !ok {
msgs = newInbox[H]()
c.mail[m.Height()] = msgs
}
switch m.Type() {
case PrepareRequestType, PrepareResponseType:
msgs.prepare[m.ValidatorIndex()] = m
case ChangeViewType:
msgs.chViews[m.ValidatorIndex()] = m
case PreCommitType:
msgs.preCommit[m.ValidatorIndex()] = m
case CommitType:
msgs.commit[m.ValidatorIndex()] = m
default:
// Others are recoveries and we don't currently use them.
// Theoretically messages could be extracted.
}
}
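
The cache above is a two-level structure: messages that arrive for a height the node has not reached yet are parked in a per-height inbox and handed back (and dropped from the cache) once getHeight is called for that height. A self-contained sketch of the same buffering pattern, using a simplified message type instead of the package's ConsensusPayload (illustration only):

package main

import "fmt"

// msg is a simplified stand-in for a consensus payload: only the fields
// the cache cares about (height, sender index, type) are kept.
type msg struct {
	height    uint32
	validator uint16
	kind      string
}

// futureCache parks messages for heights we have not processed yet.
type futureCache struct {
	mail map[uint32][]msg
}

func newFutureCache() *futureCache {
	return &futureCache{mail: make(map[uint32][]msg)}
}

// add buffers a message under its height.
func (c *futureCache) add(m msg) {
	c.mail[m.height] = append(c.mail[m.height], m)
}

// take returns and removes everything buffered for height h,
// mirroring cache.getHeight above.
func (c *futureCache) take(h uint32) []msg {
	msgs := c.mail[h]
	delete(c.mail, h)
	return msgs
}

func main() {
	c := newFutureCache()
	c.add(msg{height: 4, validator: 1, kind: "PrepareRequest"})
	c.add(msg{height: 4, validator: 2, kind: "Commit"})
	fmt.Println(len(c.take(4)), "messages replayed for height 4")
	fmt.Println(len(c.take(4)), "messages left after replay")
}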

View File

@ -1,115 +1,115 @@
package dbft
import (
"testing"
"github.com/stretchr/testify/require"
)
// Structures used for type-specific dBFT/payloads implementation to avoid cyclic
// dependency.
type (
hash struct{}
payloadStub struct {
height uint32
typ MessageType
validatorIndex uint16
}
)
func (hash) String() string {
return ""
}
func (p payloadStub) ViewNumber() byte {
panic("TODO")
}
func (p payloadStub) SetViewNumber(byte) {
panic("TODO")
}
func (p payloadStub) Type() MessageType {
return p.typ
}
func (p payloadStub) SetType(MessageType) {
panic("TODO")
}
func (p payloadStub) Payload() any {
panic("TODO")
}
func (p payloadStub) SetPayload(any) {
panic("TODO")
}
func (p payloadStub) GetChangeView() ChangeView {
panic("TODO")
}
func (p payloadStub) GetPrepareRequest() PrepareRequest[hash] {
panic("TODO")
}
func (p payloadStub) GetPrepareResponse() PrepareResponse[hash] {
panic("TODO")
}
func (p payloadStub) GetCommit() Commit {
panic("TODO")
}
func (p payloadStub) GetPreCommit() PreCommit { panic("TODO") }
func (p payloadStub) GetRecoveryRequest() RecoveryRequest {
panic("TODO")
}
func (p payloadStub) GetRecoveryMessage() RecoveryMessage[hash] {
panic("TODO")
}
func (p payloadStub) ValidatorIndex() uint16 {
return p.validatorIndex
}
func (p payloadStub) SetValidatorIndex(uint16) {
panic("TODO")
}
func (p payloadStub) Height() uint32 {
return p.height
}
func (p payloadStub) SetHeight(uint32) {
panic("TODO")
}
func (p payloadStub) Hash() hash {
panic("TODO")
}
func TestMessageCache(t *testing.T) {
c := newCache[hash]()
p1 := payloadStub{
height: 3,
typ: PrepareRequestType,
}
c.addMessage(p1)
p2 := payloadStub{
height: 4,
typ: ChangeViewType,
}
c.addMessage(p2)
p3 := payloadStub{
height: 4,
typ: CommitType,
}
c.addMessage(p3)
p4 := payloadStub{
height: 3,
typ: PreCommitType,
}
c.addMessage(p4)
box := c.getHeight(3)
require.Len(t, box.chViews, 0)
require.Len(t, box.prepare, 1)
require.Len(t, box.preCommit, 1)
require.Len(t, box.commit, 0)
box = c.getHeight(4)
require.Len(t, box.chViews, 1)
require.Len(t, box.prepare, 0)
require.Len(t, box.preCommit, 0)
require.Len(t, box.commit, 1)
}

View File

@ -1,28 +1,28 @@
package dbft
import (
"fmt"
)
type (
// PublicKey is a generic public key interface used by dbft.
PublicKey any
// PrivateKey is a generic private key interface used by dbft. PrivateKey is used
// only by [PreBlock] and [Block] signing callbacks ([PreBlock.SetData] and
// [Block.Sign]) to grant access to the private key abstraction to Block and
// PreBlock signing code. PrivateKey does not contain any methods, hence the user
// is supposed to perform a type assertion before using PrivateKey.
PrivateKey any
// Hash is a generic hash interface used by dbft for payloads, blocks and
// transactions identification. It is recommended to implement this interface
// using hash functions with low hash collision probability. The following
// requirements must be met:
// 1. Hashes of two equal payloads/blocks/transactions are equal.
// 2. Hashes of two different payloads/blocks/transactions are different.
Hash interface {
comparable
fmt.Stringer
}
)
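
Any comparable type implementing fmt.Stringer satisfies this Hash interface (the internal crypto.Uint256 type appearing later in this diff is one such implementation). A minimal hypothetical implementation, for illustration only:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// id is a minimal Hash implementation: a fixed-size byte array is
// comparable, and String makes it a fmt.Stringer.
type id [32]byte

func (h id) String() string { return hex.EncodeToString(h[:]) }

// hashOf derives an id from arbitrary data via sha256.
func hashOf(data []byte) id { return id(sha256.Sum256(data)) }

func main() {
	a, b := hashOf([]byte("tx1")), hashOf([]byte("tx1"))
	fmt.Println(a == b, a) // equal payloads yield equal, comparable hashes
}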

View File

@ -6,9 +6,9 @@ import (
"encoding/gob"
"math"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/tutus-one/tutus-consensus/internal/merkle"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/internal/merkle"
)
type amevBlock struct {

View File

@ -3,7 +3,7 @@ package consensus
import (
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type (

View File

@ -4,9 +4,9 @@ import (
"encoding/binary"
"errors"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/tutus-one/tutus-consensus/internal/merkle"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/internal/merkle"
)
type preBlock struct {

View File

@ -4,7 +4,7 @@ import (
"encoding/binary"
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type (

View File

@ -4,9 +4,9 @@ import (
"bytes"
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/tutus-one/tutus-consensus/internal/merkle"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/internal/merkle"
)
type (

View File

@ -8,8 +8,8 @@ import (
"errors"
"testing"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

View File

@ -3,7 +3,7 @@ package consensus
import (
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type (

View File

@ -3,7 +3,7 @@ package consensus
import (
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type (

View File

@ -1,69 +1,69 @@
package consensus
import (
"encoding/gob"
)
type (
changeViewCompact struct {
ValidatorIndex uint16
OriginalViewNumber byte
Timestamp uint32
}
preCommitCompact struct {
ViewNumber byte
ValidatorIndex uint16
Data []byte
}
commitCompact struct {
ViewNumber byte
ValidatorIndex uint16
Signature [signatureSize]byte
}
preparationCompact struct {
ValidatorIndex uint16
}
)
// EncodeBinary implements Serializable interface.
func (p changeViewCompact) EncodeBinary(w *gob.Encoder) error {
return w.Encode(p)
}
// DecodeBinary implements Serializable interface.
func (p *changeViewCompact) DecodeBinary(r *gob.Decoder) error {
return r.Decode(p)
}
// EncodeBinary implements Serializable interface.
func (p preCommitCompact) EncodeBinary(w *gob.Encoder) error {
return w.Encode(p)
}
// DecodeBinary implements Serializable interface.
func (p *preCommitCompact) DecodeBinary(r *gob.Decoder) error {
return r.Decode(p)
}
// EncodeBinary implements Serializable interface.
func (p commitCompact) EncodeBinary(w *gob.Encoder) error {
return w.Encode(p)
}
// DecodeBinary implements Serializable interface.
func (p *commitCompact) DecodeBinary(r *gob.Decoder) error {
return r.Decode(p)
}
// EncodeBinary implements Serializable interface.
func (p preparationCompact) EncodeBinary(w *gob.Encoder) error {
return w.Encode(p)
}
// DecodeBinary implements Serializable interface.
func (p *preparationCompact) DecodeBinary(r *gob.Decoder) error {
return r.Decode(p)
}
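
Each of these compact payloads is (de)serialized through encoding/gob as shown above. A quick self-contained round-trip sketch (the struct is re-declared here purely for illustration):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// changeViewCompact mirrors the struct above; exported fields are what gob encodes.
type changeViewCompact struct {
	ValidatorIndex     uint16
	OriginalViewNumber byte
	Timestamp          uint32
}

func main() {
	in := changeViewCompact{ValidatorIndex: 3, OriginalViewNumber: 1, Timestamp: 42}

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}

	var out changeViewCompact
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {ValidatorIndex:3 OriginalViewNumber:1 Timestamp:42}
}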

View File

@ -3,9 +3,9 @@ package consensus
import (
"time"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/tutus-one/tutus-consensus/timer"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/timer"
"go.uber.org/zap"
)

View File

@ -5,8 +5,8 @@ import (
"encoding/gob"
"fmt"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
type (

View File

@ -3,8 +3,8 @@ package consensus
import (
"encoding/binary"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
// NewConsensusPayload returns minimal ConsensusPayload implementation.

View File

@ -1,9 +1,9 @@
package consensus
func secToNanoSec(s uint32) uint64 {
return uint64(s) * 1000000000
}
func nanoSecToSec(ns uint64) uint32 {
return uint32(ns / 1000000000)
}
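
These helpers convert between the second-precision timestamps used on the wire and the nanosecond values used internally; note that nanoSecToSec truncates any sub-second remainder. A quick self-contained check (functions copied here for illustration):

package main

import "fmt"

func secToNanoSec(s uint32) uint64 { return uint64(s) * 1000000000 }

func nanoSecToSec(ns uint64) uint32 { return uint32(ns / 1000000000) }

func main() {
	fmt.Println(secToNanoSec(3))          // 3000000000
	fmt.Println(nanoSecToSec(3999999999)) // 3 (the fractional second is dropped)
}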

View File

@ -4,8 +4,8 @@ import (
"bytes"
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
type (

View File

@ -6,8 +6,8 @@ import (
"encoding/gob"
"testing"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

View File

@ -3,8 +3,8 @@ package consensus
import (
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
type (

View File

@ -3,8 +3,8 @@ package consensus
import (
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
type (

View File

@ -5,8 +5,8 @@ import (
"encoding/gob"
"errors"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
type (

View File

@ -3,7 +3,7 @@ package consensus
import (
"encoding/gob"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type (

View File

@ -4,8 +4,8 @@ import (
"encoding/binary"
"errors"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
// =============================

View File

@ -3,7 +3,7 @@ package crypto
import (
"io"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type suiteType byte

View File

@ -1,34 +1,34 @@
package crypto
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/require"
)
func TestVerifySignature(t *testing.T) {
const dataSize = 1000
priv, pub := Generate(rand.Reader)
data := make([]byte, dataSize)
_, err := rand.Reader.Read(data)
require.NoError(t, err)
sign, err := priv.(*ECDSAPriv).Sign(data)
require.NoError(t, err)
require.Equal(t, 64, len(sign))
err = pub.(*ECDSAPub).Verify(data, sign)
require.NoError(t, err)
}
func TestGenerateWith(t *testing.T) {
priv, pub := GenerateWith(defaultSuite, rand.Reader)
require.NotNil(t, priv)
require.NotNil(t, pub)
priv, pub = GenerateWith(suiteType(0xFF), rand.Reader)
require.Nil(t, priv)
require.Nil(t, pub)
}

View File

@ -9,7 +9,7 @@ import (
"io"
"math/big"
"github.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus"
)
type (

View File

@ -1,20 +1,20 @@
package crypto
import (
"errors"
"testing"
"github.com/stretchr/testify/require"
)
// Do not generate keys with not enough entropy.
func TestECDSA_Generate(t *testing.T) {
rd := &errorReader{}
priv, pub := GenerateWith(SuiteECDSA, rd)
require.Nil(t, priv)
require.Nil(t, pub)
}
type errorReader struct{}
func (r *errorReader) Read(_ []byte) (int, error) { return 0, errors.New("error on read") }

View File

@ -1,46 +1,46 @@
package crypto
import (
"crypto/sha256"
"encoding/hex"
)
const (
Uint256Size = 32
Uint160Size = 20
)
type (
Uint256 [Uint256Size]byte
Uint160 [Uint160Size]byte
)
// String implements fmt.Stringer interface.
func (h Uint256) String() string {
return hex.EncodeToString(h[:])
}
// String implements fmt.Stringer interface.
func (h Uint160) String() string {
return hex.EncodeToString(h[:])
}
// Hash256 returns double sha-256 of data.
func Hash256(data []byte) Uint256 {
h1 := sha256.Sum256(data)
h2 := sha256.Sum256(h1[:])
return h2
}
// Hash160 returns the first Uint160Size bytes of sha256 of data (no ripemd160 is actually applied).
func Hash160(data []byte) Uint160 {
var (
h1 = sha256.Sum256(data)
h Uint160
)
copy(h[:], h1[:Uint160Size])
return h
}

View File

@ -1,50 +1,50 @@
package crypto
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/require"
)
var hash256tc = []struct {
data []byte
hash Uint256
}{
{[]byte{}, parse256("5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456")},
{[]byte{0, 1, 2, 3}, parse256("f7a355c00c89a08c80636bed35556a210b51786f6803a494f28fc5ba05959fc2")},
}
var hash160tc = []struct {
data []byte
hash Uint160
}{
{[]byte{}, Uint160{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4}},
{[]byte{0, 1, 2, 3}, Uint160{0x5, 0x4e, 0xde, 0xc1, 0xd0, 0x21, 0x1f, 0x62, 0x4f, 0xed, 0xc, 0xbc, 0xa9, 0xd4, 0xf9, 0x40, 0xb, 0xe, 0x49, 0x1c}},
}
func TestHash256(t *testing.T) {
for _, tc := range hash256tc {
require.Equal(t, tc.hash, Hash256(tc.data))
}
}
func TestHash160(t *testing.T) {
for _, tc := range hash160tc {
require.Equal(t, tc.hash, Hash160(tc.data))
}
}
func parse256(s string) (h Uint256) {
parseHex(h[:], s)
return
}
func parseHex(b []byte, s string) {
buf, err := hex.DecodeString(s)
if err != nil || len(buf) != len(b) {
panic("invalid test data")
}
copy(b, buf)
}

View File

@ -1,7 +1,7 @@
package merkle
import (
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
)
type (

View File

@ -5,7 +5,7 @@ import (
"encoding/hex"
"testing"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"github.com/stretchr/testify/require"
)

View File

@ -14,9 +14,9 @@ import (
"syscall"
"time"
"github.com/tutus-one/tutus-consensus"
"github.com/tutus-one/tutus-consensus/internal/consensus"
"github.com/tutus-one/tutus-consensus/internal/crypto"
"git.marketally.com/tutus-one/tutus-consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/consensus"
"git.marketally.com/tutus-one/tutus-consensus/internal/crypto"
"go.uber.org/zap"
)

View File

@ -1,25 +1,25 @@
package dbft
// PreBlock is a generic interface for a PreBlock used by anti-MEV dBFT extension.
// It holds a "draft" of block that should be converted to a final block with the
// help of additional data held by PreCommit messages.
type PreBlock[H Hash] interface {
// Data returns PreBlock's data CNs need to exchange during PreCommit phase.
// Data represents additional information not related to a final block signature.
Data() []byte
// SetData generates and sets PreBlock's data CNs need to exchange during
// PreCommit phase.
SetData(key PrivateKey) error
// Verify checks if data related to PreCommit phase is correct. This method is
// defined on PreBlock rather than on the PreCommit message since PreBlock itself is
// required for PreCommit's data verification. It's guaranteed that all
// proposed transactions are collected by the moment Verify is called.
Verify(key PublicKey, data []byte) error
// Transactions returns PreBlock's transaction list. This list may be different
// comparing to the final set of Block's transactions.
Transactions() []Transaction[H]
// SetTransactions sets PreBlock's transaction list. This list may be different
// comparing to the final set of Block's transactions.
SetTransactions([]Transaction[H])
}

View File

@ -1,10 +1,10 @@
package dbft
// PreCommit is an interface for dBFT PreCommit message. This message is used right
// before the Commit phase to exchange additional information required for the final
// block construction in anti-MEV dBFT extension.
type PreCommit interface {
// Data returns PreCommit's data that should be used for the final
// Block construction in anti-MEV dBFT extension.
Data() []byte
}

View File

@ -1,11 +1,11 @@
package dbft
// PrepareRequest represents dBFT PrepareRequest message.
type PrepareRequest[H Hash] interface {
// Timestamp returns this message's timestamp.
Timestamp() uint64
// Nonce is a random nonce.
Nonce() uint64
// TransactionHashes returns hashes of all transactions in a proposed block.
TransactionHashes() []H
}

View File

@ -1,8 +1,8 @@
package dbft
// PrepareResponse represents dBFT PrepareResponse message.
type PrepareResponse[H Hash] interface {
// PreparationHash returns the hash of PrepareRequest payload
// for this epoch.
PreparationHash() H
}

View File

@ -1,23 +1,23 @@
package dbft
// RecoveryMessage represents dBFT Recovery message.
type RecoveryMessage[H Hash] interface {
// AddPayload adds payload from this epoch to be recovered.
AddPayload(p ConsensusPayload[H])
// GetPrepareRequest returns PrepareRequest to be processed.
GetPrepareRequest(p ConsensusPayload[H], validators []PublicKey, primary uint16) ConsensusPayload[H]
// GetPrepareResponses returns a slice of PrepareResponse in any order.
GetPrepareResponses(p ConsensusPayload[H], validators []PublicKey) []ConsensusPayload[H]
// GetChangeViews returns a slice of ChangeView in any order.
GetChangeViews(p ConsensusPayload[H], validators []PublicKey) []ConsensusPayload[H]
// GetPreCommits returns a slice of PreCommit messages in any order.
// If implemented on networks with no AntiMEV extension it can just
// always return nil.
GetPreCommits(p ConsensusPayload[H], validators []PublicKey) []ConsensusPayload[H]
// GetCommits returns a slice of Commit in any order.
GetCommits(p ConsensusPayload[H], validators []PublicKey) []ConsensusPayload[H]
// PreparationHash returns the hash of the PrepareRequest payload for this epoch.
// It can be useful in case only PrepareResponse payloads were received.
PreparationHash() *H
}

View File

@ -1,7 +1,7 @@
package dbft
// RecoveryRequest represents dBFT RecoveryRequest message.
type RecoveryRequest interface {
// Timestamp returns this message's timestamp.
Timestamp() uint64
}

52
rtt.go
View File

@ -1,26 +1,26 @@
package dbft
import (
"time"
)
const rttLength = 7 * 10 // 10 rounds with 7 nodes
type rtt struct {
times [rttLength]time.Duration
idx int
avg time.Duration
}
func (r *rtt) addTime(t time.Duration) {
var old = r.times[r.idx]
if old != 0 {
t = min(t, 2*old) // Too long delays should be normalized, we don't want to overshoot.
}
r.avg = r.avg + (t-old)/time.Duration(len(r.times))
r.avg = max(0, r.avg) // Can't be less than zero.
r.times[r.idx] = t
r.idx = (r.idx + 1) % len(r.times)
}
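
addTime maintains a simple moving average over the last rttLength samples without re-summing the window: replacing sample old with t shifts the average by (t-old)/rttLength, and a new sample is clamped to at most twice the value it replaces so a single slow round can't skew the estimate. A self-contained sketch of the same update (the type is copied here for illustration; the min/max builtins require Go 1.21+, which the module's go 1.24 directive satisfies):

package main

import (
	"fmt"
	"time"
)

// rttLength matches the original: 10 rounds with 7 nodes.
const rttLength = 7 * 10

type rtt struct {
	times [rttLength]time.Duration
	idx   int
	avg   time.Duration
}

func (r *rtt) addTime(t time.Duration) {
	old := r.times[r.idx]
	if old != 0 {
		t = min(t, 2*old) // clamp outliers so one slow round can't overshoot the average
	}
	r.avg += (t - old) / time.Duration(len(r.times))
	r.avg = max(0, r.avg) // the average can't be negative
	r.times[r.idx] = t
	r.idx = (r.idx + 1) % len(r.times)
}

func main() {
	var r rtt
	for i := 0; i < rttLength; i++ {
		r.addTime(100 * time.Millisecond)
	}
	fmt.Println(r.avg) // converges to ~100ms once the window is full
}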

472
send.go
View File

@ -1,236 +1,236 @@
package dbft
import (
"fmt"
"go.uber.org/zap"
)
func (d *DBFT[H]) broadcast(msg ConsensusPayload[H]) {
d.Logger.Debug("broadcasting message",
zap.Stringer("type", msg.Type()),
zap.Uint32("height", d.BlockIndex),
zap.Uint("view", uint(d.ViewNumber)))
msg.SetValidatorIndex(uint16(d.MyIndex))
d.Broadcast(msg)
}
func (c *Context[H]) makePrepareRequest(force bool) ConsensusPayload[H] {
if !c.Fill(force) {
return nil
}
req := c.Config.NewPrepareRequest(c.Timestamp, c.Nonce, c.TransactionHashes)
return c.Config.NewConsensusPayload(c, PrepareRequestType, req)
}
func (d *DBFT[H]) sendPrepareRequest(force bool) {
msg := d.makePrepareRequest(force)
if msg == ConsensusPayload[H](nil) {
d.subscribeForTransactions()
// Try one more time since there's a tiny race between an attempt to
// construct prepare request and transactions subscription.
msg = d.makePrepareRequest(force)
if msg == ConsensusPayload[H](nil) {
delay := d.maxTimePerBlock - d.timePerBlock
d.changeTimer(delay)
return
}
}
d.unsubscribeFromTransactions()
d.PreparationPayloads[d.MyIndex] = msg
d.broadcast(msg)
d.prepareSentTime = d.Timer.Now()
delay := d.timePerBlock << (d.ViewNumber + 1)
if d.ViewNumber == 0 {
delay -= d.timePerBlock
}
d.Logger.Info("sending PrepareRequest", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.changeTimer(delay)
d.checkPrepare()
}
func (c *Context[H]) makeChangeView(ts uint64, reason ChangeViewReason) ConsensusPayload[H] {
cv := c.Config.NewChangeView(c.ViewNumber+1, reason, ts)
msg := c.Config.NewConsensusPayload(c, ChangeViewType, cv)
c.ChangeViewPayloads[c.MyIndex] = msg
return msg
}
func (d *DBFT[H]) sendChangeView(reason ChangeViewReason) {
if d.Context.WatchOnly() {
return
}
newView := d.ViewNumber + 1
d.changeTimer(d.timePerBlock << (newView + 1))
nc := d.CountCommitted()
nf := d.CountFailed()
if reason == CVTimeout && nc+nf > d.F() {
d.Logger.Info("skip change view", zap.Int("nc", nc), zap.Int("nf", nf))
d.sendRecoveryRequest()
return
}
// Timeout while missing transactions, set the real reason.
if !d.hasAllTransactions() && reason == CVTimeout {
reason = CVTxNotFound
}
d.Logger.Info("request change view",
zap.Int("view", int(d.ViewNumber)),
zap.Uint32("height", d.BlockIndex),
zap.Stringer("reason", reason),
zap.Int("new_view", int(newView)),
zap.Int("nc", nc),
zap.Int("nf", nf))
msg := d.makeChangeView(uint64(d.Timer.Now().UnixNano()), reason)
d.StopTxFlow()
d.broadcast(msg)
d.checkChangeView(newView)
}
func (c *Context[H]) makePrepareResponse() ConsensusPayload[H] {
resp := c.Config.NewPrepareResponse(c.PreparationPayloads[c.PrimaryIndex].Hash())
msg := c.Config.NewConsensusPayload(c, PrepareResponseType, resp)
c.PreparationPayloads[c.MyIndex] = msg
return msg
}
func (d *DBFT[H]) sendPrepareResponse() {
msg := d.makePrepareResponse()
d.Logger.Info("sending PrepareResponse", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.StopTxFlow()
d.broadcast(msg)
}
func (c *Context[H]) makePreCommit() (ConsensusPayload[H], error) {
if msg := c.PreCommitPayloads[c.MyIndex]; msg != nil {
return msg, nil
}
if preB := c.CreatePreBlock(); preB != nil {
var preData []byte
if err := preB.SetData(c.Priv); err == nil {
preData = preB.Data()
} else {
return nil, fmt.Errorf("PreCommit data construction failed: %w", err)
}
preCommit := c.Config.NewPreCommit(preData)
return c.Config.NewConsensusPayload(c, PreCommitType, preCommit), nil
}
return nil, fmt.Errorf("failed to construct PreBlock")
}
func (c *Context[H]) makeCommit() (ConsensusPayload[H], error) {
if msg := c.CommitPayloads[c.MyIndex]; msg != nil {
return msg, nil
}
if b := c.MakeHeader(); b != nil {
var sign []byte
if err := b.Sign(c.Priv); err == nil {
sign = b.Signature()
} else {
return nil, fmt.Errorf("header signing failed: %w", err)
}
commit := c.Config.NewCommit(sign)
return c.Config.NewConsensusPayload(c, CommitType, commit), nil
}
return nil, fmt.Errorf("failed to construct Header")
}
func (d *DBFT[H]) sendPreCommit() {
msg, err := d.makePreCommit()
if err != nil {
d.Logger.Error("failed to construct PreCommit", zap.Error(err))
return
}
d.PreCommitPayloads[d.MyIndex] = msg
d.Logger.Info("sending PreCommit", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.broadcast(msg)
}
func (d *DBFT[H]) sendCommit() {
msg, err := d.makeCommit()
if err != nil {
d.Logger.Error("failed to construct Commit", zap.Error(err))
return
}
d.CommitPayloads[d.MyIndex] = msg
d.Logger.Info("sending Commit", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.broadcast(msg)
}
func (d *DBFT[H]) sendRecoveryRequest() {
// If we're here, something is wrong: we're either missing some messages,
// transactions or both, so re-request missing transactions here too.
if d.RequestSentOrReceived() && !d.hasAllTransactions() {
d.processMissingTx()
}
req := d.NewRecoveryRequest(uint64(d.Timer.Now().UnixNano()))
d.broadcast(d.NewConsensusPayload(&d.Context, RecoveryRequestType, req))
}
func (c *Context[H]) makeRecoveryMessage() ConsensusPayload[H] {
recovery := c.Config.NewRecoveryMessage()
for _, p := range c.PreparationPayloads {
if p != nil {
recovery.AddPayload(p)
}
}
cv := c.LastChangeViewPayloads
// if byte(msg.ViewNumber) == c.ViewNumber {
// cv = c.changeViewPayloads
// }
for _, p := range cv {
if p != nil {
recovery.AddPayload(p)
}
}
if c.PreCommitSent() {
for _, p := range c.PreCommitPayloads {
if p != nil {
recovery.AddPayload(p)
}
}
}
if c.CommitSent() {
for _, p := range c.CommitPayloads {
if p != nil {
recovery.AddPayload(p)
}
}
}
return c.Config.NewConsensusPayload(c, RecoveryMessageType, recovery)
}
func (d *DBFT[H]) sendRecoveryMessage() {
d.broadcast(d.makeRecoveryMessage())
}
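
The timers set in sendPrepareRequest and sendChangeView above grow exponentially with the view number: the delay is timePerBlock << (view+1), with one timePerBlock subtracted in view 0 so the first round targets the configured block time. A small sketch of that arithmetic (timePerBlock = 15s is just an example value, not a project default):

package main

import (
	"fmt"
	"time"
)

// prepareDelay reproduces the delay computation from sendPrepareRequest above.
func prepareDelay(timePerBlock time.Duration, view byte) time.Duration {
	delay := timePerBlock << (view + 1)
	if view == 0 {
		delay -= timePerBlock
	}
	return delay
}

func main() {
	const timePerBlock = 15 * time.Second
	for view := byte(0); view < 4; view++ {
		fmt.Printf("view %d: %v\n", view, prepareDelay(timePerBlock, view))
	}
	// view 0: 15s, view 1: 1m0s, view 2: 2m0s, view 3: 4m0s
}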
package dbft
import (
"fmt"
"go.uber.org/zap"
)
func (d *DBFT[H]) broadcast(msg ConsensusPayload[H]) {
d.Logger.Debug("broadcasting message",
zap.Stringer("type", msg.Type()),
zap.Uint32("height", d.BlockIndex),
zap.Uint("view", uint(d.ViewNumber)))
msg.SetValidatorIndex(uint16(d.MyIndex))
d.Broadcast(msg)
}
func (c *Context[H]) makePrepareRequest(force bool) ConsensusPayload[H] {
if !c.Fill(force) {
return nil
}
req := c.Config.NewPrepareRequest(c.Timestamp, c.Nonce, c.TransactionHashes)
return c.Config.NewConsensusPayload(c, PrepareRequestType, req)
}
func (d *DBFT[H]) sendPrepareRequest(force bool) {
msg := d.makePrepareRequest(force)
if msg == ConsensusPayload[H](nil) {
d.subscribeForTransactions()
// Try one more time since there's a tiny race between an attempt to
// construct prepare request and transactions subscription.
msg = d.makePrepareRequest(force)
if msg == ConsensusPayload[H](nil) {
delay := d.maxTimePerBlock - d.timePerBlock
d.changeTimer(delay)
return
}
}
d.unsubscribeFromTransactions()
d.PreparationPayloads[d.MyIndex] = msg
d.broadcast(msg)
d.prepareSentTime = d.Timer.Now()
delay := d.timePerBlock << (d.ViewNumber + 1)
if d.ViewNumber == 0 {
delay -= d.timePerBlock
}
d.Logger.Info("sending PrepareRequest", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.changeTimer(delay)
d.checkPrepare()
}
func (c *Context[H]) makeChangeView(ts uint64, reason ChangeViewReason) ConsensusPayload[H] {
cv := c.Config.NewChangeView(c.ViewNumber+1, reason, ts)
msg := c.Config.NewConsensusPayload(c, ChangeViewType, cv)
c.ChangeViewPayloads[c.MyIndex] = msg
return msg
}
func (d *DBFT[H]) sendChangeView(reason ChangeViewReason) {
if d.Context.WatchOnly() {
return
}
newView := d.ViewNumber + 1
d.changeTimer(d.timePerBlock << (newView + 1))
nc := d.CountCommitted()
nf := d.CountFailed()
if reason == CVTimeout && nc+nf > d.F() {
d.Logger.Info("skip change view", zap.Int("nc", nc), zap.Int("nf", nf))
d.sendRecoveryRequest()
return
}
// Timeout while missing transactions, set the real reason.
if !d.hasAllTransactions() && reason == CVTimeout {
reason = CVTxNotFound
}
d.Logger.Info("request change view",
zap.Int("view", int(d.ViewNumber)),
zap.Uint32("height", d.BlockIndex),
zap.Stringer("reason", reason),
zap.Int("new_view", int(newView)),
zap.Int("nc", nc),
zap.Int("nf", nf))
msg := d.makeChangeView(uint64(d.Timer.Now().UnixNano()), reason)
d.StopTxFlow()
d.broadcast(msg)
d.checkChangeView(newView)
}
func (c *Context[H]) makePrepareResponse() ConsensusPayload[H] {
resp := c.Config.NewPrepareResponse(c.PreparationPayloads[c.PrimaryIndex].Hash())
msg := c.Config.NewConsensusPayload(c, PrepareResponseType, resp)
c.PreparationPayloads[c.MyIndex] = msg
return msg
}
func (d *DBFT[H]) sendPrepareResponse() {
msg := d.makePrepareResponse()
d.Logger.Info("sending PrepareResponse", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.StopTxFlow()
d.broadcast(msg)
}
func (c *Context[H]) makePreCommit() (ConsensusPayload[H], error) {
if msg := c.PreCommitPayloads[c.MyIndex]; msg != nil {
return msg, nil
}
if preB := c.CreatePreBlock(); preB != nil {
var preData []byte
if err := preB.SetData(c.Priv); err == nil {
preData = preB.Data()
} else {
return nil, fmt.Errorf("PreCommit data construction failed: %w", err)
}
preCommit := c.Config.NewPreCommit(preData)
return c.Config.NewConsensusPayload(c, PreCommitType, preCommit), nil
}
return nil, fmt.Errorf("failed to construct PreBlock")
}
func (c *Context[H]) makeCommit() (ConsensusPayload[H], error) {
if msg := c.CommitPayloads[c.MyIndex]; msg != nil {
return msg, nil
}
if b := c.MakeHeader(); b != nil {
var sign []byte
if err := b.Sign(c.Priv); err == nil {
sign = b.Signature()
} else {
return nil, fmt.Errorf("header signing failed: %w", err)
}
commit := c.Config.NewCommit(sign)
return c.Config.NewConsensusPayload(c, CommitType, commit), nil
}
return nil, fmt.Errorf("failed to construct Header")
}
func (d *DBFT[H]) sendPreCommit() {
msg, err := d.makePreCommit()
if err != nil {
d.Logger.Error("failed to construct PreCommit", zap.Error(err))
return
}
d.PreCommitPayloads[d.MyIndex] = msg
d.Logger.Info("sending PreCommit", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.broadcast(msg)
}
func (d *DBFT[H]) sendCommit() {
msg, err := d.makeCommit()
if err != nil {
d.Logger.Error("failed to construct Commit", zap.Error(err))
return
}
d.CommitPayloads[d.MyIndex] = msg
d.Logger.Info("sending Commit", zap.Uint32("height", d.BlockIndex), zap.Uint("view", uint(d.ViewNumber)))
d.broadcast(msg)
}
func (d *DBFT[H]) sendRecoveryRequest() {
// If we're here, something is wrong: we're either missing some messages,
// transactions or both, so re-request missing transactions here too.
if d.RequestSentOrReceived() && !d.hasAllTransactions() {
d.processMissingTx()
}
req := d.NewRecoveryRequest(uint64(d.Timer.Now().UnixNano()))
d.broadcast(d.NewConsensusPayload(&d.Context, RecoveryRequestType, req))
}
func (c *Context[H]) makeRecoveryMessage() ConsensusPayload[H] {
recovery := c.Config.NewRecoveryMessage()
for _, p := range c.PreparationPayloads {
if p != nil {
recovery.AddPayload(p)
}
}
cv := c.LastChangeViewPayloads
// if byte(msg.ViewNumber) == c.ViewNumber {
// cv = c.changeViewPayloads
// }
for _, p := range cv {
if p != nil {
recovery.AddPayload(p)
}
}
if c.PreCommitSent() {
for _, p := range c.PreCommitPayloads {
if p != nil {
recovery.AddPayload(p)
}
}
}
if c.CommitSent() {
for _, p := range c.CommitPayloads {
if p != nil {
recovery.AddPayload(p)
}
}
}
return c.Config.NewConsensusPayload(c, RecoveryMessageType, recovery)
}
func (d *DBFT[H]) sendRecoveryMessage() {
d.broadcast(d.makeRecoveryMessage())
}
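makeRecoveryMessage above folds every known non-nil payload from the preparation, change-view, pre-commit and commit slices into a single recovery message. A minimal standalone sketch of that nil-filtering pattern follows; the names are purely illustrative and not part of the dbft API.
package main

import "fmt"

// appendNonNil appends the non-nil entries of src to dst, mirroring how
// makeRecoveryMessage walks each payload slice and skips empty slots.
func appendNonNil[T any](dst, src []*T) []*T {
    for _, p := range src {
        if p != nil {
            dst = append(dst, p)
        }
    }
    return dst
}

func main() {
    prep, commit := "prepare", "commit"
    var payloads []*string
    payloads = appendNonNil(payloads, []*string{&prep, nil, &commit})
    fmt.Println(len(payloads)) // 2: nil slots are skipped
}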

View File

@ -1,22 +1,22 @@
package dbft
import (
"time"
)
// Timer is an interface that provides all time-related
// functions. It can be mocked for testing.
type Timer interface {
// Now returns current time.
Now() time.Time
// Reset resets timer to the specified block height and view.
Reset(height uint32, view byte, d time.Duration)
// Extend extends current timer with duration d.
Extend(d time.Duration)
// Height returns current height set for the timer.
Height() uint32
// View returns current view set for the timer.
View() byte
// C returns channel for timer events.
C() <-chan time.Time
}
package dbft
import (
"time"
)
// Timer is an interface that provides all time-related
// functions. It can be mocked for testing.
type Timer interface {
// Now returns current time.
Now() time.Time
// Reset resets timer to the specified block height and view.
Reset(height uint32, view byte, d time.Duration)
// Extend extends current timer with duration d.
Extend(d time.Duration)
// Height returns current height set for the timer.
Height() uint32
// View returns current view set for the timer.
View() byte
// C returns channel for timer events.
C() <-chan time.Time
}
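Since the interface is deliberately small, it can be mocked with a manually driven clock in tests, as the doc comment suggests. A minimal sketch of such a mock (illustrative only, not shipped with the package; the package name is hypothetical):
package consensustest

import "time"

// mockTimer is a hand-driven Timer: Reset only records its arguments and
// fire() delivers a tick on demand, so tests control time explicitly.
type mockTimer struct {
    now    time.Time
    height uint32
    view   byte
    d      time.Duration
    ch     chan time.Time
}

func newMockTimer(start time.Time) *mockTimer {
    return &mockTimer{now: start, ch: make(chan time.Time, 1)}
}

func (m *mockTimer) Now() time.Time                          { return m.now }
func (m *mockTimer) Reset(h uint32, v byte, d time.Duration) { m.height, m.view, m.d = h, v, d }
func (m *mockTimer) Extend(d time.Duration)                  { m.d += d }
func (m *mockTimer) Height() uint32                          { return m.height }
func (m *mockTimer) View() byte                              { return m.view }
func (m *mockTimer) C() <-chan time.Time                     { return m.ch }

// fire advances the mock clock past the armed duration and emits a tick.
func (m *mockTimer) fire() {
    m.now = m.now.Add(m.d)
    m.ch <- m.now
}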

View File

@ -1,97 +1,97 @@
/*
Package timer contains the default implementation of the [dbft.Timer] interface and
provides all necessary timer-related functionality to the [dbft.DBFT] service.
*/
package timer
import (
"time"
)
type (
// Timer is a default [dbft.Timer] implementation.
Timer struct {
height uint32
view byte
s time.Time
d time.Duration
tt *time.Timer
ch chan time.Time
}
)
// New returns default Timer implementation.
func New() *Timer {
t := &Timer{
ch: make(chan time.Time, 1),
}
return t
}
// C implements Timer interface.
func (t *Timer) C() <-chan time.Time {
if t.tt == nil {
return t.ch
}
return t.tt.C
}
// Height returns current timer height.
func (t *Timer) Height() uint32 {
return t.height
}
// View returns the current timer view.
func (t *Timer) View() byte {
return t.view
}
// Reset implements Timer interface.
func (t *Timer) Reset(height uint32, view byte, d time.Duration) {
t.stop()
t.s = t.Now()
t.d = d
t.height = height
t.view = view
if t.d != 0 {
t.tt = time.NewTimer(t.d)
} else {
t.tt = nil
drain(t.ch)
t.ch <- t.s
}
}
func drain(ch <-chan time.Time) {
select {
case <-ch:
default:
}
}
// stop stops the Timer.
func (t *Timer) stop() {
if t.tt != nil {
t.tt.Stop()
t.tt = nil
}
}
// Extend implements Timer interface.
func (t *Timer) Extend(d time.Duration) {
t.d += d
if elapsed := time.Since(t.s); t.d > elapsed {
t.stop()
t.tt = time.NewTimer(t.d - elapsed)
}
}
// Now implements Timer interface.
func (t *Timer) Now() time.Time {
return time.Now()
}
/*
Package timer contains the default implementation of the [dbft.Timer] interface and
provides all necessary timer-related functionality to the [dbft.DBFT] service.
*/
package timer
import (
"time"
)
type (
// Timer is a default [dbft.Timer] implementation.
Timer struct {
height uint32
view byte
s time.Time
d time.Duration
tt *time.Timer
ch chan time.Time
}
)
// New returns default Timer implementation.
func New() *Timer {
t := &Timer{
ch: make(chan time.Time, 1),
}
return t
}
// C implements Timer interface.
func (t *Timer) C() <-chan time.Time {
if t.tt == nil {
return t.ch
}
return t.tt.C
}
// Height returns current timer height.
func (t *Timer) Height() uint32 {
return t.height
}
// View returns the current timer view.
func (t *Timer) View() byte {
return t.view
}
// Reset implements Timer interface.
func (t *Timer) Reset(height uint32, view byte, d time.Duration) {
t.stop()
t.s = t.Now()
t.d = d
t.height = height
t.view = view
if t.d != 0 {
t.tt = time.NewTimer(t.d)
} else {
t.tt = nil
drain(t.ch)
t.ch <- t.s
}
}
func drain(ch <-chan time.Time) {
select {
case <-ch:
default:
}
}
// stop stops the Timer.
func (t *Timer) stop() {
if t.tt != nil {
t.tt.Stop()
t.tt = nil
}
}
// Extend implements Timer interface.
func (t *Timer) Extend(d time.Duration) {
t.d += d
if elapsed := time.Since(t.s); t.d > elapsed {
t.stop()
t.tt = time.NewTimer(t.d - elapsed)
}
}
// Now implements Timer interface.
func (t *Timer) Now() time.Time {
return time.Now()
}
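A brief usage sketch of the default implementation, written as an Example-style test that could sit next to timer_test.go (the function itself is illustrative and not part of the repository):
package timer

import (
    "fmt"
    "time"
)

// ExampleTimer_Reset arms the timer for height 1, view 0 and waits for it to
// fire; Reset with a zero duration would instead deliver the tick immediately.
func ExampleTimer_Reset() {
    t := New()
    t.Reset(1, 0, 50*time.Millisecond)
    select {
    case <-t.C():
        fmt.Println(t.Height(), t.View())
    case <-time.After(time.Second):
        fmt.Println("timer did not fire")
    }
    // Output: 1 0
}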

View File

@ -1,56 +1,56 @@
package timer
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestTimer_Reset(t *testing.T) {
tt := New()
tt.Reset(1, 2, time.Millisecond*100)
time.Sleep(time.Millisecond * 200)
shouldReceive(t, tt, 1, 2, "no value in timer")
tt.Reset(1, 2, time.Second)
tt.Reset(2, 3, 0)
shouldReceive(t, tt, 2, 3, "no value in timer after reset(0)")
tt.Reset(1, 2, time.Millisecond*100)
time.Sleep(time.Millisecond * 200)
tt.Reset(1, 3, time.Millisecond*100)
time.Sleep(time.Millisecond * 200)
shouldReceive(t, tt, 1, 3, "invalid value after reset")
tt.Reset(3, 1, time.Millisecond*100)
shouldNotReceive(t, tt, "value arrived too early")
tt.Extend(time.Millisecond * 300)
time.Sleep(time.Millisecond * 200)
shouldNotReceive(t, tt, "value arrived too early after extend")
time.Sleep(time.Millisecond * 300)
shouldReceive(t, tt, 3, 1, "no value in timer after extend")
}
func shouldReceive(t *testing.T, tt *Timer, height uint32, view byte, msg string) {
select {
case <-tt.C():
gotHeight := tt.Height()
gotView := tt.View()
require.Equal(t, height, gotHeight)
require.Equal(t, view, gotView)
default:
require.Fail(t, msg)
}
}
func shouldNotReceive(t *testing.T, tt *Timer, msg string) {
select {
case <-tt.C():
require.Fail(t, msg)
default:
}
}
package timer
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestTimer_Reset(t *testing.T) {
tt := New()
tt.Reset(1, 2, time.Millisecond*100)
time.Sleep(time.Millisecond * 200)
shouldReceive(t, tt, 1, 2, "no value in timer")
tt.Reset(1, 2, time.Second)
tt.Reset(2, 3, 0)
shouldReceive(t, tt, 2, 3, "no value in timer after reset(0)")
tt.Reset(1, 2, time.Millisecond*100)
time.Sleep(time.Millisecond * 200)
tt.Reset(1, 3, time.Millisecond*100)
time.Sleep(time.Millisecond * 200)
shouldReceive(t, tt, 1, 3, "invalid value after reset")
tt.Reset(3, 1, time.Millisecond*100)
shouldNotReceive(t, tt, "value arrived too early")
tt.Extend(time.Millisecond * 300)
time.Sleep(time.Millisecond * 200)
shouldNotReceive(t, tt, "value arrived too early after extend")
time.Sleep(time.Millisecond * 300)
shouldReceive(t, tt, 3, 1, "no value in timer after extend")
}
func shouldReceive(t *testing.T, tt *Timer, height uint32, view byte, msg string) {
select {
case <-tt.C():
gotHeight := tt.Height()
gotView := tt.View()
require.Equal(t, height, gotHeight)
require.Equal(t, view, gotView)
default:
require.Fail(t, msg)
}
}
func shouldNotReceive(t *testing.T, tt *Timer, msg string) {
select {
case <-tt.C():
require.Fail(t, msg)
default:
}
}

View File

@ -1,8 +1,8 @@
package dbft
// Transaction is a generic transaction interface.
type Transaction[H Hash] interface {
// Hash must return cryptographic hash of the transaction.
// Transactions which have equal hashes are considered equal.
Hash() H
}
package dbft
// Transaction is a generic transaction interface.
type Transaction[H Hash] interface {
// Hash must return cryptographic hash of the transaction.
// Transactions which have equal hashes are considered equal.
Hash() H
}
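A minimal sketch of a concrete type satisfying this interface; Hash256 and memoTx are illustrative names, and the real constraint on H is whatever the library's Hash interface requires:
package example

import (
    "crypto/sha256"
    "encoding/hex"
)

// Hash256 is a toy 32-byte hash type used as the H parameter.
type Hash256 [32]byte

func (h Hash256) String() string { return hex.EncodeToString(h[:]) }

// memoTx is a transaction carrying opaque bytes; two memoTx values with the
// same bytes hash to the same Hash256 and are therefore treated as equal.
type memoTx struct {
    data []byte
}

func (t memoTx) Hash() Hash256 {
    return Hash256(sha256.Sum256(t.data))
}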