
Commit 0d946c2

Merge branch 'compatible' into dkijania/merge/compatible_into_develop_020425

2 parents: d289227 + 463a3be

37 files changed: +218 -178 lines

automation/README.md (+2 -2)

@@ -264,11 +264,11 @@ There are several public Grafana dashboards available here:
 
 The purpose of a public testnet is to allow end-users to try out the software and learn how to operate it. Thus, we accept sign-ups for stake to be allocated in the genesis, and commit those keys to the compiled genesis ledger.
 
-For context, these keys correspond to the "Fish Keys" in the QA Net deployments, and Online Fish Keys are ommitted in a Public Testnet deployment and "Offline Fish Keys" are instead delegated to the submitted User Keys.
+For context, these keys correspond to the "Fish Keys" in the QA Net deployments, and Online Fish Keys are omitted in a Public Testnet deployment and "Offline Fish Keys" are instead delegated to the submitted User Keys.
 
 ### Generate Genesis Ledger
 
-Once you have the keys for your deploymenet created, and the Staker Keys saved to a CSV, you can use them to generate a genesis ledger with the following command.
+Once you have the keys for your deployment created, and the Staker Keys saved to a CSV, you can use them to generate a genesis ledger with the following command.
 
 ```
 scripts/generate-keys-and-ledger.sh

buildkite/src/Command/Bench/Base.dhall (+5)

@@ -1,3 +1,5 @@
+let B = ../../External/Buildkite.dhall
+
 let PipelineMode = ../../Pipeline/Mode.dhall
 
 let PipelineTag = ../../Pipeline/Tag.dhall

@@ -24,6 +26,8 @@ let Benchmarks = ../../Constants/Benchmarks.dhall
 
 let SelectFiles = ../../Lib/SelectFiles.dhall
 
+let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type
+
 let Spec =
     { Type =
         { key : Text

@@ -67,6 +71,7 @@ let command
         "Perf: ${spec.label} ${PipelineMode.capitalName spec.mode}"
     , key = spec.key
     , target = spec.size
+    , soft_fail = Some (B/SoftFail.Boolean True)
     , docker = None Docker.Type
     , depends_on = spec.dependsOn
     }

buildkite/src/Constants/DebianVersions.dhall (+1)

@@ -75,6 +75,7 @@ let minimalDirtyWhen =
     , S.exactly "buildkite/src/Command/HardforkPackageGeneration" "dhall"
     , S.exactly "buildkite/src/Command/MinaArtifact" "dhall"
     , S.exactly "buildkite/src/Command/PatchArchiveTest" "dhall"
+    , S.exactly "buildkite/src/Command/Bench/Base" "dhall"
     , S.exactly "buildkite/src/Command/ReplayerTest" "dhall"
     , S.strictlyStart (S.contains "buildkite/src/Jobs/Release/MinaArtifact")
     , S.strictlyStart (S.contains "dockerfiles/stages")

buildkite/src/README.md (+3 -3)

@@ -1,6 +1,6 @@
 # Buildkite CI
 
-This folder contains all dhall code which is an backbone for our CI related code for buildkite.
+This folder contains all dhall code which is a backbone for our CI related code for buildkite.
 
 # Structure
 

@@ -53,7 +53,7 @@ User defined value which describe current pipeline chunk of jobs to be executed.
 - coverage gathering - which gathers coverage artifacts and uploads it to coveralls.io
 
 To reach above pipeline configuration below configuration can be provided:
-(non important attributes where omitted)
+(non-important attributes were omitted)
 ```
 steps:
 - commands:

@@ -204,4 +204,4 @@ We want only to move dockers from gcr to dockerhub without changing version. Cur
 - "NEW_VERSION=3.0.0-dc6bf78"
 - "CODENAMES=Focal,Bullseye"
 - "PUBLISH=1"
-```
+```

docs/test_dsl_spec.md (+1 -1)

@@ -192,7 +192,7 @@ DSL.List.iter (left_partition @ right_partition) ~f:destroy
 
 ALTERNATIVE TEST: keep two partitions separate with a fixed topology, with 1-2 intermediate
 nodes bridging the networks, then take the other bridge offline temporarily and then have them
-rejoin the network without topologoical restrictions and see if the chains reconverge
+rejoin the network without topological restrictions and see if the chains reconverge
 
 ##### Basic Hard Fork Test

docs/testnet-guardian-runbook.md (+1 -1)

@@ -144,7 +144,7 @@ Following is the list of events that could occur after the release
 |--|------|--------|-------------|--|
 | 1| Issues preventing the users from completing the task | Since, we already tested all the challenges, either the user did the task incorrectly or in a different way that was missed by the engineering team. Usually, the community responds quickly to issues involving the challenges. Let the user know of the alternative way to finish the task. If the errors are real protocol/product bugs, create an issue and request the user to attach coda logs to the issue| Minor | |
 | 2| Users' nodes crashing intermittently | If these are not one of the known bugs then create an issue for the same. Request the user to attach the latest crash report| Minor | |
-| 3| Users' nodes crashing persistently | If it is for a specific user, might be that they did something differently when starting the node or their environment is not as expected. For example, connection timeouts (eventually causing the dameons to crash) between daemon and prover or daemon and snark workers could be because of resource constraints. If the cause is not determined, create an issue and request the user to attach the crash report | Major | Engineering team |
+| 3| Users' nodes crashing persistently | If it is for a specific user, might be that they did something differently when starting the node or their environment is not as expected. For example, connection timeouts (eventually causing the daemons to crash) between daemon and prover or daemon and snark workers could be because of resource constraints. If the cause is not determined, create an issue and request the user to attach the crash report | Major | Engineering team |
 |4| Unstable testnet | Create an issue for the protocol team to investigate. Coordinate with the owners of this event to discuss further actions based on the findings by the protocol team | Critical | Aneesha, Brandon, Engineer investigating the issue|
 
 ## Change Log

nix/README.md (+1 -1)

@@ -605,7 +605,7 @@ happen when you try to build anything inside the pure shell. It happens because
 the stack size for every process is limited, and it is shared between the
 current environment, the argument list, and some other things. Therefore, if
 your environment takes up too much space, not enough is left for the arguments.
-The way to fix the error is to unset some of the bigger enviornment variables,
+The way to fix the error is to unset some of the bigger environment variables,
 perhaps with
 
 ```bash

nix/ocaml.nix (+1 -1)

@@ -254,7 +254,7 @@ let
     done
   '') package.outputs);
 
-  # Derivation which has all Mina's dependencies in it, and creates an empty output if the command succeds.
+  # Derivation which has all Mina's dependencies in it, and creates an empty output if the command succeeds.
   # Useful for unit tests.
   runMinaCheck = { name ? "check", extraInputs ? [ ], extraArgs ? { }, }:
     check:

nix/rust.nix (+1 -1)

@@ -5,7 +5,7 @@ let
   prev.makeRustPlatform {
     cargo = rust;
     rustc = rust;
-    # override stdenv.targetPlatform here, if neccesary
+    # override stdenv.targetPlatform here, if necessary
   };
   toolchainHashes = {
     "1.72" = "sha256-dxE7lmCFWlq0nl/wKcmYvpP9zqQbBitAQgZ1zx9Ooik=";

rfcs/0016-transition-frontier-persistence.md (+2 -2)

@@ -8,7 +8,7 @@ This RFC proposes a new system for persisting the transition frontier's state to
 ## Motivation
 [motivation]: #motivation
 
-The Transition Frontier is too large of a data structure to just blindly serialize and write to disk. Under non optimal network scenarios, we expect the upper bound of the data structure to be >100Gb. Even if the structure were smaller, we cannot write the structure out to disk every time we mutate it as the speed of the transition frontier data structure is critical to the systems ability to prevent DDoS attacks. Therefore, a more robust and effecient system is required to persist the Transition Frontier to disk without negatively effecting the speed of operations on the in memory copy of the Transition Frontier.
+The Transition Frontier is too large of a data structure to just blindly serialize and write to disk. Under non-optimal network scenarios, we expect the upper bound of the data structure to be >100Gb. Even if the structure were smaller, we cannot write the structure out to disk every time we mutate it as the speed of the transition frontier data structure is critical to the systems ability to prevent DDoS attacks. Therefore, a more robust and effecient system is required to persist the Transition Frontier to disk without negatively effecting the speed of operations on the in memory copy of the Transition Frontier.
 
 ## Detailed design
 [detailed-design]: #detailed-design

@@ -30,7 +30,7 @@ As actions are performed on the Transition Frontier, diffs are emitted and store
 
 Having two different mechanisms for writing the same data can be tricky as there can be bugs in one of the two mechanisms that would cause the data structures to become desynchronized. In order to help prevent this, we can introduce an incremental hash on top of the Transition Frontier which can be updated upon each diff application. This hash will give a direct and easy way to compare the structural equality of the two data structures. Being incremental, however, also means that the order of diff application needs to be the same across both data structures, so care needs to be taken with that ordering. Therefore, in a sense, this hash will represent the structure and content of the data structure, as well as the order in which actions were taken to get there. We only care about the former in our case, and the latter is just a consequence of the hash being incremental.
 
-In order to calculate this hash correctly, we need to introduce a new concept to a diff, which is that of a diff mutant. Each diff represents some mutation to perform on the Transition Frontier, however not every diff will contain the enough information by itself to encapsulate the state of the data structure after the mutation occurs. For example, setting a balance on an account in two implementations of the data structure does not guarantee that the accounts in each a equal as there are other fields on the account besides that. This is where the concept of a diff mutant comes in. The mutant of a diff is the set of all modified values in the data structure after the diff has been applied. Using this, we can create a proper incremental diff which will truly ensure our data structures are in sync.
+In order to calculate this hash correctly, we need to introduce a new concept to a diff, which is that of a diff mutant. Each diff represents some mutation to perform on the Transition Frontier, however not every diff will contain the enough information by itself to encapsulate the state of the data structure after the mutation occurs. For example, setting a balance on an account in two implementations of the data structure does not guarantee that the accounts in each an equal as there are other fields on the account besides that. This is where the concept of a diff mutant comes in. The mutant of a diff is the set of all modified values in the data structure after the diff has been applied. Using this, we can create a proper incremental diff which will truly ensure our data structures are in sync.
 
 These hashes will be Sha256 as there is no reason to use the Pedersen hashing algorithm we use in the rest of our code since none of this information needs to be snarked. The formula for calculating a new hash `h'` given an old hash `h` and a diff `diff` is as follows: `h' = sha256 h diff (Diff.mutant diff)`.
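The hash rule quoted in this hunk, `h' = sha256 h diff (Diff.mutant diff)`, amounts to one step of a rolling digest. A minimal sketch, assuming pre-serialized strings for the diff and its mutant and using the `digestif` library for illustration; this is not code from the commit, and Mina's real implementation may differ:

```ocaml
(* One step of the incremental frontier hash described in the RFC hunk above.
   [diff] and [mutant] are assumed to be already-serialized strings. *)
let next_hash (h : Digestif.SHA256.t) ~(diff : string) ~(mutant : string) :
    Digestif.SHA256.t =
  (* h' = sha256 (h || diff || mutant) *)
  Digestif.SHA256.digestv_string
    [ Digestif.SHA256.to_raw_string h; diff; mutant ]
```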

rfcs/0020-transition-frontier-extensions-2.md (+1 -1)

@@ -28,7 +28,7 @@ See [#1585](https://github.com/CodaProtocol/coda/pull/1585) for early discussion
 
 ### Extensions Redefined
 
-A Transition Frontier Extension is an stateful, incremental view on the state of a Transiton Frontier. When a Transition Frontier is initialized, all of its extensions are also initialized using the Transition Frontier's root. Every mutation performed is represented as a list of diffs, and when the Transition Frontier updates, each Extension is notified of this list of diffs synchronously. Transition Frontier Extensions will notify the Transition Frontier if there was a update to the Extension's view when handling the diffs. If an Extension's view is updated, then a synchronous event is broadcast internally with the new view of that Extension. A Transition Frontier Extension has access to the Transition Frontier so that it can query and calculate information it requires when it handles diffs.
+A Transition Frontier Extension is a stateful, incremental view on the state of a Transiton Frontier. When a Transition Frontier is initialized, all of its extensions are also initialized using the Transition Frontier's root. Every mutation performed is represented as a list of diffs, and when the Transition Frontier updates, each Extension is notified of this list of diffs synchronously. Transition Frontier Extensions will notify the Transition Frontier if there was a update to the Extension's view when handling the diffs. If an Extension's view is updated, then a synchronous event is broadcast internally with the new view of that Extension. A Transition Frontier Extension has access to the Transition Frontier so that it can query and calculate information it requires when it handles diffs.
 
 ### Extension Guidelines
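The paragraph above pins down the extension contract: created from the frontier's root, fed diff lists synchronously, and reporting back only when its view changes. A hypothetical OCaml signature capturing that contract; all module names are stand-ins, not the repository's actual interface:

```ocaml
(* Hypothetical shape of a Transition Frontier Extension as described above;
   [Breadcrumb], [Diff], and [Transition_frontier] are illustrative. *)
module type Extension_intf = sig
  type t

  (* The incremental view this extension maintains. *)
  type view

  (* Extensions are initialized from the frontier's root. *)
  val create : root:Breadcrumb.t -> t

  (* Called synchronously with each batch of diffs; returns [Some view] only
     when handling the diffs changed the view, so the frontier can broadcast
     the new view internally. The frontier itself is available for queries. *)
  val handle_diffs : t -> Transition_frontier.t -> Diff.t list -> view option
end
```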

rfcs/0026-transition-caching.md (+1 -1)

@@ -8,7 +8,7 @@ A new transition caching logic for the transition router which aims to track tra
 ## Motivation
 [motivation]: #motivation
 
-Within the transition router system, the only check for duplicate transitions is performed by the transition validator, and each transition is only checked against the transitions which are currently in the transition frontier. However, there are two types of duplicate transitions which are not being checked for: transitions which are still being processed by the system (either in the processor pipe or in the catchup scheduler and catchup thread), and transitions which have been determined to be invalid. In the case of the former, the system ends up processing more transitions than necessary, and the number of duplicated processing increases along with the networks size. In the case of the latter, the system is opened up for DDoS attacks since an adversary could continously send transitions with valid proofs but invalid staged ledger diffs, causing each node to spend a significant enough amount of time before invalidating the transition each time it recieves it.
+Within the transition router system, the only check for duplicate transitions is performed by the transition validator, and each transition is only checked against the transitions which are currently in the transition frontier. However, there are two types of duplicate transitions which are not being checked for: transitions which are still being processed by the system (either in the processor pipe or in the catchup scheduler and catchup thread), and transitions which have been determined to be invalid. In the case of the former, the system ends up processing more transitions than necessary, and the number of duplicated processing increases along with the networks size. In the case of the latter, the system is opened up for DDoS attacks since an adversary could continuously send transitions with valid proofs but invalid staged ledger diffs, causing each node to spend a significant enough amount of time before invalidating the transition each time it recieves it.
 
 NOTE: This RFC has been re-scoped to only address duplicate transitions already being processed and not transitions which were previously determined to be invalid.
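The first class of duplicates called out above, transitions still in the processor pipe or catchup scheduler, reduces to a membership check over in-flight state hashes. A toy sketch of that idea, assuming state hashes are strings; the RFC's actual cache design is richer than this:

```ocaml
(* Toy version of the in-flight duplicate check motivated above; not the
   RFC's actual design. *)
open Core_kernel

let in_flight : String.Hash_set.t = String.Hash_set.create ()

(* Returns [`Duplicate] for a transition already being processed, otherwise
   records it and returns [`Fresh]. *)
let register_transition (state_hash : string) =
  if Hash_set.mem in_flight state_hash then `Duplicate
  else (
    Hash_set.add in_flight state_hash ;
    `Fresh )
```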

src/app/dump_blocks/encoding.ml (+3 -2)

@@ -50,9 +50,10 @@ module Sexp_block : S with type t = Mina_block.t = struct
 
   let of_breadcrumb = block_of_breadcrumb
 
-  let to_string b = Mina_block.sexp_of_t b |> Sexp.to_string |> append_newline
+  let to_string b =
+    Mina_block.Stable.Latest.sexp_of_t b |> Sexp.to_string |> append_newline
 
-  let of_string s = Sexp.of_string s |> Mina_block.t_of_sexp
+  let of_string s = Sexp.of_string s |> Mina_block.Stable.Latest.t_of_sexp
 end
 
 module Binary_block : S with type t = Mina_block.t = struct
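The two `+` lines pin the sexp converters to `Mina_block.Stable.Latest`; composing them gives the round trip sketched below, assuming `Core_kernel`'s `Sexp` as in the surrounding module:

```ocaml
(* Round trip implied by the changed [to_string]/[of_string] pair above:
   serialize a block with the stable sexp converter, then parse it back. *)
let roundtrip (b : Mina_block.Stable.Latest.t) : Mina_block.Stable.Latest.t =
  Mina_block.Stable.Latest.sexp_of_t b
  |> Core_kernel.Sexp.to_string
  |> Core_kernel.Sexp.of_string
  |> Mina_block.Stable.Latest.t_of_sexp
```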

src/lib/block_producer/block_producer.ml (-20)

@@ -420,26 +420,6 @@ let generate_next_state ~commit_id ~zkapp_cmd_limit ~constraint_constants
       in
       Some (protocol_state, internal_transition, witness) ) )
 
-module Precomputed = struct
-  type t = Precomputed.t =
-    { scheduled_time : Block_time.t
-    ; protocol_state : Protocol_state.value
-    ; protocol_state_proof : Proof.t
-    ; staged_ledger_diff : Staged_ledger_diff.t
-    ; delta_transition_chain_proof :
-        Frozen_ledger_hash.t * Frozen_ledger_hash.t list
-    ; protocol_version : Protocol_version.t
-    ; proposed_protocol_version : Protocol_version.t option
-    ; accounts_accessed : (int * Account.t) list
-    ; accounts_created : (Account_id.t * Currency.Fee.t) list
-    ; tokens_used : (Token_id.t * Account_id.t option) list
-    }
-
-  let sexp_of_t = Precomputed.sexp_of_t
-
-  let t_of_sexp = Precomputed.t_of_sexp
-end
-
 let handle_block_production_errors ~logger ~rejected_blocks_logger
     ~time_taken:span ~previous_protocol_state ~protocol_state x =
   let transition_error_msg_prefix = "Validation failed: " in

src/lib/disk_cache/disk_cache.mli (+1 -20)

@@ -1,20 +1 @@
-open Core_kernel
-open Async
-
-module Make : functor (T : Binable.S) -> sig
-  type t
-
-  (** Initialize the on-disk cache explicitly before interactions with it take place. *)
-  val initialize :
-       string
-    -> logger:Logger.t
-    -> (t, [> `Initialization_error of Error.t ]) Deferred.Result.t
-
-  type id
-
-  (** Put the value to disk, return an identifier that is associated with a special handler in GC. *)
-  val put : t -> T.t -> id
-
-  (** Read from the cache, crashing if the value cannot be found. *)
-  val get : t -> id -> T.t
-end
+module Make : Disk_cache_intf.F
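The deleted signature spells out the lifecycle that now lives behind `Disk_cache_intf.F`: `initialize` the cache, `put` a value to obtain an id, then `get` it back. A hypothetical usage sketch of that lifecycle; the functor argument, path, and logger call are illustrative assumptions, not code from this commit:

```ocaml
(* Hypothetical use of the interface deleted above; all concrete names are
   illustrative. *)
open Async

module Cache = Disk_cache.Make (Mina_block.Stable.Latest)

let cache_roundtrip (block : Mina_block.Stable.Latest.t) :
    Mina_block.Stable.Latest.t Deferred.t =
  match%map
    Cache.initialize "/tmp/mina-disk-cache" ~logger:(Logger.create ())
  with
  | Error (`Initialization_error err) -> Core_kernel.Error.raise err
  | Ok cache ->
      (* [put] writes the value to disk and returns an id tied to a GC
         handler. *)
      let id = Cache.put cache block in
      (* [get] crashes if the value cannot be found. *)
      Cache.get cache id
```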

src/lib/disk_cache/dune (+5 -3)

@@ -1,14 +1,16 @@
 (library
  (public_name disk_cache)
  (virtual_modules disk_cache)
- (default_implementation disk_cache.filesystem)
+ (default_implementation disk_cache.identity)
  (libraries
   ;; opam libraries
-  core_kernel
+  core
   async
   ;; local libraries
+  file_system
   logger
+  disk_cache.intf
  )
  (preprocess
-  (pps ppx_mina ppx_version))
+  (pps ppx_mina ppx_version ppx_jane))
 (instrumentation (backend bisect_ppx)))
