 //! The `ledger` module provides functions for parallel verification of the
 //! Proof of History ledger.

-use bincode::{self, deserialize, serialize_into};
-use entry::{next_entry, Entry};
+use bincode::{self, deserialize, serialize_into, serialized_size};
+use entry::Entry;
 use hash::Hash;
-use packet;
-use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
+use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
 use rayon::prelude::*;
-use std::cmp::min;
 use std::collections::VecDeque;
 use std::io::Cursor;
-use std::mem::size_of;
 use transaction::Transaction;

+// a Block is a slice of Entries
+
 pub trait Block {
     /// Verifies the hashes and counts of a slice of transactions are all consistent.
     fn verify(&self, start_hash: &Hash) -> bool;
@@ -27,135 +26,124 @@ impl Block for [Entry] {
     }

     fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
-        let mut start = 0;
-        let mut end = 0;
-        while start < self.len() {
-            let mut entries: Vec<Vec<Entry>> = Vec::new();
-            let mut total = 0;
-            for i in &self[start..] {
-                total += size_of::<Transaction>() * i.transactions.len();
-                total += size_of::<Entry>();
-                if total >= BLOB_DATA_SIZE {
-                    break;
-                }
-                end += 1;
-            }
-            // See if we need to split the transactions
-            if end <= start {
-                let mut transaction_start = 0;
-                let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
-                let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
-                    - 1) / num_transactions_per_blob;
-                trace!(
-                    "splitting transactions end: {} total_chunks: {}",
-                    end,
-                    total_entry_chunks
-                );
-                for _ in 0..total_entry_chunks {
-                    let transaction_end = min(
-                        transaction_start + num_transactions_per_blob,
-                        self[end].transactions.len(),
-                    );
-                    let mut entry = Entry {
-                        num_hashes: self[end].num_hashes,
-                        id: self[end].id,
-                        transactions: self[end].transactions[transaction_start..transaction_end]
-                            .to_vec(),
-                    };
-                    entries.push(vec![entry]);
-                    transaction_start = transaction_end;
-                }
-                end += 1;
-            } else {
-                entries.push(self[start..end].to_vec());
-            }
-
-            for entry in entries {
-                let b = blob_recycler.allocate();
-                let pos = {
-                    let mut bd = b.write().unwrap();
-                    let mut out = Cursor::new(bd.data_mut());
-                    serialize_into(&mut out, &entry).expect("failed to serialize output");
-                    out.position() as usize
-                };
-                assert!(pos < BLOB_SIZE);
-                b.write().unwrap().set_size(pos);
-                q.push_back(b);
-            }
-            start = end;
+        for entry in self {
+            let blob = blob_recycler.allocate();
+            let pos = {
+                let mut bd = blob.write().unwrap();
+                let mut out = Cursor::new(bd.data_mut());
+                serialize_into(&mut out, &entry).expect("failed to serialize output");
+                out.position() as usize
+            };
+            assert!(pos < BLOB_SIZE);
+            blob.write().unwrap().set_size(pos);
+            q.push_back(blob);
         }
     }
 }

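The rewritten `to_blobs` serializes each entry straight into a preallocated blob buffer and records the cursor position as the blob's size. A minimal, self-contained sketch of that pattern using only `std` and `bincode` 1.x; the `BLOB_SIZE` value and the `payload` type here are stand-ins, not the crate's real definitions:

extern crate bincode; // bincode 1.x

use std::io::Cursor;

const BLOB_SIZE: usize = 1024; // stand-in for packet::BLOB_SIZE

fn main() {
    let payload: Vec<u64> = vec![1, 2, 3];
    let mut data = [0u8; BLOB_SIZE]; // fixed buffer, like blob.data_mut()
    let pos = {
        let mut out = Cursor::new(&mut data[..]);
        bincode::serialize_into(&mut out, &payload).expect("failed to serialize output");
        out.position() as usize // bytes written; becomes the blob's size
    };
    assert!(pos < BLOB_SIZE);
    println!("serialized {} bytes into a {}-byte blob", pos, BLOB_SIZE);
}

The `assert!(pos < BLOB_SIZE)` mirrors the check in `to_blobs`: serialization must fit inside the fixed buffer, which `next_entries_mut` below guarantees by construction.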
-/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
-pub fn next_entries(
-    start_hash: &Hash,
-    num_hashes: u64,
-    transaction_batches: Vec<Vec<Transaction>>,
-) -> Vec<Entry> {
-    let mut id = *start_hash;
-    let mut entries = vec![];
-    for transactions in transaction_batches {
-        let entry = next_entry(&id, num_hashes, transactions);
-        id = entry.id;
+pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
+    let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
+    for msgs in blobs {
+        let blob = msgs.read().unwrap();
+        let entry: Entry = deserialize(&blob.data()[..blob.meta.size])?;
         entries.push(entry);
     }
-    entries
+    Ok(entries)
 }

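`reconstruct_entries_from_blobs` inverts `to_blobs`: each blob's first `meta.size` bytes deserialize back into exactly one `Entry`. The std-and-bincode counterpart of the sketch above, round-tripping through the truncated buffer (the names are again stand-ins):

extern crate bincode; // bincode 1.x

use std::io::Cursor;

fn main() {
    let payload: Vec<u64> = vec![1, 2, 3];
    let mut data = [0u8; 1024]; // stand-in blob buffer
    let size = {
        let mut out = Cursor::new(&mut data[..]);
        bincode::serialize_into(&mut out, &payload).expect("serialize");
        out.position() as usize // plays the role of blob.meta.size
    };
    // Deserialize only the meaningful prefix, as reconstruct_entries_from_blobs does.
    let decoded: Vec<u64> = bincode::deserialize(&data[..size]).expect("deserialize");
    assert_eq!(decoded, payload);
}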
-pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
-    let mut entries_to_apply: Vec<Entry> = Vec::new();
-    let mut last_id = Hash::default();
-    for msgs in blobs {
-        let blob = msgs.read().unwrap();
-        let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size])?;
-        for entry in entries {
-            if entry.id == last_id {
-                if let Some(last_entry) = entries_to_apply.last_mut() {
-                    last_entry.transactions.extend(entry.transactions);
-                }
-            } else {
-                last_id = entry.id;
-                entries_to_apply.push(entry);
-            }
+/// Creates the next entries for the given transactions; updates `start_hash`
+/// to the id of the last Entry and resets `cur_hashes` to 0.
+pub fn next_entries_mut(
+    start_hash: &mut Hash,
+    cur_hashes: &mut u64,
+    transactions: Vec<Transaction>,
+) -> Vec<Entry> {
+    if transactions.is_empty() {
+        vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
+    } else {
+        let mut chunk_len = transactions.len();
+
+        // check for fit: halve chunk_len until the chunk can be serialized into one blob
+        while serialized_size(&Entry {
+            num_hashes: 0,
+            id: Hash::default(),
+            transactions: transactions[0..chunk_len].to_vec(),
+        }).unwrap() > BLOB_DATA_SIZE as u64
+        {
+            chunk_len /= 2;
         }
+
+        let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
+
+        for chunk in transactions.chunks(chunk_len) {
+            entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
+        }
+        entries
     }
-    Ok(entries_to_apply)
+}
+
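The `while` loop above finds a fitting chunk size by repeated halving: it measures a throwaway `Entry` with `serialized_size` and halves `chunk_len` until the serialized chunk fits in `BLOB_DATA_SIZE`. A standalone sketch of the same search, with a made-up linear cost function standing in for `serialized_size(&Entry { .. })` and a hypothetical payload budget:

// Standalone sketch of the fit-by-halving search in next_entries_mut.
// entry_size is a made-up cost model; the constants are hypothetical.
fn main() {
    const BLOB_DATA_SIZE: u64 = 4096; // hypothetical payload budget per blob
    let overhead: u64 = 48; // pretend fixed per-Entry overhead
    let per_tx: u64 = 300; // pretend per-transaction serialized cost
    let entry_size = |n: u64| overhead + n * per_tx;

    let total_txs: u64 = 100;
    let mut chunk_len = total_txs;
    while entry_size(chunk_len) > BLOB_DATA_SIZE {
        chunk_len /= 2; // 100 -> 50 -> 25 -> 12
    }
    assert_eq!(chunk_len, 12); // first halving that fits: 48 + 12 * 300 = 3648
    let num_chunks = (total_txs + chunk_len - 1) / chunk_len; // ceil(100 / 12) = 9
    println!("split {} txs into {} chunks of <= {} each", total_txs, num_chunks, chunk_len);
}

Halving is coarse (it can undershoot the largest chunk that would fit) but cheap: it converges in O(log n) size probes, and each resulting chunk is guaranteed to serialize into a single blob.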
+/// Creates the next Entries for the given transactions
+pub fn next_entries(
+    start_hash: &Hash,
+    cur_hashes: u64,
+    transactions: Vec<Transaction>,
+) -> Vec<Entry> {
+    let mut id = *start_hash;
+    let mut num_hashes = cur_hashes;
+    next_entries_mut(&mut id, &mut num_hashes, transactions)
 }

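A brief usage sketch for the wrapper, assuming this crate's `hash` and `ledger` modules: `next_entries` copies the PoH state into locals, so the caller's hash and hash count are left untouched.

use hash::Hash;
use ledger::{next_entries, Block};

fn demo() {
    let start_hash = Hash::default();
    // An empty transaction list still produces a single tick-like entry.
    let entries = next_entries(&start_hash, 0, vec![]);
    assert_eq!(entries.len(), 1);
    // The entries chain from start_hash, so the slice verifies against it.
    assert!(entries.verify(&start_hash));
}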
 #[cfg(test)]
 mod tests {
     use super::*;
+    use entry::{next_entry, Entry};
     use hash::hash;
     use packet::BlobRecycler;
     use signature::{KeyPair, KeyPairUtil};
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
     use transaction::Transaction;

+    /// Create a vector of Entries of length `transaction_batches.len()`
+    /// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
+    fn next_entries_batched(
+        start_hash: &Hash,
+        cur_hashes: u64,
+        transaction_batches: Vec<Vec<Transaction>>,
+    ) -> Vec<Entry> {
+        let mut id = *start_hash;
+        let mut entries = vec![];
+        let mut num_hashes = cur_hashes;
+
+        for transactions in transaction_batches {
+            let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
+            entries.append(&mut entry_batch);
+        }
+        entries
+    }
+
     #[test]
     fn test_verify_slice() {
         let zero = Hash::default();
         let one = hash(&zero);
         assert!(vec![][..].verify(&zero)); // base case
         assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
         assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
-        assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
+        assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step

-        let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
+        let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
         bad_ticks[1].id = one;
         assert!(!bad_ticks.verify(&zero)); // inductive step, bad
     }

     #[test]
-    fn test_entry_to_blobs() {
+    fn test_entries_to_blobs() {
         let zero = Hash::default();
         let one = hash(&zero);
         let keypair = KeyPair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
-        let transactions = vec![tx0; 10000];
-        let e0 = Entry::new(&zero, 0, transactions);
+        let transactions = vec![tx0; 10_000];
+        let entries = next_entries(&zero, 0, transactions);

-        let entries = vec![e0];
         let blob_recycler = BlobRecycler::default();
         let mut blob_q = VecDeque::new();
         entries.to_blobs(&blob_recycler, &mut blob_q);
@@ -172,14 +160,16 @@ mod tests {
     }

     #[test]
-    fn test_next_entries() {
+    fn test_next_entries_batched() {
+        // this also tests next_entries; ugly, but an easy way to exercise a vec of vecs (batch)
         let mut id = Hash::default();
         let next_id = hash(&id);
         let keypair = KeyPair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
+
         let transactions = vec![tx0; 5];
         let transaction_batches = vec![transactions.clone(); 5];
-        let entries0 = next_entries(&id, 1, transaction_batches);
+        let entries0 = next_entries_batched(&id, 0, transaction_batches);

         assert_eq!(entries0.len(), 5);

@@ -197,14 +187,30 @@ mod tests {
 mod bench {
     extern crate test;
     use self::test::Bencher;
+    use hash::hash;
     use ledger::*;
+    use packet::BlobRecycler;
+    use signature::{KeyPair, KeyPairUtil};
+    use transaction::Transaction;

     #[bench]
-    fn bench_next_entries(bencher: &mut Bencher) {
-        let start_hash = Hash::default();
-        let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
+    fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
+        let zero = Hash::default();
+        let one = hash(&zero);
+        let keypair = KeyPair::new();
+        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
+        let transactions = vec![tx0; 10];
+        let entries = next_entries(&zero, 1, transactions);
+
+        let blob_recycler = BlobRecycler::default();
         bencher.iter(|| {
-            assert!(entries.verify(&start_hash));
+            let mut blob_q = VecDeque::new();
+            entries.to_blobs(&blob_recycler, &mut blob_q);
+            assert_eq!(reconstruct_entries_from_blobs(&blob_q).unwrap(), entries);
+            for blob in blob_q {
+                blob_recycler.recycle(blob);
+            }
         });
     }
+
 }