Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ on:
push:
branches:
- main
- te/fix_spec_tests_2nd
pull_request:
branches:
- main
Expand Down
3 changes: 2 additions & 1 deletion src/state_transition/block/initiate_validator_exit.zig
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
const std = @import("std");
const CachedBeaconStateAllForks = @import("../cache/state_cache.zig").CachedBeaconStateAllForks;
const ForkSeq = @import("config").ForkSeq;
const ssz = @import("consensus_types");
Expand Down Expand Up @@ -56,5 +57,5 @@ pub fn initiateValidatorExit(cached_state: *const CachedBeaconStateAllForks, val
validator.exit_epoch = computeExitEpochAndUpdateChurn(cached_state, validator.effective_balance);
}

validator.withdrawable_epoch = validator.exit_epoch + config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY;
validator.withdrawable_epoch = try std.math.add(u64, validator.exit_epoch, config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY);
}
1 change: 1 addition & 0 deletions src/state_transition/cache/epoch_transition_cache.zig
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,7 @@ pub const EpochTransitionCache = struct {
var indices_eligible_for_activation_queue = std.ArrayList(ValidatorIndex).init(allocator);
// we will extract indices_eligible_for_activation from validator_activation_list later
var validator_activation_list = ValidatorActivationList.init(allocator);
defer validator_activation_list.deinit();
var indices_to_eject = std.ArrayList(ValidatorIndex).init(allocator);

var total_active_stake_by_increment: u64 = 0;
Expand Down
2 changes: 1 addition & 1 deletion src/state_transition/epoch/process_epoch.zig
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ pub fn processEpoch(allocator: std.mem.Allocator, cached_state: *CachedBeaconSta
}

if (state.isPhase0()) {
processParticipationRecordUpdates(cached_state);
processParticipationRecordUpdates(cached_state, allocator);
} else {
try processParticipationFlagUpdates(cached_state, allocator);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,11 @@ pub fn weighJustificationAndFinalization(cached_state: *CachedBeaconStateAllFork
const current_epoch = computeEpochAtSlot(state.slot());
const previous_epoch = if (current_epoch == GENESIS_EPOCH) GENESIS_EPOCH else current_epoch - 1;

const old_previous_justified_checkpoint = state.previousJustifiedCheckpoint();
const old_current_justified_checkpoint = state.currentJustifiedCheckpoint();
const old_previous_justified_checkpoint = state.previousJustifiedCheckpoint().*;
const old_current_justified_checkpoint = state.currentJustifiedCheckpoint().*;

// Process justifications
old_previous_justified_checkpoint.* = old_current_justified_checkpoint.*;
state.previousJustifiedCheckpoint().* = old_current_justified_checkpoint;
const justification_bits = state.justificationBits();
var bits = [_]bool{false} ** JustificationBits.length;
justification_bits.toBoolArray(&bits);
Expand Down Expand Up @@ -66,18 +66,18 @@ pub fn weighJustificationAndFinalization(cached_state: *CachedBeaconStateAllFork
// Process finalizations
// The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source
if (bits[1] and bits[2] and bits[3] and old_previous_justified_checkpoint.epoch + 3 == current_epoch) {
finalized_checkpoint.* = old_previous_justified_checkpoint.*;
finalized_checkpoint.* = old_previous_justified_checkpoint;
}
// The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source
if (bits[1] and bits[2] and old_previous_justified_checkpoint.epoch + 2 == current_epoch) {
finalized_checkpoint.* = old_previous_justified_checkpoint.*;
finalized_checkpoint.* = old_previous_justified_checkpoint;
}
// The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source
if (bits[0] and bits[1] and bits[2] and old_current_justified_checkpoint.epoch + 2 == current_epoch) {
finalized_checkpoint.* = old_current_justified_checkpoint.*;
finalized_checkpoint.* = old_current_justified_checkpoint;
}
// The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source
if (bits[0] and bits[1] and old_current_justified_checkpoint.epoch + 1 == current_epoch) {
finalized_checkpoint.* = old_current_justified_checkpoint.*;
finalized_checkpoint.* = old_current_justified_checkpoint;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ const EpochTransitionCache = @import("../cache/epoch_transition_cache.zig").Epoc
const ssz = @import("consensus_types");
const preset = @import("preset").preset;

pub fn processParticipationRecordUpdates(cached_state: *CachedBeaconStateAllForks) void {
pub fn processParticipationRecordUpdates(cached_state: *CachedBeaconStateAllForks, allocator: Allocator) void {
const state = cached_state.state;
// rotate current/previous epoch attestations
state.rotateEpochPendingAttestations();
state.rotateEpochPendingAttestations(allocator);
}
22 changes: 13 additions & 9 deletions src/state_transition/epoch/process_pending_deposits.zig
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ pub fn processPendingDeposits(allocator: Allocator, cached_state: *CachedBeaconS
const available_for_processing = deposit_balance_to_consume.* + getActivationExitChurnLimit(epoch_cache);
var processed_amount: u64 = 0;
var next_deposit_index: u64 = 0;
var deposits_to_postpone = std.ArrayList(PendingDeposit).init(allocator);
defer deposits_to_postpone.deinit();
var is_churn_limit_reached = false;
const finalized_slot = computeStartSlotAtEpoch(state.finalizedCheckpoint().epoch);

Expand All @@ -33,7 +35,7 @@ pub fn processPendingDeposits(allocator: Allocator, cached_state: *CachedBeaconS
const chunk = 100;
const pending_deposits = state.pendingDeposits();
const pending_deposits_len = pending_deposits.items.len;
outer: while (start_index < pending_deposits_len) : (start_index += chunk) {
outer: while (start_index < pending_deposits.items.len) : (start_index += chunk) {
// TODO(ssz.primitive): implement getReadonlyByRange api for TreeView
// const deposits: []PendingDeposit = state.getPendingDeposits().getReadonlyByRange(start_index, chunk);
const deposits: []PendingDeposit = pending_deposits.items[start_index..@min(start_index + chunk, pending_deposits_len)];
Expand Down Expand Up @@ -74,8 +76,8 @@ pub fn processPendingDeposits(allocator: Allocator, cached_state: *CachedBeaconS
// Deposited balance will never become active. Increase balance but do not consume churn
try applyPendingDeposit(allocator, cached_state, deposit, cache);
} else if (is_validator_exited) {
// TODO: typescript version accumulate to temp array while in zig we append directly
try pending_deposits.append(allocator, deposit);
// Validator is exiting, postpone the deposit until after withdrawable epoch
try deposits_to_postpone.append(deposit);
} else {
// Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch.
is_churn_limit_reached = processed_amount + deposit.amount > available_for_processing;
Expand All @@ -93,17 +95,19 @@ pub fn processPendingDeposits(allocator: Allocator, cached_state: *CachedBeaconS
}

if (next_deposit_index > 0) {
// TODO: implement sliceFrom for TreeView api
const new_len = pending_deposits_len - next_deposit_index;
@memcpy(pending_deposits.items[0..new_len], pending_deposits.items[next_deposit_index..]);
// cannot use memcpy since they may overlap
for (0..new_len) |i| {
pending_deposits.items[i] = pending_deposits.items[i + next_deposit_index];
}
try pending_deposits.resize(allocator, new_len);
}

// TODO: consider doing this for TreeView
// for (const deposit of depositsToPostpone) {
// state.pendingDeposits.push(deposit);
// }
for (deposits_to_postpone.items) |deposit| {
try pending_deposits.append(allocator, deposit);
}

// no need to append to pending_deposits again because we did that in the for loop above already
// Accumulate churn only if the churn limit has been hit.
deposit_balance_to_consume.* =
if (is_churn_limit_reached)
Expand Down
4 changes: 2 additions & 2 deletions src/state_transition/types/beacon_state.zig
Original file line number Diff line number Diff line change
Expand Up @@ -427,10 +427,10 @@ pub const BeaconStateAllForks = union(enum) {
};
}

pub fn rotateEpochPendingAttestations(self: *BeaconStateAllForks) void {
pub fn rotateEpochPendingAttestations(self: *BeaconStateAllForks, allocator: Allocator) void {
switch (self.*) {
.phase0 => |state| {
state.previous_epoch_attestations.clearRetainingCapacity();
state.previous_epoch_attestations.deinit(allocator);
state.previous_epoch_attestations = state.current_epoch_attestations;
state.current_epoch_attestations = ssz.phase0.EpochAttestations.default_value;
},
Expand Down
1 change: 1 addition & 0 deletions test/spec/root.zig
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,5 @@ const testing = @import("std").testing;
comptime {
testing.refAllDecls(@import("./test_case/operations_tests.zig"));
testing.refAllDecls(@import("./test_case/sanity_tests.zig"));
testing.refAllDecls(@import("./test_case/epoch_processing_tests.zig"));
}
134 changes: 134 additions & 0 deletions test/spec/runner/epoch_processing.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
const ssz = @import("consensus_types");
const Allocator = std.mem.Allocator;
const Root = ssz.primitive.Root.Type;
const ForkSeq = @import("config").ForkSeq;
const Preset = @import("preset").Preset;
const preset = @import("preset").preset;
const std = @import("std");
const state_transition = @import("state_transition");
const TestCachedBeaconStateAllForks = state_transition.test_utils.TestCachedBeaconStateAllForks;
const BeaconStateAllForks = state_transition.BeaconStateAllForks;
const EpochTransitionCache = state_transition.EpochTransitionCache;
const Withdrawals = ssz.capella.Withdrawals.Type;
const WithdrawalsResult = state_transition.WithdrawalsResult;
const test_case = @import("../test_case.zig");
const TestCaseUtils = test_case.TestCaseUtils;
const loadSszValue = test_case.loadSszSnappyValue;
const loadBlsSetting = test_case.loadBlsSetting;
const expectEqualBeaconStates = test_case.expectEqualBeaconStates;
const BlsSetting = test_case.BlsSetting;

/// Epoch-processing sub-functions exercised by the consensus spec tests.
/// Tag names mirror the handler directory names in the spec test vectors
/// (e.g. `tests/<preset>/<fork>/epoch_processing/<tag>/pyspec_tests/...`).
pub const EpochProcessingFn = enum {
    effective_balance_updates,
    eth1_data_reset,
    historical_roots_update,
    inactivity_updates,
    justification_and_finalization,
    participation_flag_updates,
    participation_record_updates,
    randao_mixes_reset,
    registry_updates,
    rewards_and_penalties,
    slashings,
    slashings_reset,
    sync_committee_updates,
    historical_summaries_update,
    pending_deposits,
    pending_consolidations,
    // TODO: fulu
    // proposer_lookahead,

    /// Returns the suite directory name for this handler,
    /// e.g. "slashings/pyspec_tests".
    /// NOTE(review): `++` requires comptime-known operands, so this only
    /// compiles when `self` is comptime-known at the call site — confirm
    /// all callers invoke this with a comptime enum value.
    pub fn suiteName(self: EpochProcessingFn) []const u8 {
        return @tagName(self) ++ "/pyspec_tests";
    }
};

/// Builds a spec-test case type for one (fork, epoch-processing handler) pair.
/// Each instantiation loads a pre state and an optional post state from a test
/// directory, runs the selected epoch-processing step on the pre state, and
/// compares the result against the post state.
pub fn TestCase(comptime fork: ForkSeq, comptime epoch_process_fn: EpochProcessingFn) type {
    const tc_utils = TestCaseUtils(fork);

    return struct {
        pre: TestCachedBeaconStateAllForks,
        // a null post state means the test is expected to fail
        post: ?BeaconStateAllForks,

        const Self = @This();

        /// Load the test case from `dir`, run it, and clean up.
        pub fn execute(allocator: std.mem.Allocator, dir: std.fs.Dir) !void {
            var tc = try Self.init(allocator, dir);
            defer tc.deinit();

            try tc.runTest();
        }

        /// Load the pre state and (optional) post state from `dir`.
        /// Caller owns the returned value and must call `deinit`.
        pub fn init(allocator: std.mem.Allocator, dir: std.fs.Dir) !Self {
            var tc = Self{
                .pre = undefined,
                .post = undefined,
            };

            // load pre state
            tc.pre = try tc_utils.loadPreState(allocator, dir);
            // release the pre state if loading the post state below fails
            errdefer tc.pre.deinit();

            // load post state (null when post.ssz_snappy is absent, i.e. the
            // test expects the processing step to error)
            tc.post = try tc_utils.loadPostState(allocator, dir);

            return tc;
        }

        /// Release both states and tear down global state-transition caches.
        pub fn deinit(self: *Self) void {
            self.pre.deinit();
            if (self.post) |*post| {
                // post state was allocated with the same allocator as pre
                post.deinit(self.pre.allocator);
            }
            state_transition.deinitStateTransition();
        }

        /// Run the processing step; with a post state, the result must match it;
        /// without one, the step must return an error (SkipZigTest propagates).
        fn runTest(self: *Self) !void {
            if (self.post) |post| {
                try self.process();
                try expectEqualBeaconStates(post, self.pre.cached_state.state.*);
            } else {
                self.process() catch |err| {
                    if (err == error.SkipZigTest) {
                        return err;
                    }
                    // any other error is the expected failure — test passes
                    return;
                };
                return error.ExpectedError;
            }
        }

        /// Dispatch to the epoch-processing function selected at comptime,
        /// mutating the pre state in place.
        fn process(self: *Self) !void {
            const pre = self.pre.cached_state;
            const allocator = self.pre.allocator;
            // NOTE(review): destroy() implies init returns a pointer allocated
            // with `allocator` — confirm against EpochTransitionCache.init.
            var epoch_transition_cache = try EpochTransitionCache.init(allocator, self.pre.cached_state);
            defer {
                epoch_transition_cache.deinit();
                allocator.destroy(epoch_transition_cache);
            }

            switch (epoch_process_fn) {
                .effective_balance_updates => _ = try state_transition.processEffectiveBalanceUpdates(pre, epoch_transition_cache),
                .eth1_data_reset => state_transition.processEth1DataReset(allocator, pre, epoch_transition_cache),
                .historical_roots_update => try state_transition.processHistoricalRootsUpdate(allocator, pre, epoch_transition_cache),
                .inactivity_updates => try state_transition.processInactivityUpdates(pre, epoch_transition_cache),
                .justification_and_finalization => try state_transition.processJustificationAndFinalization(pre, epoch_transition_cache),
                // TODO: allocator as first param
                .participation_flag_updates => try state_transition.processParticipationFlagUpdates(pre, allocator),
                .participation_record_updates => state_transition.processParticipationRecordUpdates(pre, allocator),
                .randao_mixes_reset => state_transition.processRandaoMixesReset(pre, epoch_transition_cache),
                .registry_updates => try state_transition.processRegistryUpdates(pre, epoch_transition_cache),
                .rewards_and_penalties => try state_transition.processRewardsAndPenalties(allocator, pre, epoch_transition_cache),
                .slashings => try state_transition.processSlashings(allocator, pre, epoch_transition_cache),
                .slashings_reset => state_transition.processSlashingsReset(pre, epoch_transition_cache),
                .sync_committee_updates => try state_transition.processSyncCommitteeUpdates(allocator, pre),
                .historical_summaries_update => try state_transition.processHistoricalSummariesUpdate(allocator, pre, epoch_transition_cache),
                .pending_deposits => try state_transition.processPendingDeposits(allocator, pre, epoch_transition_cache),
                .pending_consolidations => try state_transition.processPendingConsolidations(allocator, pre, epoch_transition_cache),
                // TODO: fulu
                // .proposer_lookahead => {},
            }
        }
    };
}
7 changes: 3 additions & 4 deletions test/spec/test_case.zig
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,9 @@ pub fn TestCaseUtils(comptime fork: ForkSeq) type {
/// consumer should deinit the returned state and destroy the pointer
pub fn loadPostState(allocator: Allocator, dir: std.fs.Dir) !?BeaconStateAllForks {
const post_exist = if (dir.statFile("post.ssz_snappy")) |_| true else |err| blk: {
if (err == error.FileNotFound) {
break :blk false;
} else {
return err;
switch (err) {
error.FileNotFound => break :blk false,
else => return err,
}
};
if (post_exist) {
Expand Down
2 changes: 2 additions & 0 deletions test/spec/write_spec_tests.zig
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,14 @@ const supported_forks = [_]ForkSeq{
const supported_test_runners = [_]RunnerKind{
.operations,
.sanity,
.epoch_processing,
};

fn TestWriter(comptime kind: RunnerKind) type {
return switch (kind) {
.operations => @import("./writer/operations.zig"),
.sanity => @import("./writer/sanity.zig"),
.epoch_processing => @import("./writer/epoch_processing.zig"),
else => @compileError("Unsupported test runner"),
};
}
Expand Down
62 changes: 62 additions & 0 deletions test/spec/writer/epoch_processing.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
const std = @import("std");
const spec_test_options = @import("spec_test_options");
const ForkSeq = @import("config").ForkSeq;
const Preset = @import("preset").Preset;
const Handler = @import("../runner/epoch_processing.zig").EpochProcessingFn;

/// One writer entry per epoch-processing handler enum value.
pub const handlers = std.enums.values(Handler);

/// Preamble written once at the top of the generated test file:
/// imports and the shared testing allocator.
pub const header =
    \\// This file is generated by write_spec_tests.zig.
    \\// Do not commit changes by hand.
    \\
    \\const std = @import("std");
    \\const ForkSeq = @import("config").ForkSeq;
    \\const active_preset = @import("preset").active_preset;
    \\const spec_test_options = @import("spec_test_options");
    \\const EpochProcessing = @import("../runner/epoch_processing.zig");
    \\
    \\const allocator = std.testing.allocator;
    \\
    \\
;

/// Per-test-case template. Its eight `{s}` placeholders are filled in three
/// groups (see `writeTest`): the test name, the on-disk path segments, and
/// the comptime `TestCase(fork, handler)` instantiation. A missing test
/// directory skips the test rather than failing it.
const test_template =
    \\test "{s} epoch processing {s} {s}" {{
    \\ const test_dir_name = try std.fs.path.join(allocator, &[_][]const u8{{
    \\ spec_test_options.spec_test_out_dir,
    \\ spec_test_options.spec_test_version,
    \\ @tagName(active_preset) ++ "/tests/" ++ @tagName(active_preset) ++ "/{s}/epoch_processing/{s}/pyspec_tests/{s}",
    \\ }});
    \\ defer allocator.free(test_dir_name);
    \\ const test_dir = std.fs.cwd().openDir(test_dir_name, .{{}}) catch return error.SkipZigTest;
    \\
    \\ try EpochProcessing.TestCase(.{s}, .{s}).execute(allocator, test_dir);
    \\}}
    \\
    \\
;

/// Emit the generated-file preamble (imports and allocator setup) to `writer`.
pub fn writeHeader(writer: std.io.AnyWriter) !void {
    // `header` contains no format placeholders, so a plain write is equivalent
    // to printing it with an empty argument tuple.
    try writer.writeAll(header);
}

/// Render one generated `test` block for (fork, handler, test_case_name).
/// The eight arguments fill `test_template`'s `{s}` placeholders in three
/// groups, in order: the test name, the on-disk test directory path, and the
/// comptime `TestCase(.fork, .handler)` instantiation.
pub fn writeTest(
    writer: std.io.AnyWriter,
    fork: ForkSeq,
    handler: Handler,
    test_case_name: []const u8,
) !void {
    try writer.print(test_template, .{
        // test name: "<fork> epoch processing <handler> <case>"
        @tagName(fork),
        @tagName(handler),
        test_case_name,

        // path segments: <fork>/epoch_processing/<handler>/pyspec_tests/<case>
        @tagName(fork),
        @tagName(handler),
        test_case_name,

        // comptime instantiation: TestCase(.<fork>, .<handler>)
        @tagName(fork),
        @tagName(handler),
    });
}
}