Skip to content

Commit

Permalink
[VM] Update execution logic to invalidate loader cache upon all parti…
Browse files Browse the repository at this point in the history
…al errors when there's a code publish
  • Loading branch information
perryjrandall committed Jan 27, 2023
1 parent 6aae9a3 commit a78970a
Show file tree
Hide file tree
Showing 6 changed files with 163 additions and 15 deletions.
73 changes: 58 additions & 15 deletions aptos-move/aptos-vm/src/aptos_vm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -325,6 +325,7 @@ impl AptosVM {
txn_data: &TransactionMetadata,
payload: &TransactionPayload,
log_context: &AdapterLogSchema,
new_published_modules_loaded: &mut bool,
) -> Result<(VMStatus, TransactionOutputExt), VMStatus> {
fail_point!("move_adapter::execute_script_or_entry_function", |_| {
Err(VMStatus::Error(
Expand Down Expand Up @@ -389,7 +390,11 @@ impl AptosVM {
}
.map_err(|e| e.into_vm_status())?;

self.resolve_pending_code_publish(&mut session, gas_meter)?;
self.resolve_pending_code_publish(
&mut session,
gas_meter,
new_published_modules_loaded,
)?;

let session_output = session.finish().map_err(|e| e.into_vm_status())?;
let change_set_ext =
Expand Down Expand Up @@ -444,13 +449,15 @@ impl AptosVM {
modules: &[CompiledModule],
exists: BTreeSet<ModuleId>,
senders: &[AccountAddress],
new_published_modules_loaded: &mut bool,
) -> VMResult<()> {
let init_func_name = ident_str!("init_module");
for module in modules {
if exists.contains(&module.self_id()) {
// Call initializer only on first publish.
continue;
}
*new_published_modules_loaded = true;
let init_function = session.load_function(&module.self_id(), init_func_name, &[]);
// it is ok to not have init_module function
// init_module function should be (1) private and (2) has no return value
Expand Down Expand Up @@ -516,6 +523,7 @@ impl AptosVM {
txn_data: &TransactionMetadata,
modules: &ModuleBundle,
log_context: &AdapterLogSchema,
new_published_modules_loaded: &mut bool,
) -> Result<(VMStatus, TransactionOutputExt), VMStatus> {
if MODULE_BUNDLE_DISALLOWED.load(Ordering::Relaxed) {
return Err(VMStatus::Error(StatusCode::FEATURE_UNDER_GATING));
Expand All @@ -531,6 +539,8 @@ impl AptosVM {
.map_err(|e| e.into_vm_status())?;

Self::verify_module_bundle(&mut session, modules)?;
// publish_module_bundle_with_compat_config doesn't actually load the published module into
// the loader cache. It only puts the module data in the data cache.
session
.publish_module_bundle_with_compat_config(
modules.clone().into_inner(),
Expand All @@ -554,6 +564,7 @@ impl AptosVM {
&self.deserialize_module_bundle(modules)?,
BTreeSet::new(),
&[txn_data.sender()],
new_published_modules_loaded,
)?;

let session_output = session.finish().map_err(|e| e.into_vm_status())?;
Expand All @@ -572,6 +583,7 @@ impl AptosVM {
&self,
session: &mut SessionExt<S>,
gas_meter: &mut AptosGasMeter,
new_published_modules_loaded: &mut bool,
) -> VMResult<()> {
if let Some(PublishRequest {
destination,
Expand Down Expand Up @@ -621,15 +633,9 @@ impl AptosVM {
&modules,
exists,
&[destination],
new_published_modules_loaded,
)
})
.map_err(|e| {
// Be sure to flush the loader cache to align storage with the cache.
// None of the modules in the bundle will be committed to storage,
// but some of them may have ended up in the cache.
self.0.mark_loader_cache_as_invalid();
e
})
} else {
Ok(())
}
Expand Down Expand Up @@ -727,6 +733,10 @@ impl AptosVM {
txn_data.max_gas_amount(),
);

// We keep track of whether any newly published modules are loaded into the VM's loader
// cache as part of executing transactions. This allows us to decide whether the cache
// should be flushed later.
let mut new_published_modules_loaded = false;
let result = match txn.payload() {
payload @ TransactionPayload::Script(_)
| payload @ TransactionPayload::EntryFunction(_) => self
Expand All @@ -737,10 +747,17 @@ impl AptosVM {
&txn_data,
payload,
log_context,
&mut new_published_modules_loaded,
),
TransactionPayload::ModuleBundle(m) => {
self.execute_modules(storage, session, &mut gas_meter, &txn_data, m, log_context)
}
TransactionPayload::ModuleBundle(m) => self.execute_modules(
storage,
session,
&mut gas_meter,
&txn_data,
m,
log_context,
&mut new_published_modules_loaded,
),
};

let gas_usage = txn_data
Expand All @@ -752,6 +769,15 @@ impl AptosVM {
match result {
Ok(output) => output,
Err(err) => {
// Invalidate the loader cache in case there was a new module loaded from a module
// publish request that failed.
// This ensures the loader cache is flushed later to align storage with the cache.
// None of the modules in the bundle will be committed to storage,
// but some of them may have ended up in the cache.
if new_published_modules_loaded {
self.0.mark_loader_cache_as_invalid();
};

let txn_status = TransactionStatus::from(err.clone());
if txn_status.is_discarded() {
discard_error_vm_status(err)
Expand Down Expand Up @@ -1279,6 +1305,7 @@ impl AptosSimulationVM {
storage_gas_params.clone(),
txn_data.max_gas_amount(),
);
let mut new_published_modules_loaded = false;

let result = match txn.payload() {
payload @ TransactionPayload::Script(_)
Expand All @@ -1290,17 +1317,33 @@ impl AptosSimulationVM {
&txn_data,
payload,
log_context,
&mut new_published_modules_loaded,
)
}
TransactionPayload::ModuleBundle(m) => {
self.0
.execute_modules(storage, session, &mut gas_meter, &txn_data, m, log_context)
}
TransactionPayload::ModuleBundle(m) => self.0.execute_modules(
storage,
session,
&mut gas_meter,
&txn_data,
m,
log_context,
&mut new_published_modules_loaded,
),
};

match result {
Ok(output) => output,
Err(err) => {
// Invalidate the loader cache in case there was a new module loaded from a module
// publish request that failed.
// This ensures the loader cache is flushed later to align storage with the cache.
// None of the modules in the bundle will be committed to storage,
// but some of them may have ended up in the cache.
if new_published_modules_loaded {
self.0 .0.mark_loader_cache_as_invalid();
};
// Note: transaction simulation is done in the API layer of a node and the VM is
// normally not reused, but the loader cache is still invalidated above so that,
// if the VM ever is reused, the cache stays aligned with storage.
let txn_status = TransactionStatus::from(err.clone());
if txn_status.is_discarded() {
discard_error_vm_status(err)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
[package]
name = "test_package"
version = "0.0.0"
upgrade_policy = "immutable"

[dependencies]
AptosFramework = { local = "../../../../../framework/aptos-framework" }
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
module 0xcafe::test {
    use std::signer::address_of;
    use aptos_framework::aptos_coin::AptosCoin;
    use aptos_framework::coin::{Self, Coin};

    struct State has key {
        important_value: u64,
        coins: Coin<AptosCoin>,
    }

    /// Deliberately drains the publisher's entire APT balance into the stored
    /// resource, leaving nothing behind to pay for gas, so that publishing this
    /// package is guaranteed to fail.
    fun init_module(s: &signer) {
        let publisher = address_of(s);
        let full_balance = coin::balance<AptosCoin>(publisher);
        let drained = coin::withdraw<AptosCoin>(s, full_balance);

        let state = State {
            important_value: get_value(),
            coins: drained,
        };
        move_to(s, state)
    }

    /// Sentinel identifying this (first, failing) version of the package.
    fun get_value(): u64 {
        1
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
[package]
name = "test_package"
version = "0.0.0"
upgrade_policy = "immutable"

[dependencies]
AptosFramework = { local = "../../../../../framework/aptos-framework" }
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
module 0xcafe::test {
    use aptos_framework::aptos_coin::AptosCoin;
    use aptos_framework::coin::{Self, Coin};

    struct State has key {
        important_value: u64,
        coins: Coin<AptosCoin>,
    }

    /// Stores a `State` under the publisher with an empty coin store. Unlike
    /// the first-attempt package, this `init_module` performs no withdrawal
    /// and is expected to succeed.
    fun init_module(s: &signer) {
        let state = State {
            important_value: get_value(),
            coins: coin::zero<AptosCoin>(),
        };
        move_to(s, state)
    }

    /// Sentinel identifying this (second, succeeding) version of the package.
    fun get_value(): u64 {
        2
    }
}
45 changes: 45 additions & 0 deletions aptos-move/e2e-move-tests/src/tests/code_publishing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,13 @@ struct State {
value: u64,
}

/// Mimics `0xcafe::test::State`
///
/// Used to deserialize the on-chain resource published by the test packages.
/// NOTE(review): the Move struct's second field is `coins: Coin<AptosCoin>`,
/// while this struct names it `value: u64` — presumably BCS encodes the nested
/// `Coin` as its single inner `u64`, so the fields line up positionally; TODO
/// confirm against the `coin::Coin` layout.
#[derive(Serialize, Deserialize)]
struct StateWithCoins {
    // Matches `important_value` in the Move struct.
    important_value: u64,
    // Matches the `u64` inside `coins: Coin<AptosCoin>` in the Move struct.
    value: u64,
}

/// Runs the basic publishing test for all legacy flag combinations. Otherwise we will only
/// run tests which are expected to make a difference for legacy flag combinations.
#[rstest(enabled, disabled,
Expand Down Expand Up @@ -243,6 +250,44 @@ fn code_publishing_using_resource_account() {
assert_success!(result);
}

#[test]
fn code_publishing_with_two_attempts_and_verify_loader_is_invalidated() {
    let mut harness = MoveHarness::new();
    let account =
        harness.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap());

    // Publish to the same address twice within a single block:
    //   1. a package whose `init_module` aborts, then
    //   2. a corrected package that should publish cleanly.
    // If the VM's loader cache were not properly invalidated after the failed
    // attempt, stale code from attempt #1 could linger and be executed (or even
    // fail the second attempt outright) instead of the second package's
    // `init_module`. A correctly flushed cache stores the second package's
    // value (2), not the first's.
    let first_attempt = harness.create_publish_package(
        &account,
        &common::test_dir_path("code_publishing.data/pack_init_module_failed"),
        None,
        |_| {},
    );
    let second_attempt = harness.create_publish_package(
        &account,
        &common::test_dir_path("code_publishing.data/pack_init_module_second_attempt"),
        None,
        |_| {},
    );

    let outputs = harness.run_block(vec![first_attempt, second_attempt]);
    assert_abort!(outputs[0], _);
    assert_success!(outputs[1]);

    let state = harness
        .read_resource::<StateWithCoins>(
            account.address(),
            parse_struct_tag("0xcafe::test::State").unwrap(),
        )
        .unwrap();
    assert_eq!(2, state.important_value);
}

#[rstest(enabled, disabled,
case(vec![], vec![FeatureFlag::CODE_DEPENDENCY_CHECK]),
case(vec![FeatureFlag::CODE_DEPENDENCY_CHECK], vec![]),
Expand Down

0 comments on commit a78970a

Please sign in to comment.