diff --git a/Cargo.lock b/Cargo.lock index 25a1739466d..a9b29bd471f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1931,9 +1931,7 @@ dependencies = [ [[package]] name = "frame-decode" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "641e3739fa708a278d35b008a05244008c221240abc3e1c27138466c13e999ed" +version = "0.11.0" dependencies = [ "frame-metadata 23.0.0", "parity-scale-codec", @@ -4431,9 +4429,9 @@ dependencies = [ [[package]] name = "scale-info-legacy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5da3f59983b08a37d8d979d2326bdc00e8cca57b3d28fb05bdc0f6d7c28600c" +checksum = "bd183213b6831b6bc08fda67a310bf9299889d669e264a2a2168679079a0c522" dependencies = [ "hashbrown 0.15.3", "scale-type-resolver", @@ -4487,9 +4485,9 @@ dependencies = [ [[package]] name = "scale-value" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca8b26b451ecb7fd7b62b259fa28add63d12ec49bbcac0e01fcb4b5ae0c09aa" +checksum = "884aab179aba344c67ddcd1d7dd8e3f8fee202f2e570d97ec34ec8688442a5b3" dependencies = [ "base58", "blake2", @@ -5570,6 +5568,7 @@ dependencies = [ "bitvec", "derive-where", "either", + "frame-decode", "frame-metadata 23.0.0", "futures", "hex", diff --git a/Cargo.toml b/Cargo.toml index cc855dc9935..ed5b86a438c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,7 @@ darling = "0.20.10" derive-where = "1.2.7" either = { version = "1.13.0", default-features = false } finito = { version = "0.1.0", default-features = false } -frame-decode = { version = "0.10.0", default-features = false } +frame-decode = { version = "0.11.0", default-features = false } frame-metadata = { version = "23.0.0", default-features = false } futures = { version = "0.3.31", default-features = false, features = ["std"] } getrandom = { version = "0.2", default-features = false } @@ -98,12 +98,12 
@@ proc-macro2 = "1.0.86" quote = "1.0.37" regex = { version = "1.11.0", default-features = false } scale-info = { version = "2.11.4", default-features = false } -scale-value = { version = "0.18.0", default-features = false } +scale-value = { version = "0.18.1", default-features = false } scale-bits = { version = "0.7.0", default-features = false } scale-decode = { version = "0.16.0", default-features = false } scale-encode = { version = "0.10.0", default-features = false } scale-type-resolver = { version = "0.2.0" } -scale-info-legacy = { version = "0.2.3" } +scale-info-legacy = { version = "0.2.4" } scale-typegen = "0.11.1" scale-typegen-description = "0.11.0" serde = { version = "1.0.210", default-features = false, features = ["derive"] } @@ -192,3 +192,6 @@ opt-level = 2 opt-level = 2 [profile.test.package.smoldot] opt-level = 2 + +[patch.crates-io] +frame-decode = { path = "../frame-decode" } \ No newline at end of file diff --git a/cli/src/commands/diff.rs b/cli/src/commands/diff.rs index a5f723f4dbf..03208b29fa0 100644 --- a/cli/src/commands/diff.rs +++ b/cli/src/commands/diff.rs @@ -215,7 +215,6 @@ struct StorageEntryDiff { key_different: bool, value_different: bool, default_different: bool, - modifier_different: bool, } impl StorageEntryDiff { @@ -225,41 +224,32 @@ impl StorageEntryDiff { metadata_1: &Metadata, metadata_2: &Metadata, ) -> Self { - let value_1_ty_id = storage_entry_1.entry_type().value_ty(); + let value_1_ty_id = storage_entry_1.value_ty(); let value_1_hash = metadata_1 .type_hash(value_1_ty_id) .expect("type is in metadata; qed"); - let value_2_ty_id = storage_entry_2.entry_type().value_ty(); + let value_2_ty_id = storage_entry_2.value_ty(); let value_2_hash = metadata_2 .type_hash(value_2_ty_id) .expect("type is in metadata; qed"); let value_different = value_1_hash != value_2_hash; - let key_1_hash = storage_entry_1 - .entry_type() - .key_ty() - .map(|key_ty| { - metadata_1 - .type_hash(key_ty) - .expect("type is in metadata; qed") - }) 
- .unwrap_or_default(); - let key_2_hash = storage_entry_2 - .entry_type() - .key_ty() - .map(|key_ty| { - metadata_2 - .type_hash(key_ty) - .expect("type is in metadata; qed") - }) - .unwrap_or_default(); - let key_different = key_1_hash != key_2_hash; + let key_parts_same = storage_entry_1.keys().len() == storage_entry_2.keys().len() + && storage_entry_1 + .keys() + .zip(storage_entry_2.keys()) + .all(|(a, b)| { + let a_hash = metadata_1.type_hash(a.key_id).expect("type is in metadata"); + let b_hash = metadata_2.type_hash(b.key_id).expect("type is in metadata"); + a.hasher == b.hasher && a_hash == b_hash + }); + + let key_different = !key_parts_same; StorageEntryDiff { key_different, value_different, - default_different: storage_entry_1.default_bytes() != storage_entry_2.default_bytes(), - modifier_different: storage_entry_1.modifier() != storage_entry_2.modifier(), + default_different: storage_entry_1.default_value() != storage_entry_2.default_value(), } } @@ -271,9 +261,6 @@ impl StorageEntryDiff { if self.value_different { strings.push("value type"); } - if self.modifier_different { - strings.push("modifier"); - } if self.default_different { strings.push("default value"); } diff --git a/cli/src/commands/explore/pallets/calls.rs b/cli/src/commands/explore/pallets/calls.rs index 573191511ec..11de1ac4eb4 100644 --- a/cli/src/commands/explore/pallets/calls.rs +++ b/cli/src/commands/explore/pallets/calls.rs @@ -12,7 +12,7 @@ use subxt::utils::H256; use subxt::{ OfflineClient, config::SubstrateConfig, - metadata::{Metadata, types::PalletMetadata}, + metadata::{Metadata, PalletMetadata}, }; use crate::utils::{ diff --git a/cli/src/commands/explore/pallets/constants.rs b/cli/src/commands/explore/pallets/constants.rs index 68325ceb0a4..e91240c02a4 100644 --- a/cli/src/commands/explore/pallets/constants.rs +++ b/cli/src/commands/explore/pallets/constants.rs @@ -2,7 +2,7 @@ use clap::Args; use color_eyre::eyre::eyre; use indoc::{formatdoc, writedoc}; use 
scale_typegen_description::type_description; -use subxt::metadata::{Metadata, types::PalletMetadata}; +use subxt::metadata::{Metadata, PalletMetadata}; use crate::utils::{Indent, SyntaxHighlight, first_paragraph_of_docs, format_scale_value}; diff --git a/cli/src/commands/explore/pallets/events.rs b/cli/src/commands/explore/pallets/events.rs index ed87b296019..6abb36a8e84 100644 --- a/cli/src/commands/explore/pallets/events.rs +++ b/cli/src/commands/explore/pallets/events.rs @@ -2,7 +2,7 @@ use clap::Args; use color_eyre::eyre::eyre; use indoc::{formatdoc, writedoc}; use scale_info::{Variant, form::PortableForm}; -use subxt::metadata::{Metadata, types::PalletMetadata}; +use subxt::metadata::{Metadata, PalletMetadata}; use crate::utils::{Indent, fields_description, first_paragraph_of_docs}; diff --git a/cli/src/commands/explore/pallets/storage.rs b/cli/src/commands/explore/pallets/storage.rs index 3536354296e..3d5edc0b45a 100644 --- a/cli/src/commands/explore/pallets/storage.rs +++ b/cli/src/commands/explore/pallets/storage.rs @@ -1,17 +1,11 @@ use clap::Args; -use color_eyre::{ - eyre::{bail, eyre}, - owo_colors::OwoColorize, -}; +use color_eyre::{eyre::bail, owo_colors::OwoColorize}; use indoc::{formatdoc, writedoc}; use scale_typegen_description::type_description; use scale_value::Value; use std::fmt::Write; use std::write; -use subxt::metadata::{ - Metadata, - types::{PalletMetadata, StorageEntryType, StorageMetadata}, -}; +use subxt::metadata::{Metadata, PalletMetadata, StorageMetadata}; use crate::utils::{ FileOrUrl, Indent, SyntaxHighlight, create_client, first_paragraph_of_docs, @@ -75,12 +69,7 @@ pub async fn explore_storage( ); }; - let (return_ty_id, key_ty_id) = match storage.entry_type() { - StorageEntryType::Plain(value) => (*value, None), - StorageEntryType::Map { - value_ty, key_ty, .. 
- } => (*value_ty, Some(*key_ty)), - }; + let return_ty_id = storage.value_ty(); let key_value_placeholder = "".blue(); @@ -114,15 +103,30 @@ pub async fn explore_storage( "}?; // inform user about shape of the key if it can be provided: - if let Some(key_ty_id) = key_ty_id { - let key_ty_description = type_description(key_ty_id, metadata.types(), true) - .expect("No type Description") - .indent(4) - .highlight(); + let storage_keys = storage.keys().collect::>(); + if !storage_keys.is_empty() { + let key_ty_description = format!( + "({})", + storage_keys + .iter() + .map(|key| type_description(key.key_id, metadata.types(), true) + .expect("No type Description")) + .collect::>() + .join(", ") + ) + .indent(4) + .highlight(); - let key_ty_example = type_example(key_ty_id, metadata.types()) - .indent(4) - .highlight(); + let key_ty_example = format!( + "({})", + storage_keys + .iter() + .map(|key| type_example(key.key_id, metadata.types()).to_string()) + .collect::>() + .join(", ") + ) + .indent(4) + .highlight(); writedoc! {output, " @@ -144,7 +148,8 @@ pub async fn explore_storage( return Ok(()); } - let storage_entry_keys: Vec = match (!trailing_args.is_empty(), key_ty_id.is_some()) { + let storage_entry_keys: Vec = match (!trailing_args.is_empty(), !storage_keys.is_empty()) + { // keys provided, keys not needed. (true, false) => { let trailing_args_str = trailing_args.join(" "); @@ -190,18 +195,16 @@ pub async fn explore_storage( // construct the client: let client = create_client(&file_or_url).await?; - let storage_query = subxt::dynamic::storage(pallet_name, storage.name(), storage_entry_keys); - let decoded_value_thunk_or_none = client - .storage() - .at_latest() - .await? 
- .fetch(&storage_query) - .await?; + let storage_query = subxt::dynamic::storage::, Value>(pallet_name, storage.name()); + + let storage_client_at = client.storage().at_latest().await?; + + let storage_entry = storage_client_at.entry(storage_query)?; + + let storage_value = storage_entry.fetch(storage_entry_keys).await?; - let decoded_value_thunk = - decoded_value_thunk_or_none.ok_or(eyre!("Value not found in storage."))?; + let value = storage_value.decode()?.to_string().highlight(); - let value = decoded_value_thunk.to_value()?.to_string().highlight(); writedoc! {output, " The value of the storage entry is: diff --git a/cli/src/commands/explore/runtime_apis/mod.rs b/cli/src/commands/explore/runtime_apis/mod.rs index b291a2b95b2..04a609b7b6e 100644 --- a/cli/src/commands/explore/runtime_apis/mod.rs +++ b/cli/src/commands/explore/runtime_apis/mod.rs @@ -101,15 +101,13 @@ pub async fn run<'a>( return format!("The method does not require an {input_value_placeholder}"); } - let fields: Vec<(Option<&str>, u32)> = method - .inputs() - .map(|f| (Some(f.name.as_str()), f.ty)) - .collect(); + let fields: Vec<(Option<&str>, u32)> = + method.inputs().map(|f| (Some(&*f.name), f.id)).collect(); let fields_description = fields_description(&fields, method.name(), metadata.types()).indent(4); let fields_example = - fields_composite_example(method.inputs().map(|e| e.ty), metadata.types()) + fields_composite_example(method.inputs().map(|e| e.id), metadata.types()) .indent(4) .highlight(); @@ -164,13 +162,14 @@ pub async fn run<'a>( {value_str} "}?; // encode, then decode. 
This ensures that the scale value is of the correct shape for the param: - let bytes = value.encode_as_type(ty.ty, metadata.types())?; - let value = Value::decode_as_type(&mut &bytes[..], ty.ty, metadata.types())?; + let bytes = value.encode_as_type(ty.id, metadata.types())?; + let value = Value::decode_as_type(&mut &bytes[..], ty.id, metadata.types())?; Ok(value) }) .collect::>>()?; - let method_call = subxt::dynamic::runtime_api_call(api_name, method.name(), args_data); + let method_call = + subxt::dynamic::runtime_api_call::<_, Value>(api_name, method.name(), args_data); let client = create_client(&file_or_url).await?; let output_value = client .runtime_api() @@ -179,7 +178,7 @@ pub async fn run<'a>( .call(method_call) .await?; - let output_value = output_value.to_value()?.to_string().highlight(); + let output_value = output_value.to_string().highlight(); writedoc! {output, " Returned value: diff --git a/codegen/src/api/custom_values.rs b/codegen/src/api/custom_values.rs index aa816c6534a..2501bbc145c 100644 --- a/codegen/src/api/custom_values.rs +++ b/codegen/src/api/custom_values.rs @@ -57,16 +57,17 @@ fn generate_custom_value_fn( .types() .resolve(custom_value.type_id()) .is_some(); + let (return_ty, decodable) = if type_is_valid { let return_ty = type_gen .resolve_type_path(custom_value.type_id()) .expect("type is in metadata; qed") .to_token_stream(type_gen.settings()); - let decodable = quote!(#crate_path::utils::Yes); + let decodable = quote!(#crate_path::utils::Maybe); (return_ty, decodable) } else { // if type registry does not contain the type, we can just return the Encoded scale bytes. 
- (quote!(()), quote!(())) + (quote!(()), quote!(#crate_path::utils::No)) }; Some(quote!( diff --git a/codegen/src/api/pallet_view_functions.rs b/codegen/src/api/pallet_view_functions.rs index 2a1b9932c9f..563a621de3f 100644 --- a/codegen/src/api/pallet_view_functions.rs +++ b/codegen/src/api/pallet_view_functions.rs @@ -12,7 +12,45 @@ use scale_typegen::typegen::ir::ToTokensWithSettings; use std::collections::HashSet; use subxt_metadata::{PalletMetadata, ViewFunctionMetadata}; +pub fn generate_pallet_view_functions( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + crate_path: &syn::Path, +) -> Result { + if !pallet.has_view_functions() { + // If there are no view functions in this pallet, we + // don't generate anything. + return Ok(quote! {}); + } + + let view_functions: Vec<_> = pallet + .view_functions() + .map(|vf| generate_pallet_view_function(pallet.name(), vf, type_gen, crate_path)) + .collect::>()?; + + let view_functions_types = view_functions.iter().map(|(apis, _)| apis); + let view_functions_methods = view_functions.iter().map(|(_, getters)| getters); + + let types_mod_ident = type_gen.types_mod_ident(); + + Ok(quote! 
{ + pub mod view_functions { + use super::root_mod; + use super::#types_mod_ident; + + pub struct ViewFunctionsApi; + + impl ViewFunctionsApi { + #( #view_functions_methods )* + } + + #( #view_functions_types )* + } + }) +} + fn generate_pallet_view_function( + pallet_name: &str, view_function: ViewFunctionMetadata<'_>, type_gen: &TypeGenerator, crate_path: &syn::Path, @@ -20,9 +58,7 @@ fn generate_pallet_view_function( let types_mod_ident = type_gen.types_mod_ident(); let view_function_name_str = view_function.name(); - let view_function_name_ident = format_ident!("{}", view_function_name_str); - - let query_id = view_function.query_id(); + let view_function_name_ident = format_ident!("{view_function_name_str}"); let validation_hash = view_function.hash(); let docs = view_function.docs(); @@ -68,7 +104,7 @@ fn generate_pallet_view_function( // Path to the actual type we'll have generated for this input. let type_path = type_gen - .resolve_type_path(input.ty) + .resolve_type_path(input.id) .expect("view function input type is in metadata; qed") .to_token_stream(type_gen.settings()); @@ -81,12 +117,11 @@ fn generate_pallet_view_function( .collect() }; - let input_struct_params = view_function_inputs + let input_tuple_types = view_function_inputs .iter() .map(|i| { - let arg = &i.name; let ty = &i.type_alias; - quote!(pub #arg: #ty) + quote!(#view_function_name_ident::#ty) }) .collect::>(); @@ -111,19 +146,12 @@ fn generate_pallet_view_function( .resolve_type_path(view_function.output_ty())? .to_token_stream(type_gen.settings()); - let input_struct_derives = type_gen.settings().derives.default_derives(); - // Define the input and output type bits. 
- let view_function_def = quote!( + let view_function_types = quote!( pub mod #view_function_name_ident { use super::root_mod; use super::#types_mod_ident; - #input_struct_derives - pub struct Input { - #(#input_struct_params,)* - } - #(#input_type_aliases)* pub mod output { @@ -134,61 +162,23 @@ fn generate_pallet_view_function( ); // Define the getter method that will live on the `ViewFunctionApi` type. - let view_function_getter = quote!( + let view_function_method = quote!( #docs pub fn #view_function_name_ident( &self, #(#input_args),* ) -> #crate_path::view_functions::payload::StaticPayload< - #view_function_name_ident::Input, + (#(#input_tuple_types,)*), #view_function_name_ident::output::Output > { #crate_path::view_functions::payload::StaticPayload::new_static( - [#(#query_id,)*], - #view_function_name_ident::Input { - #(#input_param_names,)* - }, + #pallet_name, + #view_function_name_str, + (#(#input_param_names,)*), [#(#validation_hash,)*], ) } ); - Ok((view_function_def, view_function_getter)) -} - -pub fn generate_pallet_view_functions( - type_gen: &TypeGenerator, - pallet: &PalletMetadata, - crate_path: &syn::Path, -) -> Result { - if !pallet.has_view_functions() { - // If there are no view functions in this pallet, we - // don't generate anything. - return Ok(quote! {}); - } - - let view_functions: Vec<_> = pallet - .view_functions() - .map(|vf| generate_pallet_view_function(vf, type_gen, crate_path)) - .collect::>()?; - - let view_functions_defs = view_functions.iter().map(|(apis, _)| apis); - let view_functions_getters = view_functions.iter().map(|(_, getters)| getters); - - let types_mod_ident = type_gen.types_mod_ident(); - - Ok(quote! 
{ - pub mod view_functions { - use super::root_mod; - use super::#types_mod_ident; - - pub struct ViewFunctionsApi; - - impl ViewFunctionsApi { - #( #view_functions_getters )* - } - - #( #view_functions_defs )* - } - }) + Ok((view_function_types, view_function_method)) } diff --git a/codegen/src/api/runtime_apis.rs b/codegen/src/api/runtime_apis.rs index d6693131654..23744814ad5 100644 --- a/codegen/src/api/runtime_apis.rs +++ b/codegen/src/api/runtime_apis.rs @@ -16,16 +16,51 @@ use quote::{format_ident, quote}; use crate::CodegenError; +/// Generate the runtime APIs. +pub fn generate_runtime_apis( + metadata: &Metadata, + type_gen: &TypeGenerator, + types_mod_ident: &syn::Ident, + crate_path: &syn::Path, +) -> Result { + let runtime_fns: Vec<_> = metadata + .runtime_api_traits() + .map(|api| generate_runtime_api(api, type_gen, crate_path)) + .collect::>()?; + + let trait_defs = runtime_fns.iter().map(|(apis, _)| apis); + let trait_getters = runtime_fns.iter().map(|(_, getters)| getters); + + Ok(quote! { + pub mod runtime_apis { + use super::root_mod; + use super::#types_mod_ident; + + use #crate_path::ext::codec::Encode; + + pub struct RuntimeApi; + + impl RuntimeApi { + #( #trait_getters )* + } + + #( #trait_defs )* + } + }) +} + /// Generates runtime functions for the given API metadata. fn generate_runtime_api( api: RuntimeApiMetadata, type_gen: &TypeGenerator, crate_path: &syn::Path, ) -> Result<(TokenStream2, TokenStream2), CodegenError> { + let types_mod_ident = type_gen.types_mod_ident(); // Trait name must remain as is (upper case) to identify the runtime call. let trait_name_str = api.name(); // The snake case for the trait name. let trait_name_snake = format_ident!("{}", api.name().to_snake_case()); + let docs = api.docs(); let docs: TokenStream2 = type_gen .settings() @@ -33,11 +68,12 @@ fn generate_runtime_api( .then_some(quote! 
{ #( #[doc = #docs ] )* }) .unwrap_or_default(); - let structs_and_methods = api + let types_and_methods = api .methods() .map(|method| { let method_name = format_ident!("{}", method.name()); let method_name_str = method.name(); + let validation_hash = method.hash(); let docs = method.docs(); let docs: TokenStream2 = type_gen @@ -46,111 +82,130 @@ fn generate_runtime_api( .then_some(quote! { #( #[doc = #docs ] )* }) .unwrap_or_default(); - let mut unique_names = HashSet::new(); - let mut unique_aliases = HashSet::new(); - - let inputs: Vec<_> = method - .inputs() - .enumerate() - .map(|(idx, input)| { - // These are method names, which can just be '_', but struct field names can't - // just be an underscore, so fix any such names we find to work in structs. - let mut name = input.name.trim_start_matches('_').to_string(); - if name.is_empty() { - name = format!("_{idx}"); - } - while !unique_names.insert(name.clone()) { - // Name is already used, append the index until it is unique. - name = format!("{name}_param{idx}"); - } - - let mut alias = name.to_upper_camel_case(); - // Note: name is not empty. - if alias.as_bytes()[0].is_ascii_digit() { - alias = format!("Param{alias}"); - } - while !unique_aliases.insert(alias.clone()) { - alias = format!("{alias}Param{idx}"); - } - - let (alias_name, name) = (format_ident!("{alias}"), format_ident!("{name}")); + struct Input { + name: syn::Ident, + type_alias: syn::Ident, + type_path: TokenStream2, + } - // Generate alias for runtime type. 
- let ty = type_gen - .resolve_type_path(input.ty) - .expect("runtime api input type is in metadata; qed") - .to_token_stream(type_gen.settings()); - let aliased_param = quote!( pub type #alias_name = #ty; ); + let runtime_api_inputs: Vec = { + let mut unique_names = HashSet::new(); + let mut unique_aliases = HashSet::new(); + + method + .inputs() + .enumerate() + .map(|(idx, input)| { + // The method argument name is either the input name or the + // index (eg _1, _2 etc) if one isn't provided. + // if we get unlucky we'll end up with param_param1 etc. + let mut name = input.name.trim_start_matches('_').to_string(); + if name.is_empty() { + name = format!("_{idx}"); + } + while !unique_names.insert(name.clone()) { + name = format!("{name}_param{idx}"); + } + + // The alias is either InputName if provided, or Param1, Param2 etc if not. + // If we get unlucky we may even end up with ParamParam1 etc. + let mut alias = name.trim_start_matches('_').to_upper_camel_case(); + // Note: name is not empty. + if alias.as_bytes()[0].is_ascii_digit() { + alias = format!("Param{alias}"); + } + while !unique_aliases.insert(alias.clone()) { + alias = format!("{alias}Param{idx}"); + } + + // Generate alias for runtime type. + let type_path = type_gen + .resolve_type_path(input.id) + .expect("runtime api input type is in metadata; qed") + .to_token_stream(type_gen.settings()); + + Input { + name: format_ident!("{name}"), + type_alias: format_ident!("{alias}"), + type_path, + } + }) + .collect() + }; + + let input_tuple_types = runtime_api_inputs + .iter() + .map(|i| { + let ty = &i.type_alias; + quote!(#method_name::#ty) + }) + .collect::>(); + + let input_args = runtime_api_inputs + .iter() + .map(|i| { + let arg = &i.name; + let ty = &i.type_alias; + quote!(#arg: #method_name::#ty) + }) + .collect::>(); - // Structures are placed on the same level as the alias module. 
- let struct_ty_path = quote!( #method_name::#alias_name ); - let struct_param = quote!(#name: #struct_ty_path); + let input_param_names = runtime_api_inputs.iter().map(|i| &i.name); - // Function parameters must be indented by `types`. - let fn_param = quote!(#name: types::#struct_ty_path); - (fn_param, struct_param, name, aliased_param) - }) - .collect(); + let input_type_aliases = runtime_api_inputs.iter().map(|i| { + let ty = &i.type_alias; + let path = &i.type_path; + quote!(pub type #ty = #path;) + }); - let fn_params = inputs.iter().map(|(fn_param, _, _, _)| fn_param); - let struct_params = inputs.iter().map(|(_, struct_param, _, _)| struct_param); - let param_names = inputs.iter().map(|(_, _, name, _)| name); - let type_aliases = inputs.iter().map(|(_, _, _, aliased_param)| aliased_param); - let types_mod_ident = type_gen.types_mod_ident(); + let output_type_path = type_gen + .resolve_type_path(method.output_ty())? + .to_token_stream(type_gen.settings()); - let output = type_gen.resolve_type_path(method.output_ty())?.to_token_stream(type_gen.settings()); - let aliased_module = quote!( + // Define the input and output type bits for the method. + let runtime_api_types = quote! { pub mod #method_name { + use super::root_mod; use super::#types_mod_ident; - #( #type_aliases )* + #(#input_type_aliases)* - // Guard the `Output` name against collisions by placing it in a dedicated module. pub mod output { use super::#types_mod_ident; - pub type Output = #output; + pub type Output = #output_type_path; } } - ); - - // From the method metadata generate a structure that holds - // all parameter types. This structure is used with metadata - // to encode parameters to the call via `encode_as_fields_to`. 
- let derives = type_gen.settings().derives.default_derives(); - let struct_name = format_ident!("{}", method.name().to_upper_camel_case()); - let struct_input = quote!( - #aliased_module - - #derives - pub struct #struct_name { - #( pub #struct_params, )* - } - ); + }; - let call_hash = method.hash(); - let method = quote!( + // Define the getter method that will live on the runtime API trait type. + let runtime_api_method = quote!( #docs - pub fn #method_name(&self, #( #fn_params, )* ) -> #crate_path::runtime_api::payload::StaticPayload { + pub fn #method_name( + &self, + #(#input_args),* + ) -> #crate_path::runtime_api::payload::StaticPayload< + (#(#input_tuple_types,)*), + #method_name::output::Output + > { #crate_path::runtime_api::payload::StaticPayload::new_static( #trait_name_str, #method_name_str, - types::#struct_name { #( #param_names, )* }, - [#(#call_hash,)*], + (#(#input_param_names,)*), + [#(#validation_hash,)*], ) } ); - Ok((struct_input, method)) + Ok((runtime_api_types, runtime_api_method)) }) .collect::, CodegenError>>()?; let trait_name = format_ident!("{}", trait_name_str); + let types = types_and_methods.iter().map(|(types, _)| types); + let methods = types_and_methods.iter().map(|(_, methods)| methods); - let structs = structs_and_methods.iter().map(|(struct_, _)| struct_); - let methods = structs_and_methods.iter().map(|(_, method)| method); - let types_mod_ident = type_gen.types_mod_ident(); - - let runtime_api = quote!( + // The runtime API definition and types. + let trait_defs = quote!( pub mod #trait_name_snake { use super::root_mod; use super::#types_mod_ident; @@ -162,11 +217,7 @@ fn generate_runtime_api( #( #methods )* } - pub mod types { - use super::#types_mod_ident; - - #( #structs )* - } + #( #types )* } ); @@ -177,40 +228,7 @@ fn generate_runtime_api( } ); - Ok((runtime_api, trait_getter)) -} - -/// Generate the runtime APIs.
-pub fn generate_runtime_apis( - metadata: &Metadata, - type_gen: &TypeGenerator, - types_mod_ident: &syn::Ident, - crate_path: &syn::Path, -) -> Result { - let runtime_fns: Vec<_> = metadata - .runtime_api_traits() - .map(|api| generate_runtime_api(api, type_gen, crate_path)) - .collect::>()?; - - let runtime_apis_def = runtime_fns.iter().map(|(apis, _)| apis); - let runtime_apis_getters = runtime_fns.iter().map(|(_, getters)| getters); - - Ok(quote! { - pub mod runtime_apis { - use super::root_mod; - use super::#types_mod_ident; - - use #crate_path::ext::codec::Encode; - - pub struct RuntimeApi; - - impl RuntimeApi { - #( #runtime_apis_getters )* - } - - #( #runtime_apis_def )* - } - }) + Ok((trait_defs, trait_getter)) } #[cfg(test)] diff --git a/codegen/src/api/storage.rs b/codegen/src/api/storage.rs index 922aca7d7af..3ef3cd041a5 100644 --- a/codegen/src/api/storage.rs +++ b/codegen/src/api/storage.rs @@ -2,14 +2,11 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use heck::{ToSnakeCase as _, ToUpperCamelCase}; -use proc_macro2::{Ident, TokenStream as TokenStream2, TokenStream}; +use heck::ToSnakeCase as _; +use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; -use scale_info::TypeDef; use scale_typegen::TypeGenerator; -use subxt_metadata::{ - PalletMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, StorageHasher, -}; +use subxt_metadata::{PalletMetadata, StorageEntryMetadata}; use super::CodegenError; @@ -29,33 +26,34 @@ pub fn generate_storage( crate_path: &syn::Path, ) -> Result { let Some(storage) = pallet.storage() else { + // If there are no storage entries in this pallet, we + // don't generate anything. return Ok(quote!()); }; - let (storage_fns, alias_modules): (Vec, Vec) = storage + let storage_entries = storage .entries() .iter() .map(|entry| generate_storage_entry_fns(type_gen, pallet, entry, crate_path)) - .collect::, CodegenError>>()? 
- .into_iter() - .unzip(); + .collect::, CodegenError>>()?; + + let storage_entry_types = storage_entries.iter().map(|(types, _)| types); + let storage_entry_methods = storage_entries.iter().map(|(_, method)| method); + let types_mod_ident = type_gen.types_mod_ident(); Ok(quote! { pub mod storage { + use super::root_mod; use super::#types_mod_ident; - pub mod types { - use super::#types_mod_ident; - - #( #alias_modules )* - } - pub struct StorageApi; impl StorageApi { - #( #storage_fns )* + #( #storage_entry_methods )* } + + #( #storage_entry_types )* } }) } @@ -67,239 +65,108 @@ fn generate_storage_entry_fns( storage_entry: &StorageEntryMetadata, crate_path: &syn::Path, ) -> Result<(TokenStream2, TokenStream2), CodegenError> { - let snake_case_name = storage_entry.name().to_snake_case(); - let storage_entry_ty = storage_entry.entry_type().value_ty(); - let storage_entry_value_ty = type_gen - .resolve_type_path(storage_entry_ty) - .expect("storage type is in metadata; qed") - .to_token_stream(type_gen.settings()); - - let alias_name = format_ident!("{}", storage_entry.name().to_upper_camel_case()); - let alias_module_name = format_ident!("{snake_case_name}"); - let alias_storage_path = quote!( types::#alias_module_name::#alias_name ); - - struct MapEntryKey { - arg_name: Ident, - alias_type_def: TokenStream, - alias_type_path: TokenStream, - hasher: StorageHasher, - } - - let map_entry_key = |idx, id, hasher| -> MapEntryKey { - let arg_name: Ident = format_ident!("_{}", idx); - let ty_path = type_gen - .resolve_type_path(id) - .expect("type is in metadata; qed"); - - let alias_name = format_ident!("Param{}", idx); - let alias_type = ty_path.to_token_stream(type_gen.settings()); - - let alias_type_def = quote!( pub type #alias_name = #alias_type; ); - let alias_type_path = quote!( types::#alias_module_name::#alias_name ); - - MapEntryKey { - arg_name, - alias_type_def, - alias_type_path, - hasher, - } - }; - - let keys: Vec = match storage_entry.entry_type() { - 
StorageEntryType::Plain(_) => vec![], - StorageEntryType::Map { - key_ty, hashers, .. - } => { - if hashers.len() == 1 { - // If there's exactly 1 hasher, then we have a plain StorageMap. We can't - // break the key down (even if it's a tuple) because the hasher applies to - // the whole key. - vec![map_entry_key(0, *key_ty, hashers[0])] - } else { - // If there are multiple hashers, then we have a StorageDoubleMap or StorageNMap. - // We expect the key type to be tuple, and we will return a MapEntryKey for each - // key in the tuple. - let hasher_count = hashers.len(); - let tuple = match &type_gen - .resolve_type(*key_ty) - .expect("key type should be present") - .type_def - { - TypeDef::Tuple(tuple) => tuple, - _ => { - return Err(CodegenError::InvalidStorageHasherCount { - storage_entry_name: storage_entry.name().to_owned(), - key_count: 1, - hasher_count, - }); - } - }; - - // We should have the same number of hashers and keys. - let key_count = tuple.fields.len(); - if hasher_count != key_count { - return Err(CodegenError::InvalidStorageHasherCount { - storage_entry_name: storage_entry.name().to_owned(), - key_count, - hasher_count, - }); - } - - // Collect them together. 
- tuple - .fields - .iter() - .zip(hashers) - .enumerate() - .map(|(idx, (field, hasher))| map_entry_key(idx, field.id, *hasher)) - .collect() - } - } - }; + let types_mod_ident = type_gen.types_mod_ident(); let pallet_name = pallet.name(); - let storage_name = storage_entry.name(); - let Some(storage_hash) = pallet.storage_hash(storage_name) else { + let storage_entry_name_str = storage_entry.name(); + let storage_entry_snake_case_name = storage_entry_name_str.to_snake_case(); + let storage_entry_snake_case_ident = format_ident!("{storage_entry_snake_case_name}"); + let Some(validation_hash) = pallet.storage_hash(storage_entry_name_str) else { return Err(CodegenError::MissingStorageMetadata( pallet_name.into(), - storage_name.into(), + storage_entry_name_str.into(), )); }; let docs = storage_entry.docs(); - let docs = type_gen + let docs: TokenStream2 = type_gen .settings() .should_gen_docs .then_some(quote! { #( #[doc = #docs ] )* }) .unwrap_or_default(); - let is_defaultable_type = match storage_entry.modifier() { - StorageEntryModifier::Default => quote!(#crate_path::utils::Yes), - StorageEntryModifier::Optional => quote!(()), - }; + struct Input { + type_alias: syn::Ident, + type_path: TokenStream2, + } - // Note: putting `#crate_path::storage::address::StaticStorageKey` into this variable is necessary - // to get the line width below a certain limit. If not done, rustfmt will refuse to format the following big expression. - // for more information see [this post](https://users.rust-lang.org/t/rustfmt-silently-fails-to-work/75485/4). 
- let static_storage_key: TokenStream = quote!(#crate_path::storage::address::StaticStorageKey); - let all_fns = (0..=keys.len()).map(|n_keys| { - let keys_slice = &keys[..n_keys]; - let (fn_name, is_fetchable, is_iterable) = if n_keys == keys.len() { - let fn_name = format_ident!("{snake_case_name}"); - (fn_name, true, false) - } else { - let fn_name = if n_keys == 0 { - format_ident!("{snake_case_name}_iter") - } else { - format_ident!("{snake_case_name}_iter{}", n_keys) - }; - (fn_name, false, true) - }; - let is_fetchable_type = is_fetchable - .then_some(quote!(#crate_path::utils::Yes)) - .unwrap_or(quote!(())); - let is_iterable_type = is_iterable - .then_some(quote!(#crate_path::utils::Yes)) - .unwrap_or(quote!(())); - - let (keys, keys_type) = match keys_slice.len() { - 0 => (quote!(()), quote!(())), - 1 => { - let key = &keys_slice[0]; - if key.hasher.ends_with_key() { - let arg = &key.arg_name; - let keys = quote!(#static_storage_key::new(#arg)); - let path = &key.alias_type_path; - let path = quote!(#static_storage_key<#path>); - (keys, path) - } else { - (quote!(()), quote!(())) - } - } - _ => { - let keys_iter = keys_slice.iter().map( - |MapEntryKey { - arg_name, hasher, .. - }| { - if hasher.ends_with_key() { - quote!( #static_storage_key::new(#arg_name) ) - } else { - quote!(()) - } - }, - ); - let keys = quote!( (#(#keys_iter,)*) ); - let paths_iter = keys_slice.iter().map( - |MapEntryKey { - alias_type_path, - hasher, - .. - }| { - if hasher.ends_with_key() { - quote!( #static_storage_key<#alias_type_path> ) - } else { - quote!(()) - } - }, - ); - let paths = quote!( (#(#paths_iter,)*) ); - (keys, paths) + let storage_key_types: Vec = storage_entry + .keys() + .enumerate() + .map(|(idx, key)| { + // Storage key aliases are just indexes; no names to use. + let type_alias = format_ident!("Param{}", idx); + + // Path to the actual type we'll have generated for this input. 
+ let type_path = type_gen + .resolve_type_path(key.key_id) + .expect("view function input type is in metadata; qed") + .to_token_stream(type_gen.settings()); + + Input { + type_alias, + type_path, } - }; + }) + .collect(); - let key_args = keys_slice.iter().map( - |MapEntryKey { - arg_name, - alias_type_path, - .. - }| quote!( #arg_name: #alias_type_path ), - ); - - quote!( - #docs - pub fn #fn_name( - &self, - #(#key_args,)* - ) -> #crate_path::storage::address::StaticAddress::< - #keys_type, - #alias_storage_path, - #is_fetchable_type, - #is_defaultable_type, - #is_iterable_type - > { - #crate_path::storage::address::StaticAddress::new_static( - #pallet_name, - #storage_name, - #keys, - [#(#storage_hash,)*] - ) - } - ) - }); + let storage_key_tuple_types = storage_key_types + .iter() + .map(|i| { + let ty = &i.type_alias; + quote!(#storage_entry_snake_case_ident::#ty) + }) + .collect::>(); - let alias_types = keys + let storage_key_type_aliases = storage_key_types .iter() - .map(|MapEntryKey { alias_type_def, .. }| alias_type_def); + .map(|i| { + let ty = &i.type_alias; + let path = &i.type_path; + quote!(pub type #ty = #path;) + }) + .collect::>(); + + let storage_value_type_path = type_gen + .resolve_type_path(storage_entry.value_ty())? + .to_token_stream(type_gen.settings()); - let types_mod_ident = type_gen.types_mod_ident(); - // Generate type alias for the return type only, since - // the keys of the storage entry are not explicitly named. - let alias_module = quote! 
{ - pub mod #alias_module_name { + let is_plain = if storage_entry.keys().len() == 0 { + quote!(#crate_path::utils::Yes) + } else { + quote!(#crate_path::utils::Maybe) + }; + + let storage_entry_types = quote!( + pub mod #storage_entry_snake_case_ident { + use super::root_mod; use super::#types_mod_ident; - pub type #alias_name = #storage_entry_value_ty; + #(#storage_key_type_aliases)* - #( #alias_types )* + pub mod output { + use super::#types_mod_ident; + pub type Output = #storage_value_type_path; + } } - }; + ); + + let storage_entry_method = quote!( + #docs + pub fn #storage_entry_snake_case_ident(&self) -> #crate_path::storage::address::StaticAddress< + (#(#storage_key_tuple_types,)*), + #storage_entry_snake_case_ident::output::Output, + #is_plain + > { + #crate_path::storage::address::StaticAddress::new_static( + #pallet_name, + #storage_entry_name_str, + [#(#validation_hash,)*], + ) + } + ); - Ok(( - quote! { - #( #all_fns )* - }, - alias_module, - )) + Ok((storage_entry_types, storage_entry_method)) } #[cfg(test)] diff --git a/core/Cargo.toml b/core/Cargo.toml index 8978c6322c4..f41477cc876 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -27,6 +27,8 @@ std = [ "tracing/std", "impl-serde/std", "primitive-types/std", + "sp-core/std", + "sp-keyring/std", "sp-crypto-hashing/std", ] diff --git a/core/src/blocks/extrinsic_transaction_extensions.rs b/core/src/blocks/extrinsic_transaction_extensions.rs index 12e0ca4fe63..a071eb05963 100644 --- a/core/src/blocks/extrinsic_transaction_extensions.rs +++ b/core/src/blocks/extrinsic_transaction_extensions.rs @@ -7,7 +7,8 @@ use crate::config::transaction_extensions::{ ChargeAssetTxPayment, ChargeTransactionPayment, CheckNonce, }; use crate::dynamic::Value; -use crate::{Metadata, config::Config, error::Error}; +use crate::error::ExtrinsicError; +use crate::{Metadata, config::Config}; use frame_decode::extrinsics::ExtrinsicExtensions; use scale_decode::DecodeAsType; @@ -50,7 +51,7 @@ impl<'a, T: Config> 
ExtrinsicTransactionExtensions<'a, T> { /// Searches through all signed extensions to find a specific one. /// If the Signed Extension is not found `Ok(None)` is returned. /// If the Signed Extension is found but decoding failed `Err(_)` is returned. - pub fn find>(&self) -> Result, Error> { + pub fn find>(&self) -> Result, ExtrinsicError> { for ext in self.iter() { match ext.as_signed_extension::() { // We found a match; return it: @@ -117,12 +118,16 @@ impl<'a, T: Config> ExtrinsicTransactionExtension<'a, T> { } /// Signed Extension as a [`scale_value::Value`] - pub fn value(&self) -> Result, Error> { + pub fn value(&self) -> Result, ExtrinsicError> { let value = scale_value::scale::decode_as_type( &mut &self.bytes[..], self.ty_id, self.metadata.types(), - )?; + ) + .map_err(|e| ExtrinsicError::CouldNotDecodeTransactionExtension { + name: self.identifier.to_owned(), + error: e.into(), + })?; Ok(value) } @@ -131,15 +136,19 @@ impl<'a, T: Config> ExtrinsicTransactionExtension<'a, T> { /// decode with. pub fn as_signed_extension>( &self, - ) -> Result, Error> { + ) -> Result, ExtrinsicError> { if !S::matches(self.identifier, self.ty_id, self.metadata.types()) { return Ok(None); } self.as_type::().map(Some) } - fn as_type(&self) -> Result { - let value = E::decode_as_type(&mut &self.bytes[..], self.ty_id, self.metadata.types())?; + fn as_type(&self) -> Result { + let value = E::decode_as_type(&mut &self.bytes[..], self.ty_id, self.metadata.types()) + .map_err(|e| ExtrinsicError::CouldNotDecodeTransactionExtension { + name: self.identifier.to_owned(), + error: e.into(), + })?; Ok(value) } } diff --git a/core/src/blocks/extrinsics.rs b/core/src/blocks/extrinsics.rs index d58af734830..06b656c5638 100644 --- a/core/src/blocks/extrinsics.rs +++ b/core/src/blocks/extrinsics.rs @@ -2,19 +2,16 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. 
-use super::BlockError; use crate::blocks::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions; use crate::{ Metadata, config::{Config, HashFor, Hasher}, - error::{Error, MetadataError}, + error::{ExtrinsicDecodeErrorAt, ExtrinsicDecodeErrorAtReason, ExtrinsicError}, }; use alloc::sync::Arc; use alloc::vec::Vec; -use core::ops::Deref; use frame_decode::extrinsics::Extrinsic; -use scale_decode::DecodeAsType; -use subxt_metadata::PalletMetadata; +use scale_decode::{DecodeAsFields, DecodeAsType}; pub use crate::blocks::StaticExtrinsic; @@ -30,7 +27,10 @@ impl Extrinsics { /// Instantiate a new [`Extrinsics`] object, given a vector containing /// each extrinsic hash (in the form of bytes) and some metadata that /// we'll use to decode them. - pub fn decode_from(extrinsics: Vec>, metadata: Metadata) -> Result { + pub fn decode_from( + extrinsics: Vec>, + metadata: Metadata, + ) -> Result { let hasher = T::Hasher::new(&metadata); let extrinsics = extrinsics .into_iter() @@ -39,29 +39,26 @@ impl Extrinsics { let cursor = &mut &*bytes; // Try to decode the extrinsic. - let decoded_info = frame_decode::extrinsics::decode_extrinsic( - cursor, - metadata.deref(), - metadata.types(), - ) - .map_err(|error| BlockError::ExtrinsicDecodeError { - extrinsic_index, - error, - })? - .into_owned(); + let decoded_info = + frame_decode::extrinsics::decode_extrinsic(cursor, &metadata, metadata.types()) + .map_err(|error| ExtrinsicDecodeErrorAt { + extrinsic_index, + error: ExtrinsicDecodeErrorAtReason::DecodeError(error), + })? + .into_owned(); // We didn't consume all bytes, so decoding probably failed. 
if !cursor.is_empty() { - return Err(BlockError::LeftoverBytes { + return Err(ExtrinsicDecodeErrorAt { extrinsic_index, - num_leftover_bytes: cursor.len(), + error: ExtrinsicDecodeErrorAtReason::LeftoverBytes(cursor.to_vec()), } .into()); } Ok(Arc::new((decoded_info, bytes))) }) - .collect::>()?; + .collect::>()?; Ok(Self { extrinsics, @@ -106,7 +103,7 @@ impl Extrinsics { /// If an error occurs, all subsequent iterations return `None`. pub fn find( &self, - ) -> impl Iterator, Error>> { + ) -> impl Iterator, ExtrinsicError>> { self.iter().filter_map(|details| { match details.as_extrinsic::() { // Failed to decode extrinsic: @@ -120,18 +117,22 @@ impl Extrinsics { /// Iterate through the extrinsics using metadata to dynamically decode and skip /// them, and return the first extrinsic found which decodes to the provided `E` type. - pub fn find_first(&self) -> Result>, Error> { + pub fn find_first( + &self, + ) -> Result>, ExtrinsicError> { self.find::().next().transpose() } /// Iterate through the extrinsics using metadata to dynamically decode and skip /// them, and return the last extrinsic found which decodes to the provided `Ev` type. - pub fn find_last(&self) -> Result>, Error> { + pub fn find_last( + &self, + ) -> Result>, ExtrinsicError> { self.find::().last().transpose() } /// Find an extrinsics that decodes to the type provided. Returns true if it was found. - pub fn has(&self) -> Result { + pub fn has(&self) -> Result { Ok(self.find::().next().transpose()?.is_some()) } } @@ -269,56 +270,58 @@ where } /// The name of the pallet from whence the extrinsic originated. - pub fn pallet_name(&self) -> Result<&str, Error> { - Ok(self.extrinsic_metadata()?.pallet.name()) + pub fn pallet_name(&self) -> &str { + self.decoded_info().pallet_name() } /// The name of the call (ie the name of the variant that it corresponds to). 
- pub fn variant_name(&self) -> Result<&str, Error> { - Ok(&self.extrinsic_metadata()?.variant.name) - } - - /// Fetch the metadata for this extrinsic. - pub fn extrinsic_metadata(&self) -> Result, Error> { - let pallet = self.metadata.pallet_by_index_err(self.pallet_index())?; - let variant = pallet - .call_variant_by_index(self.variant_index()) - .ok_or_else(|| MetadataError::VariantIndexNotFound(self.variant_index()))?; - - Ok(ExtrinsicMetadataDetails { pallet, variant }) + pub fn call_name(&self) -> &str { + self.decoded_info().call_name() } /// Decode and provide the extrinsic fields back in the form of a [`scale_value::Composite`] /// type which represents the named or unnamed fields that were present in the extrinsic. - pub fn field_values(&self) -> Result, Error> { + pub fn decode_as_fields(&self) -> Result { let bytes = &mut self.field_bytes(); - let extrinsic_metadata = self.extrinsic_metadata()?; - - let mut fields = extrinsic_metadata - .variant - .fields - .iter() - .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); + let mut fields = self.decoded_info().call_data().map(|d| { + let name = if d.name().is_empty() { + None + } else { + Some(d.name()) + }; + scale_decode::Field::new(*d.ty(), name) + }); let decoded = - scale_value::scale::decode_as_fields(bytes, &mut fields, self.metadata.types())?; + E::decode_as_fields(bytes, &mut fields, self.metadata.types()).map_err(|e| { + ExtrinsicError::CannotDecodeFields { + extrinsic_index: self.index as usize, + error: e.into(), + } + })?; Ok(decoded) } /// Attempt to decode these [`ExtrinsicDetails`] into a type representing the extrinsic fields. /// Such types are exposed in the codegen as `pallet_name::calls::types::CallName` types. 
- pub fn as_extrinsic(&self) -> Result, Error> { - let extrinsic_metadata = self.extrinsic_metadata()?; - if extrinsic_metadata.pallet.name() == E::PALLET - && extrinsic_metadata.variant.name == E::CALL + pub fn as_extrinsic(&self) -> Result, ExtrinsicError> { + if self.decoded_info().pallet_name() == E::PALLET + && self.decoded_info().call_name() == E::CALL { - let mut fields = extrinsic_metadata - .variant - .fields - .iter() - .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); + let mut fields = self.decoded_info().call_data().map(|d| { + let name = if d.name().is_empty() { + None + } else { + Some(d.name()) + }; + scale_decode::Field::new(*d.ty(), name) + }); let decoded = - E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types())?; + E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types()) + .map_err(|e| ExtrinsicError::CannotDecodeFields { + extrinsic_index: self.index as usize, + error: e.into(), + })?; Ok(Some(decoded)) } else { Ok(None) @@ -328,12 +331,16 @@ where /// Attempt to decode these [`ExtrinsicDetails`] into an outer call enum type (which includes /// the pallet and extrinsic enum variants as well as the extrinsic fields). A compatible /// type for this is exposed via static codegen as a root level `Call` type. - pub fn as_root_extrinsic(&self) -> Result { + pub fn as_root_extrinsic(&self) -> Result { let decoded = E::decode_as_type( &mut &self.call_bytes()[..], self.metadata.outer_enums().call_enum_ty(), self.metadata.types(), - )?; + ) + .map_err(|e| ExtrinsicError::CannotDecodeIntoRootExtrinsic { + extrinsic_index: self.index as usize, + error: e.into(), + })?; Ok(decoded) } @@ -351,14 +358,6 @@ pub struct FoundExtrinsic { pub value: E, } -/// Details for the given extrinsic plucked from the metadata. -pub struct ExtrinsicMetadataDetails<'a> { - /// Metadata for the pallet that the extrinsic belongs to. 
- pub pallet: PalletMetadata<'a>, - /// Metadata for the variant which describes the pallet extrinsics. - pub variant: &'a scale_info::Variant, -} - #[cfg(test)] mod tests { use super::*; @@ -513,12 +512,10 @@ mod tests { let result = Extrinsics::::decode_from(vec![vec![]], metadata); assert_matches!( result.err(), - Some(crate::Error::Block( - crate::error::BlockError::ExtrinsicDecodeError { - extrinsic_index: 0, - error: _ - } - )) + Some(crate::error::ExtrinsicDecodeErrorAt { + extrinsic_index: 0, + error: _ + }) ); } @@ -533,12 +530,12 @@ mod tests { assert_matches!( result.err(), - Some(crate::Error::Block( - crate::error::BlockError::ExtrinsicDecodeError { - extrinsic_index: 0, - error: ExtrinsicDecodeError::VersionNotSupported(3), - } - )) + Some(crate::error::ExtrinsicDecodeErrorAt { + extrinsic_index: 0, + error: ExtrinsicDecodeErrorAtReason::DecodeError( + ExtrinsicDecodeError::VersionNotSupported(3) + ), + }) ); } @@ -611,20 +608,10 @@ mod tests { assert_eq!(extrinsic.index(), 0); assert_eq!(extrinsic.pallet_index(), 0); - assert_eq!( - extrinsic - .pallet_name() - .expect("Valid metadata contains pallet name"), - "Test" - ); + assert_eq!(extrinsic.pallet_name(), "Test"); assert_eq!(extrinsic.variant_index(), 2); - assert_eq!( - extrinsic - .variant_name() - .expect("Valid metadata contains variant name"), - "TestCall" - ); + assert_eq!(extrinsic.call_name(), "TestCall"); // Decode the extrinsic to the root enum. 
let decoded_extrinsic = extrinsic diff --git a/core/src/blocks/mod.rs b/core/src/blocks/mod.rs index 3114bf05735..d00fec232ec 100644 --- a/core/src/blocks/mod.rs +++ b/core/src/blocks/mod.rs @@ -70,14 +70,13 @@ mod static_extrinsic; use crate::Metadata; use crate::config::Config; -use crate::error::Error; +use crate::error::ExtrinsicDecodeErrorAt; +pub use crate::error::ExtrinsicError; use alloc::vec::Vec; - -pub use crate::error::BlockError; pub use extrinsic_transaction_extensions::{ ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, }; -pub use extrinsics::{ExtrinsicDetails, ExtrinsicMetadataDetails, Extrinsics, FoundExtrinsic}; +pub use extrinsics::{ExtrinsicDetails, Extrinsics, FoundExtrinsic}; pub use static_extrinsic::StaticExtrinsic; /// Instantiate a new [`Extrinsics`] object, given a vector containing each extrinsic hash (in the @@ -87,6 +86,6 @@ pub use static_extrinsic::StaticExtrinsic; pub fn decode_from( extrinsics: Vec>, metadata: Metadata, -) -> Result, Error> { +) -> Result, ExtrinsicDecodeErrorAt> { Extrinsics::decode_from(extrinsics, metadata) } diff --git a/core/src/client.rs b/core/src/client.rs index e7396c883a0..cfc8de4fa10 100644 --- a/core/src/client.rs +++ b/core/src/client.rs @@ -5,8 +5,8 @@ //! A couple of client types that we use elsewhere. use crate::{ + Metadata, config::{Config, HashFor}, - metadata::Metadata, }; use derive_where::derive_where; diff --git a/core/src/constants/address.rs b/core/src/constants/address.rs index 4dd68fdbf2e..4e246a89c20 100644 --- a/core/src/constants/address.rs +++ b/core/src/constants/address.rs @@ -4,17 +4,16 @@ //! Construct addresses to access constants with. -use crate::dynamic::DecodedValueThunk; -use crate::metadata::DecodeWithMetadata; use alloc::borrow::Cow; use alloc::string::String; use derive_where::derive_where; +use scale_decode::DecodeAsType; /// This represents a constant address. Anything implementing this trait /// can be used to fetch constants. 
pub trait Address { /// The target type of the value that lives at this address. - type Target: DecodeWithMetadata; + type Target: DecodeAsType; /// The name of the pallet that the constant lives under. fn pallet_name(&self) -> &str; @@ -32,20 +31,18 @@ pub trait Address { /// This represents the address of a constant. #[derive_where(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] -pub struct DefaultAddress { +pub struct StaticAddress { pallet_name: Cow<'static, str>, constant_name: Cow<'static, str>, constant_hash: Option<[u8; 32]>, _marker: core::marker::PhantomData, } -/// The type of address used by our static codegen. -pub type StaticAddress = DefaultAddress; -/// The type of address typically used to return dynamic constant values. -pub type DynamicAddress = DefaultAddress; +/// A dynamic lookup address to access a constant. +pub type DynamicAddress = StaticAddress; -impl DefaultAddress { - /// Create a new [`DefaultAddress`] to use to look up a constant. +impl StaticAddress { + /// Create a new [`StaticAddress`] to use to look up a constant. pub fn new(pallet_name: impl Into, constant_name: impl Into) -> Self { Self { pallet_name: Cow::Owned(pallet_name.into()), @@ -55,7 +52,7 @@ impl DefaultAddress { } } - /// Create a new [`DefaultAddress`] that will be validated + /// Create a new [`StaticAddress`] that will be validated /// against node metadata using the hash given. #[doc(hidden)] pub fn new_static( @@ -82,7 +79,7 @@ impl DefaultAddress { } } -impl Address for DefaultAddress { +impl Address for StaticAddress { type Target = ReturnTy; fn pallet_name(&self) -> &str { @@ -99,6 +96,9 @@ impl Address for DefaultAddress { } /// Construct a new dynamic constant lookup. 
-pub fn dynamic(pallet_name: impl Into, constant_name: impl Into) -> DynamicAddress { +pub fn dynamic( + pallet_name: impl Into, + constant_name: impl Into, +) -> DynamicAddress { DynamicAddress::new(pallet_name, constant_name) } diff --git a/core/src/constants/mod.rs b/core/src/constants/mod.rs index 39e0eefebaf..4bbd8ffe4c2 100644 --- a/core/src/constants/mod.rs +++ b/core/src/constants/mod.rs @@ -40,26 +40,30 @@ pub mod address; +use crate::Metadata; +use crate::error::ConstantError; use address::Address; use alloc::borrow::ToOwned; - -use crate::{Error, Metadata, error::MetadataError, metadata::DecodeWithMetadata}; +use frame_decode::constants::ConstantTypeInfo; +use scale_decode::IntoVisitor; /// When the provided `address` is statically generated via the `#[subxt]` macro, this validates /// that the shape of the constant value is the same as the shape expected by the static address. /// /// When the provided `address` is dynamic (and thus does not come with any expectation of the /// shape of the constant value), this just returns `Ok(())` -pub fn validate(address: &Addr, metadata: &Metadata) -> Result<(), Error> { +pub fn validate(address: &Addr, metadata: &Metadata) -> Result<(), ConstantError> { if let Some(actual_hash) = address.validation_hash() { let expected_hash = metadata - .pallet_by_name_err(address.pallet_name())? + .pallet_by_name(address.pallet_name()) + .ok_or_else(|| ConstantError::PalletNameNotFound(address.pallet_name().to_string()))? 
.constant_hash(address.constant_name()) - .ok_or_else(|| { - MetadataError::ConstantNameNotFound(address.constant_name().to_owned()) + .ok_or_else(|| ConstantError::ConstantNameNotFound { + pallet_name: address.pallet_name().to_string(), + constant_name: address.constant_name().to_owned(), })?; if actual_hash != expected_hash { - return Err(MetadataError::IncompatibleCodegen.into()); + return Err(ConstantError::IncompatibleCodegen); } } Ok(()) @@ -67,19 +71,37 @@ pub fn validate(address: &Addr, metadata: &Metadata) -> Result<() /// Fetch a constant out of the metadata given a constant address. If the `address` has been /// statically generated, this will validate that the constant shape is as expected, too. -pub fn get(address: &Addr, metadata: &Metadata) -> Result { +pub fn get( + address: &Addr, + metadata: &Metadata, +) -> Result { // 1. Validate constant shape if hash given: validate(address, metadata)?; // 2. Attempt to decode the constant into the type given: - let constant = metadata - .pallet_by_name_err(address.pallet_name())? - .constant_by_name(address.constant_name()) - .ok_or_else(|| MetadataError::ConstantNameNotFound(address.constant_name().to_owned()))?; - let value = ::decode_with_metadata( - &mut constant.value(), - constant.ty(), + let constant = frame_decode::constants::decode_constant( + address.pallet_name(), + address.constant_name(), metadata, - )?; - Ok(value) + metadata.types(), + Addr::Target::into_visitor(), + ) + .map_err(ConstantError::CouldNotDecodeConstant)?; + + Ok(constant) +} + +/// Access the bytes of a constant by the address it is registered under. +pub fn get_bytes( + address: &Addr, + metadata: &Metadata, +) -> Result, ConstantError> { + // 1. Validate custom value shape if hash given: + validate(address, metadata)?; + + // 2. 
Return the underlying bytes: + let constant = metadata + .constant_info(address.pallet_name(), address.constant_name()) + .map_err(|e| ConstantError::ConstantInfoError(e.into_owned()))?; + Ok(constant.bytes.to_vec()) } diff --git a/core/src/custom_values/address.rs b/core/src/custom_values/address.rs index 6bbacaed68e..b8eaefbaf46 100644 --- a/core/src/custom_values/address.rs +++ b/core/src/custom_values/address.rs @@ -4,22 +4,22 @@ //! Construct addresses to access custom values with. -use crate::dynamic::DecodedValueThunk; -use crate::metadata::DecodeWithMetadata; +use alloc::borrow::Cow; use derive_where::derive_where; +use scale_decode::DecodeAsType; /// Use this with [`Address::IsDecodable`]. -pub use crate::utils::Yes; +pub use crate::utils::{Maybe, No, NoMaybe}; /// This represents the address of a custom value in the metadata. /// Anything that implements it can be used to fetch custom values from the metadata. /// The trait is implemented by [`str`] for dynamic lookup and [`StaticAddress`] for static queries. pub trait Address { /// The type of the custom value. - type Target: DecodeWithMetadata; + type Target: DecodeAsType; /// Should be set to `Yes` for Dynamic values and static values that have a valid type. - /// Should be `()` for custom values, that have an invalid type id. - type IsDecodable; + /// Should be `No` for custom values, that have an invalid type id. + type IsDecodable: NoMaybe; /// the name (key) by which the custom value can be accessed in the metadata. fn name(&self) -> &str; @@ -30,31 +30,34 @@ pub trait Address { } } -impl Address for str { - type Target = DecodedValueThunk; - type IsDecodable = Yes; - - fn name(&self) -> &str { - self - } -} - /// A static address to a custom value. 
#[derive_where(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] pub struct StaticAddress { - name: &'static str, + name: Cow<'static, str>, hash: Option<[u8; 32]>, - phantom: core::marker::PhantomData<(ReturnTy, IsDecodable)>, + marker: core::marker::PhantomData<(ReturnTy, IsDecodable)>, } +/// A dynamic address to a custom value. +pub type DynamicAddress = StaticAddress; + impl StaticAddress { #[doc(hidden)] /// Creates a new StaticAddress. - pub fn new_static(name: &'static str, hash: [u8; 32]) -> StaticAddress { - StaticAddress:: { - name, + pub fn new_static(name: &'static str, hash: [u8; 32]) -> Self { + Self { + name: Cow::Borrowed(name), hash: Some(hash), - phantom: core::marker::PhantomData, + marker: core::marker::PhantomData, + } + } + + /// Create a new [`StaticAddress`] + pub fn new(name: impl Into) -> Self { + Self { + name: name.into().into(), + hash: None, + marker: core::marker::PhantomData, } } @@ -63,20 +66,37 @@ impl StaticAddress { Self { name: self.name, hash: None, - phantom: self.phantom, + marker: self.marker, } } } -impl Address for StaticAddress { - type Target = R; - type IsDecodable = Y; +impl Address for StaticAddress { + type Target = Target; + type IsDecodable = IsDecodable; fn name(&self) -> &str { - self.name + &self.name } fn validation_hash(&self) -> Option<[u8; 32]> { self.hash } } + +// Support plain strings for looking up custom values (but prefer `dynamic` if you want to pick the return type) +impl Address for str { + type Target = scale_value::Value; + type IsDecodable = Maybe; + + fn name(&self) -> &str { + self + } +} + +/// Construct a new dynamic custom value lookup. 
+pub fn dynamic( + custom_value_name: impl Into, +) -> DynamicAddress { + DynamicAddress::new(custom_value_name) +} diff --git a/core/src/custom_values/mod.rs b/core/src/custom_values/mod.rs index eae390a8564..f5514b70ebf 100644 --- a/core/src/custom_values/mod.rs +++ b/core/src/custom_values/mod.rs @@ -32,47 +32,51 @@ pub mod address; -use crate::utils::Yes; -use crate::{Error, Metadata, error::MetadataError, metadata::DecodeWithMetadata}; +use crate::utils::Maybe; +use crate::{Metadata, error::CustomValueError}; use address::Address; use alloc::vec::Vec; +use frame_decode::custom_values::CustomValueTypeInfo; +use scale_decode::IntoVisitor; /// Run the validation logic against some custom value address you'd like to access. Returns `Ok(())` /// if the address is valid (or if it's not possible to check since the address has no validation hash). /// Returns an error if the address was not valid (wrong name, type or raw bytes) -pub fn validate(address: &Addr, metadata: &Metadata) -> Result<(), Error> { +pub fn validate( + address: &Addr, + metadata: &Metadata, +) -> Result<(), CustomValueError> { if let Some(actual_hash) = address.validation_hash() { let custom = metadata.custom(); let custom_value = custom .get(address.name()) - .ok_or_else(|| MetadataError::CustomValueNameNotFound(address.name().into()))?; + .ok_or_else(|| CustomValueError::NotFound(address.name().into()))?; let expected_hash = custom_value.hash(); if actual_hash != expected_hash { - return Err(MetadataError::IncompatibleCodegen.into()); + return Err(CustomValueError::IncompatibleCodegen); } } - if metadata.custom().get(address.name()).is_none() { - return Err(MetadataError::IncompatibleCodegen.into()); - } Ok(()) } /// Access a custom value by the address it is registered under. This can be just a [str] to get back a dynamic value, /// or a static address from the generated static interface to get a value of a static type returned. 
-pub fn get + ?Sized>( +pub fn get + ?Sized>( address: &Addr, metadata: &Metadata, -) -> Result { +) -> Result { // 1. Validate custom value shape if hash given: validate(address, metadata)?; // 2. Attempt to decode custom value: - let custom_value = metadata.custom_value_by_name_err(address.name())?; - let value = ::decode_with_metadata( - &mut custom_value.bytes(), - custom_value.type_id(), + let value = frame_decode::custom_values::decode_custom_value( + address.name(), metadata, - )?; + metadata.types(), + Addr::Target::into_visitor(), + ) + .map_err(CustomValueError::CouldNotDecodeCustomValue)?; + Ok(value) } @@ -80,13 +84,15 @@ pub fn get + ?Sized>( pub fn get_bytes( address: &Addr, metadata: &Metadata, -) -> Result, Error> { +) -> Result, CustomValueError> { // 1. Validate custom value shape if hash given: validate(address, metadata)?; // 2. Return the underlying bytes: - let custom_value = metadata.custom_value_by_name_err(address.name())?; - Ok(custom_value.bytes().to_vec()) + let custom_value = metadata + .custom_value_info(address.name()) + .map_err(|e| CustomValueError::NotFound(e.not_found))?; + Ok(custom_value.bytes.to_vec()) } #[cfg(test)] @@ -162,8 +168,9 @@ mod tests { let metadata = mock_metadata(); assert!(custom_values::get("Invalid Address", &metadata).is_err()); - let person_decoded_value_thunk = custom_values::get("Mr. Robot", &metadata).unwrap(); - let person: Person = person_decoded_value_thunk.as_type().unwrap(); + + let person_addr = custom_values::address::dynamic::("Mr. Robot"); + let person = custom_values::get(&person_addr, &metadata).unwrap(); assert_eq!( person, Person { diff --git a/core/src/dynamic.rs b/core/src/dynamic.rs index 8f02b2d04c2..79fec8feb43 100644 --- a/core/src/dynamic.rs +++ b/core/src/dynamic.rs @@ -5,9 +5,6 @@ //! This module provides the entry points to create dynamic //! transactions, storage and constant lookups. 
-use crate::metadata::{DecodeWithMetadata, Metadata}; -use alloc::vec::Vec; -use scale_decode::DecodeAsType; pub use scale_value::{At, Value}; /// A [`scale_value::Value`] type endowed with contextual information @@ -30,57 +27,3 @@ pub use crate::runtime_api::payload::dynamic as runtime_api_call; // Execute View Function API function call dynamically. pub use crate::view_functions::payload::dynamic as view_function_call; - -/// This is the result of making a dynamic request to a node. From this, -/// we can return the raw SCALE bytes that we were handed back, or we can -/// complete the decoding of the bytes into a [`DecodedValue`] type. -pub struct DecodedValueThunk { - type_id: u32, - metadata: Metadata, - scale_bytes: Vec, -} - -impl DecodeWithMetadata for DecodedValueThunk { - fn decode_with_metadata( - bytes: &mut &[u8], - type_id: u32, - metadata: &Metadata, - ) -> Result { - let mut v = Vec::with_capacity(bytes.len()); - v.extend_from_slice(bytes); - *bytes = &[]; - Ok(DecodedValueThunk { - type_id, - metadata: metadata.clone(), - scale_bytes: v, - }) - } -} - -impl DecodedValueThunk { - /// Return the SCALE encoded bytes handed back from the node. - pub fn into_encoded(self) -> Vec { - self.scale_bytes - } - /// Return the SCALE encoded bytes handed back from the node without taking ownership of them. - pub fn encoded(&self) -> &[u8] { - &self.scale_bytes - } - /// Decode the SCALE encoded storage entry into a dynamic [`DecodedValue`] type. - pub fn to_value(&self) -> Result { - let val = scale_value::scale::decode_as_type( - &mut &*self.scale_bytes, - self.type_id, - self.metadata.types(), - )?; - Ok(val) - } - /// decode the `DecodedValueThunk` into a concrete type. 
- pub fn as_type(&self) -> Result { - T::decode_as_type( - &mut &self.scale_bytes[..], - self.type_id, - self.metadata.types(), - ) - } -} diff --git a/core/src/error.rs b/core/src/error.rs index 9517b30856a..c8ba79469c5 100644 --- a/core/src/error.rs +++ b/core/src/error.rs @@ -6,196 +6,289 @@ use alloc::boxed::Box; use alloc::string::String; -use subxt_metadata::StorageHasher; use thiserror::Error as DeriveError; /// The error emitted when something goes wrong. #[derive(Debug, DeriveError)] +#[allow(missing_docs)] pub enum Error { - /// Codec error. - #[error("Codec error: {0}")] - Codec(codec::Error), - /// Metadata error. #[error(transparent)] - Metadata(#[from] MetadataError), - /// Storage address error. + StorageError(#[from] StorageError), #[error(transparent)] - StorageAddress(#[from] StorageAddressError), - /// Error decoding to a [`crate::dynamic::Value`]. - #[error("Error decoding into dynamic value: {0}")] - Decode(#[from] scale_decode::Error), - /// Error encoding from a [`crate::dynamic::Value`]. - #[error("Error encoding from dynamic value: {0}")] - Encode(#[from] scale_encode::Error), - /// Error constructing an extrinsic. - #[error("Error constructing transaction: {0}")] Extrinsic(#[from] ExtrinsicError), - /// Block body error. 
- #[error("Error working with block_body: {0}")] - Block(#[from] BlockError), + #[error(transparent)] + Constant(#[from] ConstantError), + #[error(transparent)] + CustomValue(#[from] CustomValueError), + #[error(transparent)] + RuntimeApi(#[from] RuntimeApiError), + #[error(transparent)] + ViewFunction(#[from] ViewFunctionError), + #[error(transparent)] + Events(#[from] EventsError), } -impl From for Error { - fn from(err: scale_decode::visitor::DecodeError) -> Error { - Error::Decode(err.into()) - } +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum EventsError { + #[error("Can't decode event: can't decode phase: {0}")] + CannotDecodePhase(codec::Error), + #[error("Can't decode event: can't decode pallet index: {0}")] + CannotDecodePalletIndex(codec::Error), + #[error("Can't decode event: can't decode variant index: {0}")] + CannotDecodeVariantIndex(codec::Error), + #[error("Can't decode event: can't find pallet with index {0}")] + CannotFindPalletWithIndex(u8), + #[error( + "Can't decode event: can't find variant with index {variant_index} in pallet {pallet_name}" + )] + CannotFindVariantWithIndex { + pallet_name: String, + variant_index: u8, + }, + #[error("Can't decode field {field_name:?} in event {pallet_name}.{event_name}: {reason}")] + CannotDecodeFieldInEvent { + pallet_name: String, + event_name: String, + field_name: String, + reason: scale_decode::visitor::DecodeError, + }, + #[error("Can't decode event topics: {0}")] + CannotDecodeEventTopics(codec::Error), + #[error("Can't decode the fields of event {pallet_name}.{event_name}: {reason}")] + CannotDecodeEventFields { + pallet_name: String, + event_name: String, + reason: scale_decode::Error, + }, + #[error("Can't decode event {pallet_name}.{event_name} to Event enum: {reason}")] + CannotDecodeEventEnum { + pallet_name: String, + event_name: String, + reason: scale_decode::Error, + }, } -// TODO: when `codec::Error` implements `core::Error` -// remove this impl and 
replace it by thiserror #[from] -impl From for Error { - fn from(err: codec::Error) -> Error { - Error::Codec(err) - } +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ViewFunctionError { + #[error("The static View Function address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find View Function: pallet {0} not found")] + PalletNotFound(String), + #[error("Can't find View Function {function_name} in pallet {pallet_name}")] + ViewFunctionNotFound { + pallet_name: String, + function_name: String, + }, + #[error("Failed to encode View Function inputs: {0}")] + CouldNotEncodeInputs(frame_decode::view_functions::ViewFunctionInputsEncodeError), + #[error("Failed to decode View Function: {0}")] + CouldNotDecodeResponse(frame_decode::view_functions::ViewFunctionDecodeError), } -/// Block error #[derive(Debug, DeriveError)] -pub enum BlockError { - /// Leftover bytes found after decoding the extrinsic. - #[error( - "After decoding the extrinsic at index {extrinsic_index}, {num_leftover_bytes} bytes were left, suggesting that decoding may have failed" - )] - LeftoverBytes { - /// Index of the extrinsic that failed to decode. - extrinsic_index: usize, - /// Number of bytes leftover after decoding the extrinsic. - num_leftover_bytes: usize, - }, - /// Something went wrong decoding the extrinsic. - #[error("Failed to decode extrinsic at index {extrinsic_index}: {error}")] - ExtrinsicDecodeError { - /// Index of the extrinsic that failed to decode. - extrinsic_index: usize, - /// The decode error. 
- error: ExtrinsicDecodeError, +#[non_exhaustive] +#[allow(missing_docs)] +pub enum RuntimeApiError { + #[error("The static Runtime API address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Runtime API trait not found: {0}")] + TraitNotFound(String), + #[error("Runtime API method {method_name} not found in trait {trait_name}")] + MethodNotFound { + trait_name: String, + method_name: String, }, + #[error("Failed to encode Runtime API inputs: {0}")] + CouldNotEncodeInputs(frame_decode::runtime_apis::RuntimeApiInputsEncodeError), + #[error("Failed to decode Runtime API: {0}")] + CouldNotDecodeResponse(frame_decode::runtime_apis::RuntimeApiDecodeError), } -/// An alias for [`frame_decode::extrinsics::ExtrinsicDecodeError`]. -/// -pub type ExtrinsicDecodeError = frame_decode::extrinsics::ExtrinsicDecodeError; +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum CustomValueError { + #[error("The static custom value address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("The custom value '{0}' was not found")] + NotFound(String), + #[error("Failed to decode custom value: {0}")] + CouldNotDecodeCustomValue(frame_decode::custom_values::CustomValueDecodeError), +} -/// Something went wrong trying to access details in the metadata. -#[derive(Clone, Debug, PartialEq, DeriveError)] +/// Something went wrong working with a constant. +#[derive(Debug, DeriveError)] #[non_exhaustive] -pub enum MetadataError { - /// The DispatchError type isn't available in the metadata - #[error("The DispatchError type isn't available")] - DispatchErrorNotFound, - /// Type not found in metadata. - #[error("Type with ID {0} not found")] - TypeNotFound(u32), - /// Pallet not found (index). - #[error("Pallet with index {0} not found")] - PalletIndexNotFound(u8), - /// Pallet not found (name). - #[error("Pallet with name {0} not found")] - PalletNameNotFound(String), - /// Variant not found. 
- #[error("Variant with index {0} not found")] - VariantIndexNotFound(u8), - /// Constant not found. - #[error("Constant with name {0} not found")] - ConstantNameNotFound(String), - /// Call not found. - #[error("Call with name {0} not found")] - CallNameNotFound(String), - /// Runtime trait not found. - #[error("Runtime trait with name {0} not found")] - RuntimeTraitNotFound(String), - /// Runtime method not found. - #[error("Runtime method with name {0} not found")] - RuntimeMethodNotFound(String), - /// View Function not found. - #[error("View Function with query ID {} not found", hex::encode(.0))] - ViewFunctionNotFound([u8; 32]), - /// Call type not found in metadata. - #[error("Call type not found in pallet with index {0}")] - CallTypeNotFoundInPallet(u8), - /// Event type not found in metadata. - #[error("Event type not found in pallet with index {0}")] - EventTypeNotFoundInPallet(u8), - /// Storage details not found in metadata. - #[error("Storage details not found in pallet with name {0}")] - StorageNotFoundInPallet(String), - /// Storage entry not found. - #[error("Storage entry {0} not found")] - StorageEntryNotFound(String), - /// The generated interface used is not compatible with the node. - #[error("The generated code is not compatible with the node")] +#[allow(missing_docs)] +pub enum ConstantError { + #[error("The static constant address used is not compatible with the live chain")] IncompatibleCodegen, - /// Custom value not found. 
- #[error("Custom value with name {0} not found")] - CustomValueNameNotFound(String), + #[error("Can't find constant: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error( + "Constant '{constant_name}' not found in pallet {pallet_name} in the live chain metadata" + )] + ConstantNameNotFound { + pallet_name: String, + constant_name: String, + }, + #[error("Failed to decode constant: {0}")] + CouldNotDecodeConstant(frame_decode::constants::ConstantDecodeError), + #[error("Cannot obtain constant information from metadata: {0}")] + ConstantInfoError(frame_decode::constants::ConstantInfoError<'static>), } /// Something went wrong trying to encode or decode a storage address. -#[derive(Clone, Debug, DeriveError)] +#[derive(Debug, DeriveError)] #[non_exhaustive] -pub enum StorageAddressError { - /// Storage lookup does not have the expected number of keys. - #[error("Storage lookup requires {expected} keys but more keys have been provided.")] - TooManyKeys { - /// The number of keys provided in the storage address. - expected: usize, - }, - /// This storage entry in the metadata does not have the correct number of hashers to fields. - #[error("Storage entry in metadata does not have the correct number of hashers to fields")] - WrongNumberOfHashers { - /// The number of hashers in the metadata for this storage entry. - hashers: usize, - /// The number of fields in the metadata for this storage entry. - fields: usize, - }, - /// We weren't given enough bytes to decode the storage address/key. - #[error("Not enough remaining bytes to decode the storage address/key")] - NotEnoughBytes, - /// We have leftover bytes after decoding the storage address. - #[error("We have leftover bytes after decoding the storage address")] - TooManyBytes, - /// The bytes of a storage address are not the expected address for decoding the storage keys of the address. 
+#[allow(missing_docs)] +pub enum StorageError { + #[error("The static storage address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find storage value: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error( + "Storage entry '{entry_name}' not found in pallet {pallet_name} in the live chain metadata" + )] + StorageEntryNotFound { + pallet_name: String, + entry_name: String, + }, + #[error("Cannot obtain storage information from metadata: {0}")] + StorageInfoError(frame_decode::storage::StorageInfoError<'static>), + #[error("Cannot encode storage key: {0}")] + StorageKeyEncodeError(frame_decode::storage::StorageKeyEncodeError), + #[error("Cannot create a key to iterate over a plain entry")] + CannotIterPlainEntry { + pallet_name: String, + entry_name: String, + }, #[error( - "Storage address bytes are not the expected format. Addresses need to be at least 16 bytes (pallet ++ entry) and follow a structure given by the hashers defined in the metadata" + "Wrong number of key parts provided to iterate a storage address. We expected at most {max_expected} key parts but got {got} key parts" )] - UnexpectedAddressBytes, - /// An invalid hasher was used to reconstruct a value from a chunk of bytes that is part of a storage address. Hashers where the hash does not contain the original value are invalid for this purpose. + WrongNumberOfKeyPartsProvidedForIterating { max_expected: usize, got: usize }, + #[error( + "Wrong number of key parts provided to fetch a storage address. 
We expected {expected} key parts but got {got} key parts" + )] + WrongNumberOfKeyPartsProvidedForFetching { expected: usize, got: usize }, +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageKeyError { + #[error("Can't decode the storage key: {error}")] + StorageKeyDecodeError { + bytes: Vec, + error: frame_decode::storage::StorageKeyDecodeError, + }, + #[error("Can't decode the values from the storage key: {0}")] + CannotDecodeValuesInKey(frame_decode::storage::StorageKeyValueDecodeError), #[error( - "An invalid hasher was used to reconstruct a value with type ID {ty_id} from a hash formed by a {hasher:?} hasher. This is only possible for concat-style hashers or the identity hasher" + "Cannot decode storage key: there were leftover bytes, indicating that the decoding failed" )] - HasherCannotReconstructKey { - /// Type id of the key's type. - ty_id: u32, - /// The invalid hasher that caused this error. - hasher: StorageHasher, + LeftoverBytes { bytes: Vec }, + #[error("Can't decode a single value from the storage key part at index {index}: {error}")] + CannotDecodeValueInKey { + index: usize, + error: scale_decode::Error, }, } -/// An error that can be encountered when constructing a transaction. #[derive(Debug, DeriveError)] #[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageValueError { + #[error("Cannot decode storage value: {0}")] + CannotDecode(frame_decode::storage::StorageValueDecodeError), + #[error( + "Cannot decode storage value: there were leftover bytes, indicating that the decoding failed" + )] + LeftoverBytes { bytes: Vec }, +} + +/// An error that can be encountered when constructing a transaction. +#[derive(Debug, DeriveError)] +#[allow(missing_docs)] pub enum ExtrinsicError { - /// Transaction version not supported by Subxt. 
+ #[error("The extrinsic payload is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find extrinsic: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error("Can't find extrinsic: call name {call_name} doesn't exist in pallet {pallet_name}")] + CallNameNotFound { + pallet_name: String, + call_name: String, + }, + #[error("Can't encode the extrinsic call data: {0}")] + CannotEncodeCallData(scale_encode::Error), #[error("Subxt does not support the extrinsic versions expected by the chain")] UnsupportedVersion, - /// Issue encoding transaction extensions. #[error("Cannot construct the required transaction extensions: {0}")] Params(#[from] ExtrinsicParamsError), + #[error("Cannot decode transaction extension '{name}': {error}")] + CouldNotDecodeTransactionExtension { + /// The extension name. + name: String, + /// The decode error. + error: scale_decode::Error, + }, + #[error( + "After decoding the extrinsic at index {extrinsic_index}, {num_leftover_bytes} bytes were left, suggesting that decoding may have failed" + )] + LeftoverBytes { + /// Index of the extrinsic that failed to decode. + extrinsic_index: usize, + /// Number of bytes leftover after decoding the extrinsic. + num_leftover_bytes: usize, + }, + #[error("{0}")] + ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), + #[error("Failed to decode the fields of an extrinsic at index {extrinsic_index}: {error}")] + CannotDecodeFields { + /// Index of the extrinsic whose fields we could not decode + extrinsic_index: usize, + /// The decode error. + error: scale_decode::Error, + }, + #[error("Failed to decode the extrinsic at index {extrinsic_index} to a root enum: {error}")] + CannotDecodeIntoRootExtrinsic { + /// Index of the extrinsic that we failed to decode + extrinsic_index: usize, + /// The decode error. 
+ error: scale_decode::Error, + }, } -impl From for Error { - fn from(value: ExtrinsicParamsError) -> Self { - Error::Extrinsic(value.into()) - } +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +#[error("Cannot decode extrinsic at index {extrinsic_index}: {error}")] +pub struct ExtrinsicDecodeErrorAt { + pub extrinsic_index: usize, + pub error: ExtrinsicDecodeErrorAtReason, +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ExtrinsicDecodeErrorAtReason { + #[error("{0}")] + DecodeError(frame_decode::extrinsics::ExtrinsicDecodeError), + #[error("Leftover bytes")] + LeftoverBytes(Vec), } /// An error that can be emitted when trying to construct an instance of [`crate::config::ExtrinsicParams`], /// encode data from the instance, or match on signed extensions. #[derive(Debug, DeriveError)] #[non_exhaustive] +#[allow(missing_docs)] pub enum ExtrinsicParamsError { - /// Cannot find a type id in the metadata. The context provides some additional - /// information about the source of the error (eg the signed extension name). #[error("Cannot find type id '{type_id} in the metadata (context: {context})")] MissingTypeId { /// Type ID. @@ -203,10 +296,8 @@ pub enum ExtrinsicParamsError { /// Some arbitrary context to help narrow the source of the error. context: &'static str, }, - /// A signed extension in use on some chain was not provided. #[error("The chain expects a signed extension with the name {0}, but we did not provide one")] UnknownTransactionExtension(String), - /// Some custom error. 
#[error("Error constructing extrinsic parameters: {0}")] Custom(Box), } diff --git a/core/src/events.rs b/core/src/events.rs index 192a7ebca5a..43f010f755b 100644 --- a/core/src/events.rs +++ b/core/src/events.rs @@ -46,9 +46,9 @@ use scale_decode::{DecodeAsFields, DecodeAsType}; use subxt_metadata::PalletMetadata; use crate::{ - Error, Metadata, + Metadata, config::{Config, HashFor}, - error::MetadataError, + error::EventsError, }; /// Create a new [`Events`] instance from the given bytes. @@ -148,7 +148,7 @@ impl Events { // use of it with our `FilterEvents` stuff. pub fn iter( &self, - ) -> impl Iterator, Error>> + Send + Sync + 'static { + ) -> impl Iterator, EventsError>> + Send + Sync + 'static { // The event bytes ignoring the compact encoded length on the front: let event_bytes = self.event_bytes.clone(); let metadata = self.metadata.clone(); @@ -184,25 +184,25 @@ impl Events { /// Iterate through the events using metadata to dynamically decode and skip /// them, and return only those which should decode to the provided `Ev` type. /// If an error occurs, all subsequent iterations return `None`. - pub fn find(&self) -> impl Iterator> { + pub fn find(&self) -> impl Iterator> { self.iter() .filter_map(|ev| ev.and_then(|ev| ev.as_event::()).transpose()) } /// Iterate through the events using metadata to dynamically decode and skip /// them, and return the first event found which decodes to the provided `Ev` type. - pub fn find_first(&self) -> Result, Error> { + pub fn find_first(&self) -> Result, EventsError> { self.find::().next().transpose() } /// Iterate through the events using metadata to dynamically decode and skip /// them, and return the last event found which decodes to the provided `Ev` type. - pub fn find_last(&self) -> Result, Error> { + pub fn find_last(&self) -> Result, EventsError> { self.find::().last().transpose() } /// Find an event that decodes to the type provided. Returns true if it was found. 
- pub fn has(&self) -> Result { + pub fn has(&self) -> Result { Ok(self.find::().next().transpose()?.is_some()) } } @@ -246,23 +246,29 @@ impl EventDetails { all_bytes: Arc<[u8]>, start_idx: usize, index: u32, - ) -> Result, Error> { + ) -> Result, EventsError> { let input = &mut &all_bytes[start_idx..]; - let phase = Phase::decode(input)?; + let phase = Phase::decode(input).map_err(EventsError::CannotDecodePhase)?; let event_start_idx = all_bytes.len() - input.len(); - let pallet_index = u8::decode(input)?; - let variant_index = u8::decode(input)?; + let pallet_index = u8::decode(input).map_err(EventsError::CannotDecodePalletIndex)?; + let variant_index = u8::decode(input).map_err(EventsError::CannotDecodeVariantIndex)?; let event_fields_start_idx = all_bytes.len() - input.len(); // Get metadata for the event: - let event_pallet = metadata.pallet_by_index_err(pallet_index)?; + let event_pallet = metadata + .pallet_by_index(pallet_index) + .ok_or_else(|| EventsError::CannotFindPalletWithIndex(pallet_index))?; let event_variant = event_pallet .event_variant_by_index(variant_index) - .ok_or(MetadataError::VariantIndexNotFound(variant_index))?; + .ok_or_else(|| EventsError::CannotFindVariantWithIndex { + pallet_name: event_pallet.name().to_string(), + variant_index: variant_index, + })?; + tracing::debug!( "Decoding Event '{}::{}'", event_pallet.name(), @@ -278,14 +284,23 @@ impl EventDetails { metadata.types(), scale_decode::visitor::IgnoreVisitor::new(), ) - .map_err(scale_decode::Error::from)?; + .map_err(|e| EventsError::CannotDecodeFieldInEvent { + pallet_name: event_pallet.name().to_string(), + event_name: event_variant.name.clone(), + field_name: field_metadata + .name + .clone() + .unwrap_or("".to_string()), + reason: e, + })?; } // the end of the field bytes. let event_fields_end_idx = all_bytes.len() - input.len(); // topics come after the event data in EventRecord. 
- let topics = Vec::>::decode(input)?; + let topics = + Vec::>::decode(input).map_err(EventsError::CannotDecodeEventTopics)?; // what bytes did we skip over in total, including topics. let end_idx = all_bytes.len() - input.len(); @@ -367,7 +382,7 @@ impl EventDetails { /// Decode and provide the event fields back in the form of a [`scale_value::Composite`] /// type which represents the named or unnamed fields that were present in the event. - pub fn field_values(&self) -> Result, Error> { + pub fn decode_as_fields(&self) -> Result { let bytes = &mut self.field_bytes(); let event_metadata = self.event_metadata(); @@ -378,14 +393,20 @@ impl EventDetails { .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); let decoded = - scale_value::scale::decode_as_fields(bytes, &mut fields, self.metadata.types())?; + E::decode_as_fields(bytes, &mut fields, self.metadata.types()).map_err(|e| { + EventsError::CannotDecodeEventFields { + pallet_name: event_metadata.pallet.name().to_string(), + event_name: event_metadata.variant.name.clone(), + reason: e, + } + })?; Ok(decoded) } /// Attempt to decode these [`EventDetails`] into a type representing the event fields. /// Such types are exposed in the codegen as `pallet_name::events::EventName` types. 
- pub fn as_event(&self) -> Result, Error> { + pub fn as_event(&self) -> Result, EventsError> { let ev_metadata = self.event_metadata(); if ev_metadata.pallet.name() == E::PALLET && ev_metadata.variant.name == E::EVENT { let mut fields = ev_metadata @@ -394,7 +415,12 @@ impl EventDetails { .iter() .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); let decoded = - E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types())?; + E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types()) + .map_err(|e| EventsError::CannotDecodeEventFields { + pallet_name: E::PALLET.to_string(), + event_name: E::EVENT.to_string(), + reason: e, + })?; Ok(Some(decoded)) } else { Ok(None) @@ -404,14 +430,22 @@ impl EventDetails { /// Attempt to decode these [`EventDetails`] into a root event type (which includes /// the pallet and event enum variants as well as the event fields). A compatible /// type for this is exposed via static codegen as a root level `Event` type. 
- pub fn as_root_event(&self) -> Result { + pub fn as_root_event(&self) -> Result { let bytes = &self.all_bytes[self.event_start_idx..self.event_fields_end_idx]; let decoded = E::decode_as_type( &mut &bytes[..], self.metadata.outer_enums().event_enum_ty(), self.metadata.types(), - )?; + ) + .map_err(|e| { + let md = self.event_metadata(); + EventsError::CannotDecodeEventEnum { + pallet_name: md.pallet.name().to_string(), + event_name: md.variant.name.clone(), + reason: e, + } + })?; Ok(decoded) } @@ -623,7 +657,7 @@ mod tests { expected: TestRawEventDetails, ) { let actual_fields_no_context: Vec<_> = actual - .field_values() + .decode_as_fields::>() .expect("can decode field values (2)") .into_values() .map(|value| value.remove_context()) diff --git a/core/src/lib.rs b/core/src/lib.rs index a39d5e1899a..14b0cb42ad0 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -31,7 +31,6 @@ pub mod custom_values; pub mod dynamic; pub mod error; pub mod events; -pub mod metadata; pub mod runtime_api; pub mod storage; pub mod tx; @@ -40,7 +39,7 @@ pub mod view_functions; pub use config::Config; pub use error::Error; -pub use metadata::Metadata; +pub use subxt_metadata::Metadata; /// Re-exports of some of the key external crates. pub mod ext { diff --git a/core/src/metadata/decode_encode_traits.rs b/core/src/metadata/decode_encode_traits.rs deleted file mode 100644 index b9f83f95954..00000000000 --- a/core/src/metadata/decode_encode_traits.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::Metadata; - -use alloc::vec::Vec; - -/// This trait is implemented for all types that also implement [`scale_decode::DecodeAsType`]. -pub trait DecodeWithMetadata: Sized { - /// Given some metadata and a type ID, attempt to SCALE decode the provided bytes into `Self`. 
- fn decode_with_metadata( - bytes: &mut &[u8], - type_id: u32, - metadata: &Metadata, - ) -> Result; -} - -impl DecodeWithMetadata for T { - fn decode_with_metadata( - bytes: &mut &[u8], - type_id: u32, - metadata: &Metadata, - ) -> Result { - let val = T::decode_as_type(bytes, type_id, metadata.types())?; - Ok(val) - } -} - -/// This trait is implemented for all types that also implement [`scale_encode::EncodeAsType`]. -pub trait EncodeWithMetadata { - /// SCALE encode this type to bytes, possibly with the help of metadata. - fn encode_with_metadata( - &self, - type_id: u32, - metadata: &Metadata, - bytes: &mut Vec, - ) -> Result<(), scale_encode::Error>; -} - -impl EncodeWithMetadata for T { - /// SCALE encode this type to bytes, possibly with the help of metadata. - fn encode_with_metadata( - &self, - type_id: u32, - metadata: &Metadata, - bytes: &mut Vec, - ) -> Result<(), scale_encode::Error> { - self.encode_as_type_to(type_id, metadata.types(), bytes)?; - Ok(()) - } -} diff --git a/core/src/metadata/metadata_type.rs b/core/src/metadata/metadata_type.rs deleted file mode 100644 index 4fe325a0350..00000000000 --- a/core/src/metadata/metadata_type.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::error::MetadataError; - -use alloc::borrow::ToOwned; -use alloc::sync::Arc; - -/// A cheaply clone-able representation of the runtime metadata received from a node. -#[derive(Clone, Debug)] -pub struct Metadata { - inner: Arc, -} - -impl core::ops::Deref for Metadata { - type Target = subxt_metadata::Metadata; - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl Metadata { - /// Identical to `metadata.pallet_by_name()`, but returns an error if the pallet is not found. 
- pub fn pallet_by_name_err( - &self, - name: &str, - ) -> Result, MetadataError> { - self.pallet_by_name(name) - .ok_or_else(|| MetadataError::PalletNameNotFound(name.to_owned())) - } - - /// Identical to `metadata.pallet_by_index()`, but returns an error if the pallet is not found. - pub fn pallet_by_index_err( - &self, - index: u8, - ) -> Result, MetadataError> { - self.pallet_by_index(index) - .ok_or(MetadataError::PalletIndexNotFound(index)) - } - - /// Identical to `metadata.runtime_api_trait_by_name()`, but returns an error if the trait is not found. - pub fn runtime_api_trait_by_name_err( - &self, - name: &str, - ) -> Result, MetadataError> { - self.runtime_api_trait_by_name(name) - .ok_or_else(|| MetadataError::RuntimeTraitNotFound(name.to_owned())) - } - - /// Identical to `metadata.custom().get(name)`, but returns an error if the trait is not found. - pub fn custom_value_by_name_err( - &self, - name: &str, - ) -> Result, MetadataError> { - self.custom() - .get(name) - .ok_or_else(|| MetadataError::CustomValueNameNotFound(name.to_owned())) - } -} - -impl From for Metadata { - fn from(md: subxt_metadata::Metadata) -> Self { - Metadata { - inner: Arc::new(md), - } - } -} - -impl TryFrom for Metadata { - type Error = subxt_metadata::TryFromError; - fn try_from(value: frame_metadata::RuntimeMetadataPrefixed) -> Result { - subxt_metadata::Metadata::try_from(value).map(Metadata::from) - } -} - -impl codec::Decode for Metadata { - fn decode(input: &mut I) -> Result { - subxt_metadata::Metadata::decode(input).map(Metadata::from) - } -} diff --git a/core/src/metadata/mod.rs b/core/src/metadata/mod.rs deleted file mode 100644 index 480b434bfcf..00000000000 --- a/core/src/metadata/mod.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! A [`Metadata`] type, which is used through this crate. -//! -//! 
This can be decoded from the bytes handed back from a node when asking for metadata. -//! -//! # Examples -//! -//! ```rust -//! use subxt_core::metadata; -//! -//! // We need to fetch the bytes from somewhere, and then we can decode them: -//! let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! let metadata = metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! ``` - -mod decode_encode_traits; -mod metadata_type; - -use codec::Decode; - -pub use decode_encode_traits::{DecodeWithMetadata, EncodeWithMetadata}; -pub use metadata_type::Metadata; - -/// Attempt to decode some bytes into [`Metadata`], returning an error -/// if decoding fails. -/// -/// This is a shortcut for importing [`codec::Decode`] and using the -/// implementation of that on [`Metadata`]. -pub fn decode_from(bytes: &[u8]) -> Result { - Metadata::decode(&mut &*bytes) -} diff --git a/core/src/runtime_api/mod.rs b/core/src/runtime_api/mod.rs index 9bdab6f50af..b4a63b4b018 100644 --- a/core/src/runtime_api/mod.rs +++ b/core/src/runtime_api/mod.rs @@ -43,33 +43,42 @@ pub mod payload; -use crate::error::{Error, MetadataError}; -use crate::metadata::{DecodeWithMetadata, Metadata}; -use alloc::borrow::ToOwned; +use crate::Metadata; +use crate::error::RuntimeApiError; use alloc::format; use alloc::string::String; use alloc::vec::Vec; use payload::Payload; +use scale_decode::IntoVisitor; /// Run the validation logic against some runtime API payload you'd like to use. Returns `Ok(())` /// if the payload is valid (or if it's not possible to check since the payload has no validation hash). 
/// Return an error if the payload was not valid or something went wrong trying to validate it (ie /// the runtime API in question do not exist at all) -pub fn validate(payload: &P, metadata: &Metadata) -> Result<(), Error> { - let Some(static_hash) = payload.validation_hash() else { +pub fn validate(payload: &P, metadata: &Metadata) -> Result<(), RuntimeApiError> { + let Some(hash) = payload.validation_hash() else { return Ok(()); }; - let api_trait = metadata.runtime_api_trait_by_name_err(payload.trait_name())?; - let Some(api_method) = api_trait.method_by_name(payload.method_name()) else { - return Err(MetadataError::IncompatibleCodegen.into()); - }; + let trait_name = payload.trait_name(); + let method_name = payload.method_name(); + + let api_trait = metadata + .runtime_api_trait_by_name(trait_name) + .ok_or_else(|| RuntimeApiError::TraitNotFound(trait_name.to_string()))?; + let api_method = + api_trait + .method_by_name(method_name) + .ok_or_else(|| RuntimeApiError::MethodNotFound { + trait_name: trait_name.to_string(), + method_name: method_name.to_string(), + })?; - let runtime_hash = api_method.hash(); - if static_hash != runtime_hash { - return Err(MetadataError::IncompatibleCodegen.into()); + if hash != api_method.hash() { + Err(RuntimeApiError::IncompatibleCodegen) + } else { + Ok(()) } - Ok(()) } /// Return the name of the runtime API call from the payload. @@ -78,8 +87,17 @@ pub fn call_name(payload: &P) -> String { } /// Return the encoded call args given a runtime API payload. 
-pub fn call_args(payload: &P, metadata: &Metadata) -> Result, Error> { - payload.encode_args(metadata) +pub fn call_args(payload: &P, metadata: &Metadata) -> Result, RuntimeApiError> { + let value = frame_decode::runtime_apis::encode_runtime_api_inputs( + payload.trait_name(), + payload.method_name(), + payload.args(), + metadata, + metadata.types(), + ) + .map_err(RuntimeApiError::CouldNotEncodeInputs)?; + + Ok(value) } /// Decode the value bytes at the location given by the provided runtime API payload. @@ -87,17 +105,16 @@ pub fn decode_value( bytes: &mut &[u8], payload: &P, metadata: &Metadata, -) -> Result { - let api_method = metadata - .runtime_api_trait_by_name_err(payload.trait_name())? - .method_by_name(payload.method_name()) - .ok_or_else(|| MetadataError::RuntimeMethodNotFound(payload.method_name().to_owned()))?; - - let val = ::decode_with_metadata( - &mut &bytes[..], - api_method.output_ty(), +) -> Result { + let value = frame_decode::runtime_apis::decode_runtime_api_response( + payload.trait_name(), + payload.method_name(), + bytes, metadata, - )?; + metadata.types(), + P::ReturnType::into_visitor(), + ) + .map_err(RuntimeApiError::CouldNotDecodeResponse)?; - Ok(val) + Ok(value) } diff --git a/core/src/runtime_api/payload.rs b/core/src/runtime_api/payload.rs index ee3e32e6559..59d6957762e 100644 --- a/core/src/runtime_api/payload.rs +++ b/core/src/runtime_api/payload.rs @@ -6,45 +6,18 @@ //! runtime API calls that can be made. 
use alloc::borrow::Cow; -use alloc::borrow::ToOwned; -use alloc::string::String; -use alloc::vec::Vec; use core::marker::PhantomData; use derive_where::derive_where; -use scale_encode::EncodeAsFields; -use scale_value::Composite; +use frame_decode::runtime_apis::IntoEncodableValues; +use scale_decode::DecodeAsType; -use crate::Error; -use crate::dynamic::DecodedValueThunk; -use crate::error::MetadataError; - -use crate::metadata::{DecodeWithMetadata, Metadata}; - -/// This represents a runtime API payload that can call into the runtime of node. -/// -/// # Components -/// -/// - associated return type -/// -/// Resulting bytes of the call are interpreted into this type. -/// -/// - runtime function name -/// -/// The function name of the runtime API call. This is obtained by concatenating -/// the runtime trait name with the trait's method. -/// -/// For example, the substrate runtime trait [Metadata](https://github.com/paritytech/substrate/blob/cb954820a8d8d765ce75021e244223a3b4d5722d/primitives/api/src/lib.rs#L745) -/// contains the `metadata_at_version` function. The corresponding runtime function -/// is `Metadata_metadata_at_version`. -/// -/// - encoded arguments -/// -/// Each argument of the runtime function must be scale-encoded. +/// This represents a runtime API payload that can be used to call a Runtime API on +/// a chain and decode the response. pub trait Payload { + /// Type of the arguments. + type ArgsType: IntoEncodableValues; /// The return type of the function call. - // Note: `DecodeWithMetadata` is needed to decode the function call result - // with the `subxt::Metadata. - type ReturnType: DecodeWithMetadata; + type ReturnType: DecodeAsType; /// The runtime API trait name. fn trait_name(&self) -> &str; @@ -52,16 +25,8 @@ pub trait Payload { /// The runtime API method name. fn method_name(&self) -> &str; - /// Scale encode the arguments data. 
- fn encode_args_to(&self, metadata: &Metadata, out: &mut Vec) -> Result<(), Error>; - - /// Encode arguments data and return the output. This is a convenience - /// wrapper around [`Payload::encode_args_to`]. - fn encode_args(&self, metadata: &Metadata) -> Result, Error> { - let mut v = Vec::new(); - self.encode_args_to(metadata, &mut v)?; - Ok(v) - } + /// The input arguments. + fn args(&self) -> &Self::ArgsType; /// Returns the statically generated validation hash. fn validation_hash(&self) -> Option<[u8; 32]> { @@ -74,24 +39,23 @@ pub trait Payload { /// /// This can be created from static values (ie those generated /// via the `subxt` macro) or dynamic values via [`dynamic`]. -#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsData)] -pub struct DefaultPayload { +#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsType)] +pub struct StaticPayload { trait_name: Cow<'static, str>, method_name: Cow<'static, str>, - args_data: ArgsData, + args: ArgsType, validation_hash: Option<[u8; 32]>, - _marker: PhantomData, + _marker: PhantomData, } -/// A statically generated runtime API payload. -pub type StaticPayload = DefaultPayload; /// A dynamic runtime API payload. -pub type DynamicPayload = DefaultPayload, DecodedValueThunk>; +pub type DynamicPayload = StaticPayload; -impl Payload - for DefaultPayload +impl Payload + for StaticPayload { - type ReturnType = ReturnTy; + type ArgsType = ArgsType; + type ReturnType = ReturnType; fn trait_name(&self) -> &str { &self.trait_name @@ -101,18 +65,8 @@ impl Payload &self.method_name } - fn encode_args_to(&self, metadata: &Metadata, out: &mut Vec) -> Result<(), Error> { - let api_method = metadata - .runtime_api_trait_by_name_err(&self.trait_name)? 
- .method_by_name(&self.method_name) - .ok_or_else(|| MetadataError::RuntimeMethodNotFound((*self.method_name).to_owned()))?; - let mut fields = api_method - .inputs() - .map(|input| scale_encode::Field::named(input.ty, &input.name)); - - self.args_data - .encode_as_fields_to(&mut fields, metadata.types(), out)?; - Ok(()) + fn args(&self) -> &Self::ArgsType { + &self.args } fn validation_hash(&self) -> Option<[u8; 32]> { @@ -120,23 +74,23 @@ impl Payload } } -impl DefaultPayload { - /// Create a new [`DefaultPayload`]. +impl StaticPayload { + /// Create a new [`StaticPayload`]. pub fn new( trait_name: impl Into, method_name: impl Into, - args_data: ArgsData, + args: ArgsType, ) -> Self { - DefaultPayload { - trait_name: Cow::Owned(trait_name.into()), - method_name: Cow::Owned(method_name.into()), - args_data, + StaticPayload { + trait_name: trait_name.into().into(), + method_name: method_name.into().into(), + args, validation_hash: None, _marker: PhantomData, } } - /// Create a new static [`DefaultPayload`] using static function name + /// Create a new static [`StaticPayload`] using static function name /// and scale-encoded argument data. /// /// This is only expected to be used from codegen. @@ -144,13 +98,13 @@ impl DefaultPayload { pub fn new_static( trait_name: &'static str, method_name: &'static str, - args_data: ArgsData, + args: ArgsType, hash: [u8; 32], - ) -> DefaultPayload { - DefaultPayload { + ) -> StaticPayload { + StaticPayload { trait_name: Cow::Borrowed(trait_name), method_name: Cow::Borrowed(method_name), - args_data, + args, validation_hash: Some(hash), _marker: core::marker::PhantomData, } @@ -173,18 +127,13 @@ impl DefaultPayload { pub fn method_name(&self) -> &str { &self.method_name } - - /// Returns the arguments data. - pub fn args_data(&self) -> &ArgsData { - &self.args_data - } } /// Create a new [`DynamicPayload`]. 
-pub fn dynamic( +pub fn dynamic( trait_name: impl Into, method_name: impl Into, - args_data: impl Into>, -) -> DynamicPayload { - DefaultPayload::new(trait_name, method_name, args_data.into()) + args_data: ArgsType, +) -> DynamicPayload { + DynamicPayload::new(trait_name, method_name, args_data.into()) } diff --git a/core/src/storage/address.rs b/core/src/storage/address.rs index d17157a3706..f3b6d55f0f0 100644 --- a/core/src/storage/address.rs +++ b/core/src/storage/address.rs @@ -4,143 +4,83 @@ //! Construct addresses to access storage entries with. -use crate::{ - dynamic::DecodedValueThunk, - error::{Error, MetadataError}, - metadata::{DecodeWithMetadata, Metadata}, - utils::Yes, -}; -use derive_where::derive_where; - -use alloc::borrow::{Cow, ToOwned}; -use alloc::string::String; -use alloc::vec::Vec; - -// Re-export types used here: -pub use super::storage_key::{StaticStorageKey, StorageHashers, StorageHashersIter, StorageKey}; - -/// This represents a storage address. Anything implementing this trait -/// can be used to fetch and iterate over storage entries. +use crate::utils::{Maybe, YesMaybe}; +use alloc::borrow::Cow; +use frame_decode::storage::{IntoDecodableValues, IntoEncodableValues}; +use scale_decode::DecodeAsType; + +/// A storage address. This allows access to a given storage entry, which can then +/// be iterated over or fetched from by providing the relevant set of keys, or +/// otherwise inspected. pub trait Address { - /// The target type of the value that lives at this address. - type Target: DecodeWithMetadata; - /// The keys type used to construct this address. - type Keys: StorageKey; - /// Can an entry be fetched from this address? - /// Set this type to [`Yes`] to enable the corresponding calls to be made. - type IsFetchable; - /// Can a default entry be obtained from this address? - /// Set this type to [`Yes`] to enable the corresponding calls to be made. - type IsDefaultable; - /// Can this address be iterated over? 
- /// Set this type to [`Yes`] to enable the corresponding calls to be made. - type IsIterable; - - /// The name of the pallet that the entry lives under. + /// All of the keys required to get to an individual value at this address. + /// Keys must always impl [`IntoEncodableValues`], and for iteration must + /// also impl [`frame_decode::storage::IntoDecodableValues`]. + type KeyParts: IntoEncodableValues + IntoDecodableValues; + /// Type of the storage value at this location. + type Value: DecodeAsType; + /// Does the address point to a plain value (as opposed to a map)? + /// Set to [`crate::utils::Yes`] to enable APIs which require a plain value, + /// or [`crate::utils::Maybe`] to enable APIs which allow a map. + type IsPlain: YesMaybe; + + /// The pallet containing this storage entry. fn pallet_name(&self) -> &str; - /// The name of the entry in a given pallet that the item is at. + /// The name of the storage entry. fn entry_name(&self) -> &str; - /// Output the non-prefix bytes; that is, any additional bytes that need - /// to be appended to the key to dig into maps. - fn append_entry_bytes(&self, metadata: &Metadata, bytes: &mut Vec) -> Result<(), Error>; - - /// An optional hash which, if present, will be checked against - /// the node metadata to confirm that the return type matches what - /// we are expecting. - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } + /// Return a unique hash for this address which can be used to validate it against metadata. + fn validation_hash(&self) -> Option<[u8; 32]>; } -/// A concrete storage address. This can be created from static values (ie those generated -/// via the `subxt` macro) or dynamic values via [`dynamic`]. -#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; Keys)] -pub struct DefaultAddress { +/// An address which is generated by the static APIs. 
+pub struct StaticAddress { pallet_name: Cow<'static, str>, entry_name: Cow<'static, str>, - keys: Keys, validation_hash: Option<[u8; 32]>, - _marker: core::marker::PhantomData<(ReturnTy, Fetchable, Defaultable, Iterable)>, -} - -/// A storage address constructed by the static codegen. -pub type StaticAddress = - DefaultAddress; -/// A typical storage address constructed at runtime rather than via the `subxt` macro; this -/// has no restriction on what it can be used for (since we don't statically know). -pub type DynamicAddress = DefaultAddress; - -impl DynamicAddress { - /// Creates a new dynamic address. As `Keys` you can use a `Vec` - pub fn new(pallet_name: impl Into, entry_name: impl Into, keys: Keys) -> Self { - Self { - pallet_name: Cow::Owned(pallet_name.into()), - entry_name: Cow::Owned(entry_name.into()), - keys, - validation_hash: None, - _marker: core::marker::PhantomData, - } - } + marker: core::marker::PhantomData<(KeyParts, Value, IsPlain)>, } -impl - DefaultAddress -where - Keys: StorageKey, - ReturnTy: DecodeWithMetadata, -{ - /// Create a new [`Address`] using static strings for the pallet and call name. +impl StaticAddress { + /// Create a new [`StaticAddress`] using static strings for the pallet and call name. /// This is only expected to be used from codegen. #[doc(hidden)] - pub fn new_static( - pallet_name: &'static str, - entry_name: &'static str, - keys: Keys, - hash: [u8; 32], - ) -> Self { + pub fn new_static(pallet_name: &'static str, entry_name: &'static str, hash: [u8; 32]) -> Self { Self { pallet_name: Cow::Borrowed(pallet_name), entry_name: Cow::Borrowed(entry_name), - keys, validation_hash: Some(hash), - _marker: core::marker::PhantomData, + marker: core::marker::PhantomData, } } -} -impl - DefaultAddress -where - Keys: StorageKey, - ReturnTy: DecodeWithMetadata, -{ - /// Do not validate this storage entry prior to accessing it. - pub fn unvalidated(self) -> Self { + /// Create a new address. 
+ pub fn new(pallet_name: impl Into, entry_name: impl Into) -> Self { Self { + pallet_name: pallet_name.into().into(), + entry_name: entry_name.into().into(), validation_hash: None, - ..self + marker: core::marker::PhantomData, } } - /// Return bytes representing the root of this storage entry (a hash of the pallet and entry name). - pub fn to_root_bytes(&self) -> Vec { - super::get_address_root_bytes(self) + /// Do not validate this storage entry prior to accessing it. + pub fn unvalidated(mut self) -> Self { + self.validation_hash = None; + self } } -impl Address - for DefaultAddress +impl Address for StaticAddress where - Keys: StorageKey, - ReturnTy: DecodeWithMetadata, + KeyParts: IntoEncodableValues + IntoDecodableValues, + Value: DecodeAsType, + IsPlain: YesMaybe, { - type Target = ReturnTy; - type Keys = Keys; - type IsFetchable = Fetchable; - type IsDefaultable = Defaultable; - type IsIterable = Iterable; + type KeyParts = KeyParts; + type Value = Value; + type IsPlain = IsPlain; fn pallet_name(&self) -> &str { &self.pallet_name @@ -150,31 +90,22 @@ where &self.entry_name } - fn append_entry_bytes(&self, metadata: &Metadata, bytes: &mut Vec) -> Result<(), Error> { - let pallet = metadata.pallet_by_name_err(self.pallet_name())?; - let storage = pallet - .storage() - .ok_or_else(|| MetadataError::StorageNotFoundInPallet(self.pallet_name().to_owned()))?; - let entry = storage - .entry_by_name(self.entry_name()) - .ok_or_else(|| MetadataError::StorageEntryNotFound(self.entry_name().to_owned()))?; - - let hashers = StorageHashers::new(entry.entry_type(), metadata.types())?; - self.keys - .encode_storage_key(bytes, &mut hashers.iter(), metadata.types())?; - Ok(()) - } - fn validation_hash(&self) -> Option<[u8; 32]> { self.validation_hash } } -/// Construct a new dynamic storage lookup. -pub fn dynamic( +/// A dynamic address is simply a [`StaticAddress`] which asserts that the +/// entry *might* be a map and *might* have a default value. 
+pub type DynamicAddress, Value = scale_value::Value> = + StaticAddress; + +/// Construct a new dynamic storage address. You can define the type of the +/// storage keys and value yourself here, but have no guarantee that they will +/// be correct. +pub fn dynamic( pallet_name: impl Into, entry_name: impl Into, - storage_entry_keys: Keys, -) -> DynamicAddress { - DynamicAddress::new(pallet_name, entry_name, storage_entry_keys) +) -> DynamicAddress { + DynamicAddress::::new(pallet_name.into(), entry_name.into()) } diff --git a/core/src/storage/mod.rs b/core/src/storage/mod.rs index e276bbb4700..6c1af197f75 100644 --- a/core/src/storage/mod.rs +++ b/core/src/storage/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// Copyright 2019-2025 Parity Technologies (UK) Ltd. // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. @@ -41,25 +41,29 @@ //! println!("Alice's account info: {value:?}"); //! ``` +mod prefix_of; +mod storage_entry; mod storage_key; -mod utils; +mod storage_key_value; +mod storage_value; pub mod address; -use crate::{Error, Metadata, error::MetadataError, metadata::DecodeWithMetadata}; +use crate::{Metadata, error::StorageError}; use address::Address; -use alloc::vec::Vec; -// This isn't a part of the public API, but expose here because it's useful in Subxt. -#[doc(hidden)] -pub use utils::lookup_storage_entry_details; +pub use prefix_of::{EqualOrPrefixOf, PrefixOf}; +pub use storage_entry::{StorageEntry, entry}; +pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart}; +pub use storage_key_value::StorageKeyValue; +pub use storage_value::StorageValue; /// When the provided `address` is statically generated via the `#[subxt]` macro, this validates /// that the shape of the storage value is the same as the shape expected by the static address. 
/// /// When the provided `address` is dynamic (and thus does not come with any expectation of the /// shape of the constant value), this just returns `Ok(())` -pub fn validate(address: &Addr, metadata: &Metadata) -> Result<(), Error> { +pub fn validate(address: &Addr, metadata: &Metadata) -> Result<(), StorageError> { let Some(hash) = address.validation_hash() else { return Ok(()); }; @@ -67,76 +71,19 @@ pub fn validate(address: &Addr, metadata: &Metadata) -> Result<() let pallet_name = address.pallet_name(); let entry_name = address.entry_name(); - let pallet_metadata = metadata.pallet_by_name_err(pallet_name)?; - - let Some(expected_hash) = pallet_metadata.storage_hash(entry_name) else { - return Err(MetadataError::IncompatibleCodegen.into()); - }; - if expected_hash != hash { - return Err(MetadataError::IncompatibleCodegen.into()); + let pallet_metadata = metadata + .pallet_by_name(pallet_name) + .ok_or_else(|| StorageError::PalletNameNotFound(pallet_name.to_string()))?; + let storage_hash = pallet_metadata.storage_hash(entry_name).ok_or_else(|| { + StorageError::StorageEntryNotFound { + pallet_name: pallet_name.to_string(), + entry_name: entry_name.to_string(), + } + })?; + + if storage_hash != hash { + Err(StorageError::IncompatibleCodegen) + } else { + Ok(()) } - Ok(()) -} - -/// Given a storage address and some metadata, this encodes the address into bytes which can be -/// handed to a node to retrieve the corresponding value. -pub fn get_address_bytes( - address: &Addr, - metadata: &Metadata, -) -> Result, Error> { - let mut bytes = Vec::new(); - utils::write_storage_address_root_bytes(address, &mut bytes); - address.append_entry_bytes(metadata, &mut bytes)?; - Ok(bytes) -} - -/// Given a storage address and some metadata, this encodes the root of the address (ie the pallet -/// and storage entry part) into bytes. If the entry being addressed is inside a map, this returns -/// the bytes needed to iterate over all of the entries within it. 
-pub fn get_address_root_bytes(address: &Addr) -> Vec { - let mut bytes = Vec::new(); - utils::write_storage_address_root_bytes(address, &mut bytes); - bytes -} - -/// Given some storage value that we've retrieved from a node, the address used to retrieve it, and -/// metadata from the node, this function attempts to decode the bytes into the target value specified -/// by the address. -pub fn decode_value( - bytes: &mut &[u8], - address: &Addr, - metadata: &Metadata, -) -> Result { - let pallet_name = address.pallet_name(); - let entry_name = address.entry_name(); - - let (_, entry_metadata) = - utils::lookup_storage_entry_details(pallet_name, entry_name, metadata)?; - let value_ty_id = match entry_metadata.entry_type() { - subxt_metadata::StorageEntryType::Plain(ty) => *ty, - subxt_metadata::StorageEntryType::Map { value_ty, .. } => *value_ty, - }; - - let val = Addr::Target::decode_with_metadata(bytes, value_ty_id, metadata)?; - Ok(val) -} - -/// Return the default value at a given storage address if one is available, or an error otherwise. -pub fn default_value( - address: &Addr, - metadata: &Metadata, -) -> Result { - let pallet_name = address.pallet_name(); - let entry_name = address.entry_name(); - - let (_, entry_metadata) = - utils::lookup_storage_entry_details(pallet_name, entry_name, metadata)?; - let value_ty_id = match entry_metadata.entry_type() { - subxt_metadata::StorageEntryType::Plain(ty) => *ty, - subxt_metadata::StorageEntryType::Map { value_ty, .. } => *value_ty, - }; - - let default_bytes = entry_metadata.default_bytes(); - let val = Addr::Target::decode_with_metadata(&mut &*default_bytes, value_ty_id, metadata)?; - Ok(val) } diff --git a/core/src/storage/prefix_of.rs b/core/src/storage/prefix_of.rs new file mode 100644 index 00000000000..b0a05b2bb99 --- /dev/null +++ b/core/src/storage/prefix_of.rs @@ -0,0 +1,194 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. 
+// see LICENSE for license details. + +use frame_decode::helpers::IntoEncodableValues; +use scale_encode::EncodeAsType; + +/// For a given set of values that can be used as keys for a storage entry, +/// this is implemented for any prefixes of that set. ie if the keys `(A,B,C)` +/// would access a storage value, then `PrefixOf<(A,B,C)>` is implemented for +/// `(A,B)`, `(A,)` and `()`. +pub trait PrefixOf: IntoEncodableValues {} + +// If T impls PrefixOf, &T impls PrefixOf. +impl<'a, K, T: PrefixOf> PrefixOf for &'a T {} + +// Impls for tuples up to length 6 (storage maps rarely require more than 2 entries +// so it's very unlikely we'll ever need to go this deep). +impl PrefixOf<(A,)> for () {} + +impl PrefixOf<(A, B)> for () {} +impl PrefixOf<(A, B)> for (A,) where (A,): IntoEncodableValues {} + +impl PrefixOf<(A, B, C)> for () {} +impl PrefixOf<(A, B, C)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C)> for (A, B) where (A, B): IntoEncodableValues {} + +impl PrefixOf<(A, B, C, D)> for () {} +impl PrefixOf<(A, B, C, D)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D)> for (A, B) where (A, B): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D)> for (A, B, C) where (A, B, C): IntoEncodableValues {} + +impl PrefixOf<(A, B, C, D, E)> for () {} +impl PrefixOf<(A, B, C, D, E)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E)> for (A, B) where (A, B): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E)> for (A, B, C) where (A, B, C): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E)> for (A, B, C, D) where + (A, B, C, D): IntoEncodableValues +{ +} + +impl PrefixOf<(A, B, C, D, E, F)> for () {} +impl PrefixOf<(A, B, C, D, E, F)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B) where (A, B): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C) where + (A, B, C): IntoEncodableValues +{ +} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C, 
D) where + (A, B, C, D): IntoEncodableValues +{ +} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C, D, E) where + (A, B, C, D, E): IntoEncodableValues +{ +} + +// Vecs are prefixes of vecs. The length is not statically known and so +// these would be given dynamically only, leaving the correct length to the user. +impl PrefixOf> for Vec {} + +// We don't use arrays in Subxt for storage entry access, but `IntoEncodableValues` +// supports them so let's allow impls which do use them to benefit too. +macro_rules! array_impl { + ($n:literal: $($p:literal)+) => { + $( + impl PrefixOf<[T; $n]> for [T; $p] {} + )+ + } +} + +array_impl!(1: 0); +array_impl!(2: 1 0); +array_impl!(3: 2 1 0); +array_impl!(4: 3 2 1 0); +array_impl!(5: 4 3 2 1 0); +array_impl!(6: 5 4 3 2 1 0); + +/// This is much like [`PrefixOf`] except that it also includes `Self` as an allowed type, +/// where `Self` must impl [`IntoEncodableValues`] just as every [`PrefixOf`] does. +pub trait EqualOrPrefixOf: IntoEncodableValues {} + +// Tuples +macro_rules! tuple_impl_eq { + ($($t:ident)+) => { + // Any T that is a PrefixOf impls EqualOrPrefixOf too + impl <$($t,)+ T: PrefixOf<($($t,)+)>> EqualOrPrefixOf<($($t,)+)> for T {} + // Keys impls EqualOrPrefixOf + impl <$($t),+> EqualOrPrefixOf<($($t,)+)> for ($($t,)+) where ($($t,)+): IntoEncodableValues {} + // &'a Keys impls EqualOrPrefixOf + impl <'a, $($t),+> EqualOrPrefixOf<($($t,)+)> for &'a ($($t,)+) where ($($t,)+): IntoEncodableValues {} + } +} + +tuple_impl_eq!(A); +tuple_impl_eq!(A B); +tuple_impl_eq!(A B C); +tuple_impl_eq!(A B C D); +tuple_impl_eq!(A B C D E); +tuple_impl_eq!(A B C D E F); + +// Vec +impl EqualOrPrefixOf> for Vec {} +impl<'a, T: EncodeAsType> EqualOrPrefixOf> for &'a Vec {} + +// Arrays +macro_rules! 
array_impl_eq { + ($($n:literal)+) => { + $( + impl EqualOrPrefixOf<[A; $n]> for [A; $n] {} + impl <'a, A: EncodeAsType> EqualOrPrefixOf<[A; $n]> for &'a [A; $n] {} + )+ + } +} + +impl EqualOrPrefixOf<[A; N]> for T where T: PrefixOf<[A; N]> {} +array_impl_eq!(1 2 3 4 5 6); + +#[cfg(test)] +mod test { + use super::*; + + struct Test(core::marker::PhantomData); + + impl Test { + fn new() -> Self { + Test(core::marker::PhantomData) + } + fn accepts_prefix_of>(&self, keys: P) { + let _encoder = keys.into_encodable_values(); + } + fn accepts_eq_or_prefix_of>(&self, keys: P) { + let _encoder = keys.into_encodable_values(); + } + } + + #[test] + fn test_prefix_of() { + // In real life we'd have a struct a bit like this: + let t = Test::<(bool, String, u64)>::new(); + + // And we'd want to be able to call some method like this: + //// This shouldn't work: + // t.accepts_prefix_of((true, String::from("hi"), 0)); + t.accepts_prefix_of(&(true, String::from("hi"))); + t.accepts_prefix_of((true, String::from("hi"))); + t.accepts_prefix_of((true,)); + t.accepts_prefix_of(()); + + let t = Test::<[u64; 5]>::new(); + + //// This shouldn't work: + // t.accepts_prefix_of([0,1,2,3,4]); + t.accepts_prefix_of(&[0, 1, 2, 3]); + t.accepts_prefix_of([0, 1, 2, 3]); + t.accepts_prefix_of([0, 1, 2]); + t.accepts_prefix_of([0, 1]); + t.accepts_prefix_of([0]); + t.accepts_prefix_of([]); + } + + #[test] + fn test_eq_or_prefix_of() { + // In real life we'd have a struct a bit like this: + let t = Test::<(bool, String, u64)>::new(); + + // And we'd want to be able to call some method like this: + t.accepts_eq_or_prefix_of(&(true, String::from("hi"), 0)); + t.accepts_eq_or_prefix_of(&(true, String::from("hi"))); + t.accepts_eq_or_prefix_of(&(true,)); + t.accepts_eq_or_prefix_of(&()); + + t.accepts_eq_or_prefix_of((true, String::from("hi"), 0)); + t.accepts_eq_or_prefix_of((true, String::from("hi"))); + t.accepts_eq_or_prefix_of((true,)); + t.accepts_eq_or_prefix_of(()); + + let t = Test::<[u64; 
5]>::new(); + + t.accepts_eq_or_prefix_of(&[0, 1, 2, 3, 4]); + t.accepts_eq_or_prefix_of(&[0, 1, 2, 3]); + t.accepts_eq_or_prefix_of(&[0, 1, 2]); + t.accepts_eq_or_prefix_of(&[0, 1]); + t.accepts_eq_or_prefix_of(&[0]); + t.accepts_eq_or_prefix_of(&[]); + + t.accepts_eq_or_prefix_of([0, 1, 2, 3, 4]); + t.accepts_eq_or_prefix_of([0, 1, 2, 3]); + t.accepts_eq_or_prefix_of([0, 1, 2]); + t.accepts_eq_or_prefix_of([0, 1]); + t.accepts_eq_or_prefix_of([0]); + t.accepts_eq_or_prefix_of([]); + } +} diff --git a/core/src/storage/storage_entry.rs b/core/src/storage/storage_entry.rs new file mode 100644 index 00000000000..4e8328b9e51 --- /dev/null +++ b/core/src/storage/storage_entry.rs @@ -0,0 +1,173 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{PrefixOf, StorageKeyValue, StorageValue, address::Address}; +use crate::error::StorageError; +use crate::utils::{Maybe, Yes, YesMaybe}; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::marker::PhantomData; +use frame_decode::storage::{IntoEncodableValues, StorageInfo}; +use scale_info::PortableRegistry; +use subxt_metadata::Metadata; + +/// Create a [`StorageEntry`] to work with a given storage entry. +pub fn entry<'info, Addr: Address>( + address: Addr, + metadata: &'info Metadata, +) -> Result, StorageError> { + super::validate(&address, &metadata)?; + + use frame_decode::storage::StorageTypeInfo; + let types = metadata.types(); + let info = metadata + .storage_info(address.pallet_name(), address.entry_name()) + .map_err(|e| StorageError::StorageInfoError(e.into_owned()))?; + + Ok(StorageEntry(Arc::new(StorageEntryInner { + address, + info: Arc::new(info), + types, + marker: PhantomData, + }))) +} + +/// This represents a single storage entry (be it a plain value or map). 
+pub struct StorageEntry<'info, Addr, IsPlain>(Arc>); + +impl<'info, Addr, IsPlain> Clone for StorageEntry<'info, Addr, IsPlain> { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +struct StorageEntryInner<'info, Addr, IsPlain> { + address: Addr, + info: Arc>, + types: &'info PortableRegistry, + marker: core::marker::PhantomData, +} + +impl<'info, Addr: Address, IsPlain> StorageEntry<'info, Addr, IsPlain> { + /// Name of the pallet containing this storage entry. + pub fn pallet_name(&self) -> &str { + self.0.address.pallet_name() + } + + /// Name of the storage entry. + pub fn entry_name(&self) -> &str { + self.0.address.entry_name() + } + + /// Is the storage entry a plain value? + pub fn is_plain(&self) -> bool { + self.0.info.keys.is_empty() + } + + /// Is the storage entry a map? + pub fn is_map(&self) -> bool { + !self.is_plain() + } + + /// Instantiate a [`StorageKeyValue`] for this entry. + /// + /// It is expected that the bytes are obtained by iterating key/value pairs at this address. + pub fn key_value( + &self, + key_bytes: impl Into>, + value_bytes: Vec, + ) -> StorageKeyValue<'info, Addr> { + StorageKeyValue::new( + self.0.info.clone(), + self.0.types, + key_bytes.into(), + value_bytes, + ) + } + + /// Instantiate a [`StorageValue`] for this entry. + /// + /// It is expected that the bytes are obtained by fetching a value at this address. + pub fn value(&self, bytes: Vec) -> StorageValue<'info, Addr::Value> { + StorageValue::new(self.0.info.clone(), self.0.types, bytes) + } + + /// Return the default [`StorageValue`] for this storage entry, if there is one. + pub fn default_value(&self) -> Option> { + if let Some(default_bytes) = self.0.info.default_value.as_deref() { + Some(StorageValue::new( + self.0.info.clone(), + self.0.types, + default_bytes.to_vec(), + )) + } else { + None + } + } + + /// The keys for plain storage values are always 32 byte hashes. 
+ pub fn key_prefix(&self) -> [u8; 32] { + frame_decode::storage::encode_storage_key_prefix( + self.0.address.pallet_name(), + self.0.address.entry_name(), + ) + } + + // This has a less "strict" type signature and so is just used under the hood. + fn key(&self, key_parts: Keys) -> Result, StorageError> { + let key = frame_decode::storage::encode_storage_key_with_info( + self.0.address.pallet_name(), + self.0.address.entry_name(), + key_parts, + &self.0.info, + self.0.types, + ) + .map_err(StorageError::StorageKeyEncodeError)?; + + Ok(key) + } +} + +impl<'info, Addr: Address> StorageEntry<'info, Addr, Yes> { + /// This constructs a key suitable for fetching a value at the given plain storage address. + pub fn fetch_key(&self) -> Vec { + self.key_prefix().to_vec() + } +} + +impl<'info, Addr: Address> StorageEntry<'info, Addr, Maybe> { + /// This constructs a key suitable for fetching a value at the given map storage address. This will error + /// if we can see that the wrong number of key parts are provided. + pub fn fetch_key(&self, key_parts: Addr::KeyParts) -> Result, StorageError> { + if key_parts.num_encodable_values() != self.0.info.keys.len() { + Err(StorageError::WrongNumberOfKeyPartsProvidedForFetching { + expected: self.0.info.keys.len(), + got: key_parts.num_encodable_values(), + }) + } else { + self.key(key_parts) + } + } + + /// This constructs a key suitable for iterating at the given storage address. This will error + /// if we can see that too many key parts are provided. 
+ pub fn iter_key>( + &self, + key_parts: Keys, + ) -> Result, StorageError> { + if Addr::IsPlain::is_yes() { + Err(StorageError::CannotIterPlainEntry { + pallet_name: self.0.address.pallet_name().into(), + entry_name: self.0.address.entry_name().into(), + }) + } else if key_parts.num_encodable_values() >= self.0.info.keys.len() { + Err(StorageError::WrongNumberOfKeyPartsProvidedForIterating { + max_expected: self.0.info.keys.len() - 1, + got: key_parts.num_encodable_values(), + }) + } else { + self.key(key_parts) + } + } +} diff --git a/core/src/storage/storage_key.rs b/core/src/storage/storage_key.rs index bac737c66ff..0a685d814e3 100644 --- a/core/src/storage/storage_key.rs +++ b/core/src/storage/storage_key.rs @@ -1,471 +1,138 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// Copyright 2019-2025 Parity Technologies (UK) Ltd. // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use super::utils::hash_bytes; -use crate::error::{Error, MetadataError, StorageAddressError}; -use alloc::vec; -use alloc::vec::Vec; -use scale_decode::{DecodeAsType, visitor::IgnoreVisitor}; -use scale_encode::EncodeAsType; -use scale_info::{PortableRegistry, TypeDef}; -use scale_value::Value; -use subxt_metadata::{StorageEntryType, StorageHasher}; - -/// A collection of storage hashers paired with the type ids of the types they should hash. -/// Can be created for each storage entry in the metadata via [`StorageHashers::new()`]. -#[derive(Debug)] -pub struct StorageHashers { - hashers_and_ty_ids: Vec<(StorageHasher, u32)>, -} - -impl StorageHashers { - /// Creates new [`StorageHashers`] from a storage entry. Looks at the [`StorageEntryType`] and - /// assigns a hasher to each type id that makes up the key. - pub fn new(storage_entry: &StorageEntryType, types: &PortableRegistry) -> Result { - let mut hashers_and_ty_ids = vec![]; - if let StorageEntryType::Map { - hashers, key_ty, .. 
- } = storage_entry - { - let ty = types - .resolve(*key_ty) - .ok_or(MetadataError::TypeNotFound(*key_ty))?; - - if hashers.len() == 1 { - // If there's exactly 1 hasher, then we have a plain StorageMap. We can't - // break the key down (even if it's a tuple) because the hasher applies to - // the whole key. - hashers_and_ty_ids = vec![(hashers[0], *key_ty)]; - } else { - // If there are multiple hashers, then we have a StorageDoubleMap or StorageNMap. - // We expect the key type to be tuple, and we will return a MapEntryKey for each - // key in the tuple. - let hasher_count = hashers.len(); - let tuple = match &ty.type_def { - TypeDef::Tuple(tuple) => tuple, - _ => { - return Err(StorageAddressError::WrongNumberOfHashers { - hashers: hasher_count, - fields: 1, - } - .into()); - } - }; - - // We should have the same number of hashers and keys. - let key_count = tuple.fields.len(); - if hasher_count != key_count { - return Err(StorageAddressError::WrongNumberOfHashers { - hashers: hasher_count, - fields: key_count, - } - .into()); - } - - // Collect them together. - hashers_and_ty_ids = tuple - .fields - .iter() - .zip(hashers) - .map(|(field, hasher)| (*hasher, field.id)) - .collect(); - } +use crate::error::StorageKeyError; +use alloc::sync::Arc; +use core::marker::PhantomData; +use frame_decode::storage::{IntoDecodableValues, StorageInfo, StorageKey as StorageKeyPartInfo}; +use scale_info::PortableRegistry; + +pub use frame_decode::storage::StorageHasher; + +/// This represents the different parts of a storage key. 
+pub struct StorageKey<'info, KeyParts> { + info: Arc>, + types: &'info PortableRegistry, + bytes: Arc<[u8]>, + marker: PhantomData, +} + +impl<'info, KeyParts: IntoDecodableValues> StorageKey<'info, KeyParts> { + pub(crate) fn new( + info: &StorageInfo<'info, u32>, + types: &'info PortableRegistry, + bytes: Arc<[u8]>, + ) -> Result { + let cursor = &mut &*bytes; + let storage_key_info = frame_decode::storage::decode_storage_key_with_info( + cursor, &info, types, + ) + .map_err(|e| StorageKeyError::StorageKeyDecodeError { + bytes: bytes.to_vec(), + error: e, + })?; + + if !cursor.is_empty() { + return Err(StorageKeyError::LeftoverBytes { + bytes: cursor.to_vec(), + }); } - Ok(Self { hashers_and_ty_ids }) - } - - /// Creates an iterator over the storage hashers and type ids. - pub fn iter(&self) -> StorageHashersIter<'_> { - StorageHashersIter { - hashers: self, - idx: 0, - } - } -} - -/// An iterator over all type ids of the key and the respective hashers. -/// See [`StorageHashers::iter()`]. -#[derive(Debug)] -pub struct StorageHashersIter<'a> { - hashers: &'a StorageHashers, - idx: usize, -} - -impl StorageHashersIter<'_> { - fn next_or_err(&mut self) -> Result<(StorageHasher, u32), Error> { - self.next().ok_or_else(|| { - StorageAddressError::TooManyKeys { - expected: self.hashers.hashers_and_ty_ids.len(), - } - .into() + Ok(StorageKey { + info: Arc::new(storage_key_info), + types, + bytes, + marker: PhantomData, }) } -} - -impl Iterator for StorageHashersIter<'_> { - type Item = (StorageHasher, u32); - - fn next(&mut self) -> Option { - let item = self.hashers.hashers_and_ty_ids.get(self.idx).copied()?; - self.idx += 1; - Some(item) - } -} - -impl ExactSizeIterator for StorageHashersIter<'_> { - fn len(&self) -> usize { - self.hashers.hashers_and_ty_ids.len() - self.idx - } -} - -/// This trait should be implemented by anything that can be used as one or multiple storage keys. 
-pub trait StorageKey { - /// Encodes the storage key into some bytes - fn encode_storage_key( - &self, - bytes: &mut Vec, - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result<(), Error>; - - /// Attempts to decode the StorageKey given some bytes and a set of hashers and type IDs that they are meant to represent. - /// The bytes passed to `decode` should start with: - /// - 1. some fixed size hash (for all hashers except `Identity`) - /// - 2. the plain key value itself (for `Identity`, `Blake2_128Concat` and `Twox64Concat` hashers) - fn decode_storage_key( - bytes: &mut &[u8], - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result - where - Self: Sized + 'static; -} - -/// Implement `StorageKey` for `()` which can be used for keyless storage entries, -/// or to otherwise just ignore some entry. -impl StorageKey for () { - fn encode_storage_key( - &self, - _bytes: &mut Vec, - hashers: &mut StorageHashersIter, - _types: &PortableRegistry, - ) -> Result<(), Error> { - _ = hashers.next_or_err(); - Ok(()) - } - - fn decode_storage_key( - bytes: &mut &[u8], - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result { - let (hasher, ty_id) = match hashers.next_or_err() { - Ok((hasher, ty_id)) => (hasher, ty_id), - Err(_) if bytes.is_empty() => return Ok(()), - Err(err) => return Err(err), - }; - consume_hash_returning_key_bytes(bytes, hasher, ty_id, types)?; - Ok(()) - } -} - -/// A storage key used as part of the static codegen. -#[derive(Clone, Debug, PartialOrd, PartialEq, Eq)] -pub struct StaticStorageKey { - key: K, -} - -impl StaticStorageKey { - /// Creates a new static storage key. - pub fn new(key: K) -> Self { - StaticStorageKey { key } - } -} - -impl StaticStorageKey { - /// Returns the decoded storage key. 
- pub fn into_key(self) -> K { - self.key - } -} - -impl StorageKey for StaticStorageKey { - fn encode_storage_key( - &self, - bytes: &mut Vec, - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result<(), Error> { - let (hasher, ty_id) = hashers.next_or_err()?; - let encoded_value = self.key.encode_as_type(ty_id, types)?; - hash_bytes(&encoded_value, hasher, bytes); - Ok(()) - } - - fn decode_storage_key( - bytes: &mut &[u8], - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result - where - Self: Sized + 'static, - { - let (hasher, ty_id) = hashers.next_or_err()?; - let key_bytes = consume_hash_returning_key_bytes(bytes, hasher, ty_id, types)?; - // if the hasher had no key appended, we can't decode it into a `StaticStorageKey`. - let Some(key_bytes) = key_bytes else { - return Err(StorageAddressError::HasherCannotReconstructKey { ty_id, hasher }.into()); - }; + /// Attempt to decode the values contained within this storage key. The target type is + /// given by the storage address used to access this entry. To decode into a custom type, + /// use [`Self::parts()`] or [`Self::part()`] and decode each part. + pub fn decode(&self) -> Result { + let values = + frame_decode::storage::decode_storage_key_values(&self.bytes, &self.info, self.types) + .map_err(StorageKeyError::CannotDecodeValuesInKey)?; - // Decode and return the key. - let key = K::decode_as_type(&mut &*key_bytes, ty_id, types)?; - let key = StaticStorageKey { key }; - Ok(key) + Ok(values) } -} -impl StorageKey for Vec { - fn encode_storage_key( - &self, - bytes: &mut Vec, - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result<(), Error> { - for value in self.iter() { - let (hasher, ty_id) = hashers.next_or_err()?; - let encoded_value = value.encode_as_type(ty_id, types)?; - hash_bytes(&encoded_value, hasher, bytes); - } - Ok(()) + /// Iterate over the parts of this storage key. 
Each part of a storage key corresponds to a + /// single value that has been hashed. + pub fn parts(&self) -> impl ExactSizeIterator> { + let parts_len = self.info.parts().len(); + (0..parts_len).map(move |index| StorageKeyPart { + index, + info: self.info.clone(), + types: self.types, + bytes: self.bytes.clone(), + }) } - fn decode_storage_key( - bytes: &mut &[u8], - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result - where - Self: Sized + 'static, - { - let mut result: Vec = vec![]; - for (hasher, ty_id) in hashers.by_ref() { - match consume_hash_returning_key_bytes(bytes, hasher, ty_id, types)? { - Some(value_bytes) => { - let value = - scale_value::scale::decode_as_type(&mut &*value_bytes, ty_id, types)?; - - result.push(value.remove_context()); - } - None => { - result.push(Value::unnamed_composite([])); - } - } + /// Return the part of the storage key at the provided index, or `None` if the index is out of bounds. + pub fn part(&self, index: usize) -> Option> { + if index < self.parts().len() { + Some(StorageKeyPart { + index, + info: self.info.clone(), + types: self.types, + bytes: self.bytes.clone(), + }) + } else { + None } - - // We've consumed all of the hashers, so we expect to also consume all of the bytes: - if !bytes.is_empty() { - return Err(StorageAddressError::TooManyBytes.into()); - } - - Ok(result) } } -// Skip over the hash bytes (including any key at the end), returning bytes -// representing the key if one exists, or None if the hasher has no key appended. -fn consume_hash_returning_key_bytes<'a>( - bytes: &mut &'a [u8], - hasher: StorageHasher, - ty_id: u32, - types: &PortableRegistry, -) -> Result, Error> { - // Strip the bytes off for the actual hash, consuming them. - let bytes_to_strip = hasher.len_excluding_key(); - if bytes.len() < bytes_to_strip { - return Err(StorageAddressError::NotEnoughBytes.into()); - } - *bytes = &bytes[bytes_to_strip..]; - - // Now, find the bytes representing the key, consuming them. 
- let before_key = *bytes; - if hasher.ends_with_key() { - scale_decode::visitor::decode_with_visitor( - bytes, - ty_id, - types, - IgnoreVisitor::::new(), - ) - .map_err(|err| Error::Decode(err.into()))?; - // Return the key bytes, having advanced the input cursor past them. - let key_bytes = &before_key[..before_key.len() - bytes.len()]; - - Ok(Some(key_bytes)) - } else { - // There are no key bytes, so return None. - Ok(None) - } -} - -/// Generates StorageKey implementations for tuples -macro_rules! impl_tuples { - ($($ty:ident $n:tt),+) => {{ - impl<$($ty: StorageKey),+> StorageKey for ($( $ty ),+) { - fn encode_storage_key( - &self, - bytes: &mut Vec, - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result<(), Error> { - $( self.$n.encode_storage_key(bytes, hashers, types)?; )+ - Ok(()) - } - - fn decode_storage_key( - bytes: &mut &[u8], - hashers: &mut StorageHashersIter, - types: &PortableRegistry, - ) -> Result - where - Self: Sized + 'static, - { - Ok( ( $( $ty::decode_storage_key(bytes, hashers, types)?, )+ ) ) - } - } - }}; +/// This represents a part of a storage key. 
+pub struct StorageKeyPart<'info> { + index: usize, + info: Arc>, + types: &'info PortableRegistry, + bytes: Arc<[u8]>, } -#[rustfmt::skip] -const _: () = { - impl_tuples!(A 0, B 1); - impl_tuples!(A 0, B 1, C 2); - impl_tuples!(A 0, B 1, C 2, D 3); - impl_tuples!(A 0, B 1, C 2, D 3, E 4); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); -}; - -#[cfg(test)] -mod tests { - - use codec::Encode; - use scale_info::{PortableRegistry, Registry, TypeInfo, meta_type}; - use subxt_metadata::StorageHasher; - - use crate::utils::Era; - - use alloc::string::String; - use alloc::vec; - use alloc::vec::Vec; - - use super::{StaticStorageKey, StorageKey}; - - struct KeyBuilder { - registry: Registry, - bytes: Vec, - hashers_and_ty_ids: Vec<(StorageHasher, u32)>, +impl<'info> StorageKeyPart<'info> { + /// Get the raw bytes for this part of the storage key. + pub fn bytes(&self) -> &[u8] { + let part = &self.info[self.index]; + let hash_range = part.hash_range(); + let value_range = part.value().map(|v| v.range()).unwrap_or(std::ops::Range { + start: hash_range.end, + end: hash_range.end, + }); + let combined_range = std::ops::Range { + start: hash_range.start, + end: value_range.end, + }; + &self.bytes[combined_range] } - impl KeyBuilder { - fn new() -> KeyBuilder { - KeyBuilder { - registry: Registry::new(), - bytes: vec![], - hashers_and_ty_ids: vec![], - } - } - - fn add(mut self, value: T, hasher: StorageHasher) -> Self { - let id = self.registry.register_type(&meta_type::()).id; - - self.hashers_and_ty_ids.push((hasher, id)); - for _i in 0..hasher.len_excluding_key() { - self.bytes.push(0); - } - value.encode_to(&mut self.bytes); - self - } - - fn build(self) -> (PortableRegistry, Vec, Vec<(StorageHasher, u32)>) { - (self.registry.into(), self.bytes, self.hashers_and_ty_ids) - } + /// Get the hasher that was used to construct this part of the storage key. 
+ pub fn hasher(&self) -> StorageHasher { + self.info[self.index].hasher() } - #[test] - fn storage_key_decoding_fuzz() { - let hashers = [ - StorageHasher::Blake2_128, - StorageHasher::Blake2_128Concat, - StorageHasher::Blake2_256, - StorageHasher::Identity, - StorageHasher::Twox128, - StorageHasher::Twox256, - StorageHasher::Twox64Concat, - ]; - - let key_preserving_hashers = [ - StorageHasher::Blake2_128Concat, - StorageHasher::Identity, - StorageHasher::Twox64Concat, - ]; - - type T4A = ( - (), - StaticStorageKey, - StaticStorageKey, - StaticStorageKey, - ); - type T4B = ( - (), - (StaticStorageKey, StaticStorageKey), - StaticStorageKey, - ); - type T4C = ( - ((), StaticStorageKey), - (StaticStorageKey, StaticStorageKey), - ); - - let era = Era::Immortal; - for h0 in hashers { - for h1 in key_preserving_hashers { - for h2 in key_preserving_hashers { - for h3 in key_preserving_hashers { - let (types, bytes, hashers_and_ty_ids) = KeyBuilder::new() - .add((), h0) - .add(13u32, h1) - .add("Hello", h2) - .add(era, h3) - .build(); - - let hashers = super::StorageHashers { hashers_and_ty_ids }; - let keys_a = - T4A::decode_storage_key(&mut &bytes[..], &mut hashers.iter(), &types) - .unwrap(); - - let keys_b = - T4B::decode_storage_key(&mut &bytes[..], &mut hashers.iter(), &types) - .unwrap(); + /// For keys that were produced using "concat" or "identity" hashers, the value + /// is available as a part of the key hash, allowing us to decode it into anything + /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a + /// different hasher, this will return `None`. 
+ pub fn decode_as(&self) -> Result, StorageKeyError> { + let part_info = &self.info[self.index]; + let Some(value_info) = part_info.value() else { + return Ok(None); + }; - let keys_c = - T4C::decode_storage_key(&mut &bytes[..], &mut hashers.iter(), &types) - .unwrap(); + let value_bytes = &self.bytes[value_info.range()]; + let value_ty = value_info.ty().clone(); - assert_eq!(keys_a.1.into_key(), 13); - assert_eq!(keys_b.1.0.into_key(), 13); - assert_eq!(keys_c.0.1.into_key(), 13); + let decoded_key_part = T::decode_as_type(&mut &*value_bytes, value_ty, self.types) + .map_err(|e| StorageKeyError::CannotDecodeValueInKey { + index: self.index, + error: e, + })?; - assert_eq!(keys_a.2.into_key(), "Hello"); - assert_eq!(keys_b.1.1.into_key(), "Hello"); - assert_eq!(keys_c.1.0.into_key(), "Hello"); - assert_eq!(keys_a.3.into_key(), era); - assert_eq!(keys_b.2.into_key(), era); - assert_eq!(keys_c.1.1.into_key(), era); - } - } - } - } + Ok(Some(decoded_key_part)) } } diff --git a/core/src/storage/storage_key_value.rs b/core/src/storage/storage_key_value.rs new file mode 100644 index 00000000000..fe712252bf8 --- /dev/null +++ b/core/src/storage/storage_key_value.rs @@ -0,0 +1,47 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{Address, StorageKey, StorageValue}; +use crate::error::StorageKeyError; +use alloc::sync::Arc; +use frame_decode::storage::StorageInfo; +use scale_info::PortableRegistry; + +/// This represents a storage key/value pair, which is typically returned from +/// iterating over values in some storage map. 
+pub struct StorageKeyValue<'info, Addr: Address> { + key: Arc<[u8]>, + // This contains the storage information already: + value: StorageValue<'info, Addr::Value>, +} + +impl<'info, Addr: Address> StorageKeyValue<'info, Addr> { + pub(crate) fn new( + info: Arc>, + types: &'info PortableRegistry, + key_bytes: Arc<[u8]>, + value_bytes: Vec, + ) -> Self { + StorageKeyValue { + key: key_bytes, + value: StorageValue::new(info, types, value_bytes), + } + } + + /// Get the raw bytes for this storage entry's key. + pub fn key_bytes(&self) -> &[u8] { + &self.key + } + + /// Decode the key for this storage entry. This gives back a type from which we can + /// decode specific parts of the key hash (where applicable). + pub fn key(&'_ self) -> Result, StorageKeyError> { + StorageKey::new(&self.value.info, self.value.types, self.key.clone()) + } + + /// Return the storage value. + pub fn value(&self) -> &StorageValue<'info, Addr::Value> { + &self.value + } +} diff --git a/core/src/storage/storage_value.rs b/core/src/storage/storage_value.rs new file mode 100644 index 00000000000..de070cdc186 --- /dev/null +++ b/core/src/storage/storage_value.rs @@ -0,0 +1,69 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::error::StorageValueError; +use alloc::sync::Arc; +use core::marker::PhantomData; +use frame_decode::storage::StorageInfo; +use scale_decode::DecodeAsType; +use scale_info::PortableRegistry; + +/// This represents a storage value. 
+pub struct StorageValue<'info, Value> { + pub(crate) info: Arc>, + pub(crate) types: &'info PortableRegistry, + bytes: Vec, + marker: PhantomData, +} + +impl<'info, Value: DecodeAsType> StorageValue<'info, Value> { + pub(crate) fn new( + info: Arc>, + types: &'info PortableRegistry, + bytes: Vec, + ) -> StorageValue<'info, Value> { + StorageValue { + info, + types, + bytes, + marker: PhantomData, + } + } + + /// Get the raw bytes for this storage value. + pub fn bytes(&self) -> &[u8] { + &self.bytes + } + + /// Consume this storage value and return the raw bytes. + pub fn into_bytes(self) -> Vec { + self.bytes.to_vec() + } + + /// Decode this storage value into the provided response type. + pub fn decode(&self) -> Result { + self.decode_as::() + } + + /// Decode this storage value into an arbitrary type. + pub fn decode_as(&self) -> Result { + let cursor = &mut &*self.bytes; + + let value = frame_decode::storage::decode_storage_value_with_info( + cursor, + &self.info, + self.types, + T::into_visitor(), + ) + .map_err(StorageValueError::CannotDecode)?; + + if !cursor.is_empty() { + return Err(StorageValueError::LeftoverBytes { + bytes: cursor.to_vec(), + }); + } + + Ok(value) + } +} diff --git a/core/src/storage/utils.rs b/core/src/storage/utils.rs deleted file mode 100644 index dc5d10ace44..00000000000 --- a/core/src/storage/utils.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! these utility methods complement the [`Address`] trait, but -//! aren't things that should ever be overridden, and so don't exist on -//! the trait itself. 
- -use super::address::Address; -use crate::error::{Error, MetadataError}; -use crate::metadata::Metadata; -use alloc::borrow::ToOwned; -use alloc::vec::Vec; -use subxt_metadata::{PalletMetadata, StorageEntryMetadata, StorageHasher}; - -/// Return the root of a given [`Address`]: hash the pallet name and entry name -/// and append those bytes to the output. -pub fn write_storage_address_root_bytes(addr: &Addr, out: &mut Vec) { - out.extend(sp_crypto_hashing::twox_128(addr.pallet_name().as_bytes())); - out.extend(sp_crypto_hashing::twox_128(addr.entry_name().as_bytes())); -} - -/// Take some SCALE encoded bytes and a [`StorageHasher`] and hash the bytes accordingly. -pub fn hash_bytes(input: &[u8], hasher: StorageHasher, bytes: &mut Vec) { - match hasher { - StorageHasher::Identity => bytes.extend(input), - StorageHasher::Blake2_128 => bytes.extend(sp_crypto_hashing::blake2_128(input)), - StorageHasher::Blake2_128Concat => { - bytes.extend(sp_crypto_hashing::blake2_128(input)); - bytes.extend(input); - } - StorageHasher::Blake2_256 => bytes.extend(sp_crypto_hashing::blake2_256(input)), - StorageHasher::Twox128 => bytes.extend(sp_crypto_hashing::twox_128(input)), - StorageHasher::Twox256 => bytes.extend(sp_crypto_hashing::twox_256(input)), - StorageHasher::Twox64Concat => { - bytes.extend(sp_crypto_hashing::twox_64(input)); - bytes.extend(input); - } - } -} - -/// Return details about the given storage entry. 
-pub fn lookup_storage_entry_details<'a>( - pallet_name: &str, - entry_name: &str, - metadata: &'a Metadata, -) -> Result<(PalletMetadata<'a>, &'a StorageEntryMetadata), Error> { - let pallet_metadata = metadata.pallet_by_name_err(pallet_name)?; - let storage_metadata = pallet_metadata - .storage() - .ok_or_else(|| MetadataError::StorageNotFoundInPallet(pallet_name.to_owned()))?; - let storage_entry = storage_metadata - .entry_by_name(entry_name) - .ok_or_else(|| MetadataError::StorageEntryNotFound(entry_name.to_owned()))?; - Ok((pallet_metadata, storage_entry)) -} diff --git a/core/src/tx/mod.rs b/core/src/tx/mod.rs index 5a1c590deb8..2eb927e8cd9 100644 --- a/core/src/tx/mod.rs +++ b/core/src/tx/mod.rs @@ -59,11 +59,11 @@ pub mod payload; pub mod signer; +use crate::Metadata; use crate::config::{Config, ExtrinsicParams, ExtrinsicParamsEncoder, HashFor, Hasher}; -use crate::error::{Error, ExtrinsicError, MetadataError}; -use crate::metadata::Metadata; +use crate::error::ExtrinsicError; use crate::utils::Encoded; -use alloc::borrow::{Cow, ToOwned}; +use alloc::borrow::Cow; use alloc::vec::Vec; use codec::{Compact, Encode}; use payload::Payload; @@ -77,18 +77,28 @@ pub use crate::client::{ClientState, RuntimeVersion}; /// if the call is valid (or if it's not possible to check since the call has no validation hash). /// Return an error if the call was not valid or something went wrong trying to validate it (ie /// the pallet or call in question do not exist at all). -pub fn validate(call: &Call, metadata: &Metadata) -> Result<(), Error> { - if let Some(details) = call.validation_details() { - let expected_hash = metadata - .pallet_by_name_err(details.pallet_name)? 
- .call_hash(details.call_name) - .ok_or_else(|| MetadataError::CallNameNotFound(details.call_name.to_owned()))?; - - if details.hash != expected_hash { - return Err(MetadataError::IncompatibleCodegen.into()); - } +pub fn validate(call: &Call, metadata: &Metadata) -> Result<(), ExtrinsicError> { + let Some(details) = call.validation_details() else { + return Ok(()); + }; + + let pallet_name = details.pallet_name; + let call_name = details.call_name; + + let expected_hash = metadata + .pallet_by_name(pallet_name) + .ok_or_else(|| ExtrinsicError::PalletNameNotFound(pallet_name.to_string()))? + .call_hash(call_name) + .ok_or_else(|| ExtrinsicError::CallNameNotFound { + pallet_name: pallet_name.to_string(), + call_name: call_name.to_string(), + })?; + + if details.hash != expected_hash { + Err(ExtrinsicError::IncompatibleCodegen) + } else { + Ok(()) } - Ok(()) } /// Returns the suggested transaction versions to build for a given chain, or an error @@ -96,7 +106,7 @@ pub fn validate(call: &Call, metadata: &Metadata) -> Result<(), E /// /// If the result is [`TransactionVersion::V4`], use the `v4` methods in this module. If it's /// [`TransactionVersion::V5`], use the `v5` ones. 
-pub fn suggested_version(metadata: &Metadata) -> Result { +pub fn suggested_version(metadata: &Metadata) -> Result { let versions = metadata.extrinsic().supported_versions(); if versions.contains(&4) { @@ -104,7 +114,7 @@ pub fn suggested_version(metadata: &Metadata) -> Result(call: &Call, metadata: &Metadata) -> Result, Error> { +pub fn call_data( + call: &Call, + metadata: &Metadata, +) -> Result, ExtrinsicError> { let mut bytes = Vec::new(); call.encode_call_data_to(metadata, &mut bytes)?; Ok(bytes) @@ -128,7 +141,7 @@ pub fn call_data(call: &Call, metadata: &Metadata) -> Result( call: &Call, metadata: &Metadata, -) -> Result, Error> { +) -> Result, ExtrinsicError> { create_unsigned_at_version(call, 4, metadata) } @@ -136,7 +149,7 @@ pub fn create_v4_unsigned( pub fn create_v5_bare( call: &Call, metadata: &Metadata, -) -> Result, Error> { +) -> Result, ExtrinsicError> { create_unsigned_at_version(call, 5, metadata) } @@ -145,7 +158,7 @@ fn create_unsigned_at_version( call: &Call, tx_version: u8, metadata: &Metadata, -) -> Result, Error> { +) -> Result, ExtrinsicError> { // 1. Validate this call against the current node metadata if the call comes // with a hash allowing us to do so. validate(call, metadata)?; @@ -176,7 +189,7 @@ pub fn create_v4_signed( call: &Call, client_state: &ClientState, params: >::Params, -) -> Result, Error> { +) -> Result, ExtrinsicError> { // 1. Validate this call against the current node metadata if the call comes // with a hash allowing us to do so. validate(call, &client_state.metadata)?; @@ -200,7 +213,7 @@ pub fn create_v5_general( call: &Call, client_state: &ClientState, params: >::Params, -) -> Result, Error> { +) -> Result, ExtrinsicError> { // 1. Validate this call against the current node metadata if the call comes // with a hash allowing us to do so. 
validate(call, &client_state.metadata)?; diff --git a/core/src/tx/payload.rs b/core/src/tx/payload.rs index a3842cf03b4..0dd0748cb46 100644 --- a/core/src/tx/payload.rs +++ b/core/src/tx/payload.rs @@ -5,10 +5,9 @@ //! This module contains the trait and types used to represent //! transactions that can be submitted. -use crate::Error; -use crate::error::MetadataError; -use crate::metadata::Metadata; -use alloc::borrow::{Cow, ToOwned}; +use crate::Metadata; +use crate::error::ExtrinsicError; +use alloc::borrow::Cow; use alloc::boxed::Box; use alloc::string::String; @@ -21,11 +20,15 @@ use scale_value::{Composite, Value, ValueDef, Variant}; /// to a node. pub trait Payload { /// Encode call data to the provided output. - fn encode_call_data_to(&self, metadata: &Metadata, out: &mut Vec) -> Result<(), Error>; + fn encode_call_data_to( + &self, + metadata: &Metadata, + out: &mut Vec, + ) -> Result<(), ExtrinsicError>; /// Encode call data and return the output. This is a convenience /// wrapper around [`Payload::encode_call_data_to`]. - fn encode_call_data(&self, metadata: &Metadata) -> Result, Error> { + fn encode_call_data(&self, metadata: &Metadata) -> Result, ExtrinsicError> { let mut v = Vec::new(); self.encode_call_data_to(metadata, &mut v)?; Ok(v) @@ -46,10 +49,10 @@ macro_rules! 
boxed_payload { &self, metadata: &Metadata, out: &mut Vec, - ) -> Result<(), Error> { + ) -> Result<(), ExtrinsicError> { self.as_ref().encode_call_data_to(metadata, out) } - fn encode_call_data(&self, metadata: &Metadata) -> Result, Error> { + fn encode_call_data(&self, metadata: &Metadata) -> Result, ExtrinsicError> { self.as_ref().encode_call_data(metadata) } fn validation_details(&self) -> Option> { @@ -164,11 +167,20 @@ impl DefaultPayload> { } impl Payload for DefaultPayload { - fn encode_call_data_to(&self, metadata: &Metadata, out: &mut Vec) -> Result<(), Error> { - let pallet = metadata.pallet_by_name_err(&self.pallet_name)?; + fn encode_call_data_to( + &self, + metadata: &Metadata, + out: &mut Vec, + ) -> Result<(), ExtrinsicError> { + let pallet = metadata + .pallet_by_name(&self.pallet_name) + .ok_or_else(|| ExtrinsicError::PalletNameNotFound(self.pallet_name.to_string()))?; let call = pallet .call_variant_by_name(&self.call_name) - .ok_or_else(|| MetadataError::CallNameNotFound((*self.call_name).to_owned()))?; + .ok_or_else(|| ExtrinsicError::CallNameNotFound { + pallet_name: pallet.name().to_string(), + call_name: self.call_name.to_string(), + })?; let pallet_index = pallet.index(); let call_index = call.index; @@ -182,7 +194,8 @@ impl Payload for DefaultPayload { .map(|f| scale_encode::Field::new(f.ty.id, f.name.as_deref())); self.call_data - .encode_as_fields_to(&mut fields, metadata.types(), out)?; + .encode_as_fields_to(&mut fields, metadata.types(), out) + .map_err(ExtrinsicError::CannotEncodeCallData)?; Ok(()) } @@ -208,7 +221,7 @@ pub fn dynamic( #[cfg(test)] mod tests { use super::*; - use crate::metadata::Metadata; + use crate::Metadata; use codec::Decode; use scale_value::Composite; diff --git a/core/src/utils/mod.rs b/core/src/utils/mod.rs index 42c499f85c4..e739f9f6c0f 100644 --- a/core/src/utils/mod.rs +++ b/core/src/utils/mod.rs @@ -13,6 +13,7 @@ mod multi_signature; mod static_type; mod unchecked_extrinsic; mod wrapper_opaque; +mod 
yesnomaybe; use alloc::borrow::ToOwned; use alloc::format; @@ -30,6 +31,7 @@ pub use primitive_types::{H160, H256, H512}; pub use static_type::Static; pub use unchecked_extrinsic::UncheckedExtrinsic; pub use wrapper_opaque::WrapperKeepOpaque; +pub use yesnomaybe::{Maybe, No, NoMaybe, Yes, YesMaybe, YesNo}; /// Wraps an already encoded byte vector, prevents being encoded as a raw byte vector as part of /// the transaction payload @@ -73,9 +75,6 @@ unsafe impl Sync for PhantomDataSendSync {} /// as `BTreeMap` which allows us to easily swap the two during codegen. pub type KeyedVec = Vec<(K, V)>; -/// A unit marker struct. -pub struct Yes; - /// A quick helper to encode some bytes to hex. pub fn to_hex(bytes: impl AsRef<[u8]>) -> String { format!("0x{}", hex::encode(bytes.as_ref())) diff --git a/core/src/utils/yesnomaybe.rs b/core/src/utils/yesnomaybe.rs new file mode 100644 index 00000000000..18a878d9423 --- /dev/null +++ b/core/src/utils/yesnomaybe.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +/// A unit marker enum. +pub enum Yes {} +/// A unit marker enum. +pub enum Maybe {} +/// A unit marker enum. +pub enum No {} + +/// This is implemented for [`Yes`] and [`No`] and +/// allows us to check at runtime which of these types is present. +pub trait YesNo { + /// [`Yes`] + fn is_yes() -> bool { + false + } + /// [`No`] + fn is_no() -> bool { + false + } +} + +impl YesNo for Yes { + fn is_yes() -> bool { + true + } +} +impl YesNo for No { + fn is_no() -> bool { + true + } +} + +/// This is implemented for [`Yes`] and [`Maybe`] and +/// allows us to check at runtime which of these types is present. 
+pub trait YesMaybe { + /// [`Yes`] + fn is_yes() -> bool { + false + } + /// [`Maybe`] + fn is_maybe() -> bool { + false + } +} + +impl YesMaybe for Yes { + fn is_yes() -> bool { + true + } +} +impl YesMaybe for Maybe { + fn is_maybe() -> bool { + true + } +} + +/// This is implemented for [`No`] and [`Maybe`] and +/// allows us to check at runtime which of these types is present. +pub trait NoMaybe { + /// [`No`] + fn is_no() -> bool { + false + } + /// [`Maybe`] + fn is_maybe() -> bool { + false + } +} + +impl NoMaybe for No { + fn is_no() -> bool { + true + } +} +impl NoMaybe for Maybe { + fn is_maybe() -> bool { + true + } +} diff --git a/core/src/view_functions/mod.rs b/core/src/view_functions/mod.rs index 3d422804fba..037ba6c346b 100644 --- a/core/src/view_functions/mod.rs +++ b/core/src/view_functions/mod.rs @@ -7,28 +7,38 @@ pub mod payload; -use crate::error::{Error, MetadataError}; -use crate::metadata::{DecodeWithMetadata, Metadata}; +use crate::Metadata; +use crate::error::ViewFunctionError; use alloc::vec::Vec; use payload::Payload; +use scale_decode::IntoVisitor; /// Run the validation logic against some View Function payload you'd like to use. Returns `Ok(())` /// if the payload is valid (or if it's not possible to check since the payload has no validation hash). 
/// Return an error if the payload was not valid or something went wrong trying to validate it (ie /// the View Function in question do not exist at all) -pub fn validate(payload: &P, metadata: &Metadata) -> Result<(), Error> { - let Some(static_hash) = payload.validation_hash() else { +pub fn validate(payload: &P, metadata: &Metadata) -> Result<(), ViewFunctionError> { + let Some(hash) = payload.validation_hash() else { return Ok(()); }; + let pallet_name = payload.pallet_name(); + let function_name = payload.function_name(); + let view_function = metadata - .view_function_by_query_id(payload.query_id()) - .ok_or_else(|| MetadataError::ViewFunctionNotFound(*payload.query_id()))?; - if static_hash != view_function.hash() { - return Err(MetadataError::IncompatibleCodegen.into()); - } + .pallet_by_name(pallet_name) + .ok_or_else(|| ViewFunctionError::PalletNotFound(pallet_name.to_string()))? + .view_function_by_name(function_name) + .ok_or_else(|| ViewFunctionError::ViewFunctionNotFound { + pallet_name: pallet_name.to_string(), + function_name: function_name.to_string(), + })?; - Ok(()) + if hash != view_function.hash() { + Err(ViewFunctionError::IncompatibleCodegen) + } else { + Ok(()) + } } /// The name of the Runtime API call which can execute @@ -36,17 +46,20 @@ pub const CALL_NAME: &str = "RuntimeViewFunction_execute_view_function"; /// Encode the bytes that will be passed to the "execute_view_function" Runtime API call, /// to execute the View Function represented by the given payload. 
-pub fn call_args(payload: &P, metadata: &Metadata) -> Result, Error> { - let mut call_args = Vec::with_capacity(32); - call_args.extend_from_slice(payload.query_id()); - - let mut call_arg_params = Vec::new(); - payload.encode_args_to(metadata, &mut call_arg_params)?; - - use codec::Encode; - call_arg_params.encode_to(&mut call_args); +pub fn call_args( + payload: &P, + metadata: &Metadata, +) -> Result, ViewFunctionError> { + let inputs = frame_decode::view_functions::encode_view_function_inputs( + payload.pallet_name(), + payload.function_name(), + payload.args(), + metadata, + metadata.types(), + ) + .map_err(ViewFunctionError::CouldNotEncodeInputs)?; - Ok(call_args) + Ok(inputs) } /// Decode the value bytes at the location given by the provided View Function payload. @@ -54,16 +67,16 @@ pub fn decode_value( bytes: &mut &[u8], payload: &P, metadata: &Metadata, -) -> Result { - let view_function = metadata - .view_function_by_query_id(payload.query_id()) - .ok_or_else(|| MetadataError::ViewFunctionNotFound(*payload.query_id()))?; - - let val = ::decode_with_metadata( - &mut &bytes[..], - view_function.output_ty(), +) -> Result { + let value = frame_decode::view_functions::decode_view_function_response( + payload.pallet_name(), + payload.function_name(), + bytes, metadata, - )?; + metadata.types(), + P::ReturnType::into_visitor(), + ) + .map_err(ViewFunctionError::CouldNotDecodeResponse)?; - Ok(val) + Ok(value) } diff --git a/core/src/view_functions/payload.rs b/core/src/view_functions/payload.rs index a231d519c09..835b4a50531 100644 --- a/core/src/view_functions/payload.rs +++ b/core/src/view_functions/payload.rs @@ -5,17 +5,11 @@ //! This module contains the trait and types used to represent //! View Function calls that can be made. 
-use alloc::vec::Vec; +use alloc::borrow::Cow; use core::marker::PhantomData; use derive_where::derive_where; -use scale_encode::EncodeAsFields; -use scale_value::Composite; - -use crate::Error; -use crate::dynamic::DecodedValueThunk; -use crate::error::MetadataError; - -use crate::metadata::{DecodeWithMetadata, Metadata}; +use frame_decode::view_functions::IntoEncodableValues; +use scale_decode::DecodeAsType; /// This represents a View Function payload that can call into the runtime of node. /// @@ -33,24 +27,19 @@ use crate::metadata::{DecodeWithMetadata, Metadata}; /// /// Each argument of the View Function must be scale-encoded. pub trait Payload { + /// Type of the arguments for this call. + type ArgsType: IntoEncodableValues; /// The return type of the function call. - // Note: `DecodeWithMetadata` is needed to decode the function call result - // with the `subxt::Metadata. - type ReturnType: DecodeWithMetadata; - - /// The payload target. - fn query_id(&self) -> &[u8; 32]; - - /// Scale encode the arguments data. - fn encode_args_to(&self, metadata: &Metadata, out: &mut Vec) -> Result<(), Error>; - - /// Encode arguments data and return the output. This is a convenience - /// wrapper around [`Payload::encode_args_to`]. - fn encode_args(&self, metadata: &Metadata) -> Result, Error> { - let mut v = Vec::new(); - self.encode_args_to(metadata, &mut v)?; - Ok(v) - } + type ReturnType: DecodeAsType; + + /// The View Function pallet name. + fn pallet_name(&self) -> &str; + + /// The View Function function name. + fn function_name(&self) -> &str; + + /// The arguments. + fn args(&self) -> &Self::ArgsType; /// Returns the statically generated validation hash. fn validation_hash(&self) -> Option<[u8; 32]> { @@ -59,44 +48,38 @@ pub trait Payload { } /// A View Function payload containing the generic argument data -/// and interpreting the result of the call as `ReturnTy`. +/// and interpreting the result of the call as `ReturnType`. 
/// /// This can be created from static values (ie those generated /// via the `subxt` macro) or dynamic values via [`dynamic`]. -#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsData)] -pub struct DefaultPayload { - query_id: [u8; 32], - args_data: ArgsData, +#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsType)] +pub struct StaticPayload { + pallet_name: Cow<'static, str>, + function_name: Cow<'static, str>, + args: ArgsType, validation_hash: Option<[u8; 32]>, - _marker: PhantomData, + _marker: PhantomData, } -/// A statically generated View Function payload. -pub type StaticPayload = DefaultPayload; /// A dynamic View Function payload. -pub type DynamicPayload = DefaultPayload, DecodedValueThunk>; +pub type DynamicPayload = StaticPayload; -impl Payload - for DefaultPayload +impl Payload + for StaticPayload { - type ReturnType = ReturnTy; + type ArgsType = ArgsType; + type ReturnType = ReturnType; - fn query_id(&self) -> &[u8; 32] { - &self.query_id + fn pallet_name(&self) -> &str { + &self.pallet_name } - fn encode_args_to(&self, metadata: &Metadata, out: &mut Vec) -> Result<(), Error> { - let view_function = metadata - .view_function_by_query_id(&self.query_id) - .ok_or(MetadataError::ViewFunctionNotFound(self.query_id))?; - let mut fields = view_function - .inputs() - .map(|input| scale_encode::Field::named(input.ty, &input.name)); - - self.args_data - .encode_as_fields_to(&mut fields, metadata.types(), out)?; + fn function_name(&self) -> &str { + &self.function_name + } - Ok(()) + fn args(&self) -> &Self::ArgsType { + &self.args } fn validation_hash(&self) -> Option<[u8; 32]> { @@ -104,30 +87,37 @@ impl Payload } } -impl DefaultPayload { - /// Create a new [`DefaultPayload`] for a View Function call. - pub fn new(query_id: [u8; 32], args_data: ArgsData) -> Self { - DefaultPayload { - query_id, - args_data, +impl StaticPayload { + /// Create a new [`StaticPayload`] for a View Function call. 
+ pub fn new( + pallet_name: impl Into, + function_name: impl Into, + args: ArgsType, + ) -> Self { + StaticPayload { + pallet_name: pallet_name.into().into(), + function_name: function_name.into().into(), + args, validation_hash: None, _marker: PhantomData, } } - /// Create a new static [`DefaultPayload`] for a View Function call + /// Create a new static [`StaticPayload`] for a View Function call /// using static function name and scale-encoded argument data. /// /// This is only expected to be used from codegen. #[doc(hidden)] pub fn new_static( - query_id: [u8; 32], - args_data: ArgsData, + pallet_name: &'static str, + function_name: &'static str, + args: ArgsType, hash: [u8; 32], - ) -> DefaultPayload { - DefaultPayload { - query_id, - args_data, + ) -> StaticPayload { + StaticPayload { + pallet_name: Cow::Borrowed(pallet_name), + function_name: Cow::Borrowed(function_name), + args, validation_hash: Some(hash), _marker: core::marker::PhantomData, } @@ -140,14 +130,13 @@ impl DefaultPayload { ..self } } - - /// Returns the arguments data. - pub fn args_data(&self) -> &ArgsData { - &self.args_data - } } /// Create a new [`DynamicPayload`] to call a View Function. 
-pub fn dynamic(query_id: [u8; 32], args_data: impl Into>) -> DynamicPayload { - DefaultPayload::new(query_id, args_data.into()) +pub fn dynamic( + pallet_name: impl Into, + function_name: impl Into, + args: ArgsType, +) -> DynamicPayload { + DynamicPayload::new(pallet_name, function_name, args) } diff --git a/historic/examples/extrinsics.rs b/historic/examples/extrinsics.rs index 019b33c6508..82ba674b895 100644 --- a/historic/examples/extrinsics.rs +++ b/historic/examples/extrinsics.rs @@ -43,7 +43,7 @@ async fn main() -> Result<(), Error> { println!( " {}: {}", field.name(), - field.decode::().unwrap() + field.decode_as::().unwrap() ); } @@ -53,7 +53,7 @@ async fn main() -> Result<(), Error> { extrinsic .call() .fields() - .decode::>() + .decode_as::>() .unwrap() ); @@ -66,14 +66,14 @@ async fn main() -> Result<(), Error> { println!( " {}: {}", extension.name(), - extension.decode::().unwrap() + extension.decode_as::().unwrap() ); } // Or all of them at once: println!( " All: {}", - extensions.decode::>().unwrap() + extensions.decode_as::>().unwrap() ); } } diff --git a/historic/examples/storage.rs b/historic/examples/storage.rs index 2798b73c89b..8dacf4c1698 100644 --- a/historic/examples/storage.rs +++ b/historic/examples/storage.rs @@ -19,25 +19,22 @@ async fn main() -> Result<(), Error> { let client_at_block = client.at(block_number).await?; // We'll work the account balances at the given block, for this example. - let account_balances = client_at_block - .storage() - .entry("System", "Account")? - .into_map()?; + let account_balances = client_at_block.storage().entry("System", "Account")?; // We can see the default value for this entry at this block, if one exists. 
- if let Some(default_value) = account_balances.default() { - let default_balance_info = default_value.decode::()?; + if let Some(default_value) = account_balances.default_value() { + let default_balance_info = default_value.decode_as::()?; println!(" Default balance info: {default_balance_info}"); } // We can fetch a specific account balance by its key, like so (here I just picked a random key // I knew to exist from iterating over storage entries): let account_id_hex = "9a4d0faa2ba8c3cc5711852960940793acf55bf195b6eecf88fa78e961d0ce4a"; - let account_id = hex::decode(account_id_hex).unwrap(); + let account_id: [u8; 32] = hex::decode(account_id_hex).unwrap().try_into().unwrap(); if let Some(entry) = account_balances.fetch((account_id,)).await? { // We can decode the value into our generic `scale_value::Value` type, which can // represent any SCALE-encoded value, like so: - let _balance_info = entry.decode::()?; + let _balance_info = entry.decode_as::()?; // Or, if we know what shape to expect, we can decode the parts of the value that we care // about directly into a static type, which is more efficient and allows easy type-safe @@ -53,7 +50,7 @@ async fn main() -> Result<(), Error> { misc_frozen: u128, fee_frozen: u128, } - let balance_info = entry.decode::()?; + let balance_info = entry.decode_as::()?; println!( " Single balance info from {account_id_hex} => free: {} reserved: {} misc_frozen: {} fee_frozen: {}", @@ -72,23 +69,38 @@ async fn main() -> Result<(), Error> { let mut all_balances = account_balances.iter(()).await?.take(10); while let Some(entry) = all_balances.next().await { let entry = entry?; - let key = entry.decode_key()?; + let key = entry.key()?; // Decode the account ID from the key (we know here that we're working // with a map which has one value, an account ID, so we just decode that part: let account_id = key .part(0) .unwrap() - .decode::<[u8; 32]>()? + .decode_as::<[u8; 32]>()? 
.expect("We expect this key to decode into a 32 byte AccountId"); let account_id_hex = hex::encode(account_id); // Decode these values into our generic scale_value::Value type. Less efficient than // defining a static type as above, but easier for the sake of the example. - let balance_info = entry.decode_value::()?; + let balance_info = entry.value().decode_as::()?; println!(" {account_id_hex} => {balance_info}"); } + + // We can also chain things together to fetch and decode a value in one go. + let _val = client_at_block + .storage() + .entry("System", "Account")? + .fetch((account_id,)) + .await? + .unwrap() + .decode_as::()?; + + let _vals = client_at_block + .storage() + .entry("System", "Account")? + .iter(()) + .await?; } Ok(()) diff --git a/historic/src/error.rs b/historic/src/error.rs index ed49961d64c..59305f2721d 100644 --- a/historic/src/error.rs +++ b/historic/src/error.rs @@ -21,10 +21,6 @@ pub enum Error { #[error(transparent)] StorageError(#[from] StorageError), #[error(transparent)] - StorageEntryIsNotAMap(#[from] StorageEntryIsNotAMap), - #[error(transparent)] - StorageEntryIsNotAPlainValue(#[from] StorageEntryIsNotAPlainValue), - #[error(transparent)] StorageKeyError(#[from] StorageKeyError), #[error(transparent)] StorageValueError(#[from] StorageValueError), @@ -214,22 +210,22 @@ pub enum ExtrinsicCallError { #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] -#[error("Storage entry is not a map: pallet {pallet_name}, storage {storage_name}")] +#[error("Storage entry is not a map: pallet {pallet_name}, storage {entry_name}")] pub struct StorageEntryIsNotAMap { /// The pallet containing the storage entry that was not found. pub pallet_name: String, /// The storage entry that was not found. 
- pub storage_name: String, + pub entry_name: String, } #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] -#[error("Storage entry is not a plain value: pallet {pallet_name}, storage {storage_name}")] +#[error("Storage entry is not a plain value: pallet {pallet_name}, storage {entry_name}")] pub struct StorageEntryIsNotAPlainValue { /// The pallet containing the storage entry that was not found. pub pallet_name: String, /// The storage entry that was not found. - pub storage_name: String, + pub entry_name: String, } #[allow(missing_docs)] @@ -252,14 +248,23 @@ pub enum StorageError { reason: frame_decode::storage::StorageKeyEncodeError, }, #[error( - "Too many keys provided: expected {num_keys_expected} keys, but got {num_keys_provided}" + "Wrong number of keys provided to fetch a value: expected {num_keys_expected} keys, but got {num_keys_provided}" )] - WrongNumberOfKeysProvided { + WrongNumberOfKeysProvidedForFetch { /// The number of keys that were provided. num_keys_provided: usize, /// The number of keys expected. num_keys_expected: usize, }, + #[error( + "too many keys were provided to iterate over a storage entry: expected at most {max_keys_expected} keys, but got {num_keys_provided}" + )] + TooManyKeysProvidedForIter { + /// The number of keys that were provided. + num_keys_provided: usize, + /// The maximum number of keys that we expect. 
+ max_keys_expected: usize, + }, #[error( "Could not extract storage information from metadata: Unsupported metadata version ({version})" )] @@ -295,6 +300,10 @@ pub enum StorageKeyError { index: usize, reason: scale_decode::Error, }, + #[error("Could not decode values out of the storage key: {reason}")] + DecodeKeyValueError { + reason: frame_decode::storage::StorageKeyValueDecodeError, + }, } #[allow(missing_docs)] diff --git a/historic/src/extrinsics.rs b/historic/src/extrinsics.rs index 30239da842d..cfe1acb34e5 100644 --- a/historic/src/extrinsics.rs +++ b/historic/src/extrinsics.rs @@ -7,7 +7,7 @@ mod extrinsic_info; mod extrinsic_transaction_extensions; mod extrinsics_type; -pub use extrinsic_transaction_extensions::ExtrinsicTransactionExtensions; +pub use extrinsic_transaction_extensions::ExtrinsicExtrinsicParams; pub use extrinsics_type::{Extrinsic, Extrinsics}; /// Work with extrinsics. diff --git a/historic/src/extrinsics/extrinsic_call.rs b/historic/src/extrinsics/extrinsic_call.rs index 31ffce61513..f42339cd734 100644 --- a/historic/src/extrinsics/extrinsic_call.rs +++ b/historic/src/extrinsics/extrinsic_call.rs @@ -98,7 +98,7 @@ impl<'extrinsics, 'atblock> ExtrinsicCallFields<'extrinsics, 'atblock> { } /// Attempt to decode the fields into the given type. - pub fn decode(&self) -> Result { + pub fn decode_as(&self) -> Result { with_info!(&self.info => { let cursor = &mut self.bytes(); let mut fields = &mut info.info.call_data().map(|named_arg| { @@ -156,7 +156,7 @@ impl<'extrinsics, 'atblock> ExtrinsicCallField<'extrinsics, 'atblock> { } /// Attempt to decode the value of this field into the given type. 
- pub fn decode(&self) -> Result { + pub fn decode_as(&self) -> Result { with_call_field_info!(&self.info => { let cursor = &mut &*self.field_bytes; let decoded = T::decode_as_type(cursor, info.info.ty().clone(), info.resolver) diff --git a/historic/src/extrinsics/extrinsic_transaction_extensions.rs b/historic/src/extrinsics/extrinsic_transaction_extensions.rs index 28c8747548a..a2c3aca84e0 100644 --- a/historic/src/extrinsics/extrinsic_transaction_extensions.rs +++ b/historic/src/extrinsics/extrinsic_transaction_extensions.rs @@ -16,7 +16,7 @@ struct ExtrinsicExtensionsInfo<'extrinsics, 'atblock, TypeId, Resolver> { } /// This represents the transaction extensions of an extrinsic. -pub struct ExtrinsicTransactionExtensions<'extrinsics, 'atblock> { +pub struct ExtrinsicExtrinsicParams<'extrinsics, 'atblock> { all_bytes: &'extrinsics [u8], info: AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock>, } @@ -31,7 +31,7 @@ macro_rules! with_extensions_info { }; } -impl<'extrinsics, 'atblock> ExtrinsicTransactionExtensions<'extrinsics, 'atblock> { +impl<'extrinsics, 'atblock> ExtrinsicExtrinsicParams<'extrinsics, 'atblock> { pub(crate) fn new( all_bytes: &'extrinsics [u8], info: &'extrinsics AnyExtrinsicInfo<'atblock>, @@ -105,7 +105,7 @@ impl<'extrinsics, 'atblock> ExtrinsicTransactionExtensions<'extrinsics, 'atblock /// Attempt to decode the transaction extensions into a type where each field name is the name of the transaction /// extension and the field value is the decoded extension. - pub fn decode( + pub fn decode_as( &self, ) -> Result { with_extensions_info!(&self.info => { @@ -189,7 +189,7 @@ impl<'extrinsics, 'atblock> ExtrinsicTransactionExtension<'extrinsics, 'atblock> } /// Decode the bytes for this transaction extension into a type that implements `scale_decode::DecodeAsType`. 
- pub fn decode( + pub fn decode_as( &self, ) -> Result { with_extension_info!(&self.info => { diff --git a/historic/src/extrinsics/extrinsics_type.rs b/historic/src/extrinsics/extrinsics_type.rs index 4119f9b6b15..4f0dfe92891 100644 --- a/historic/src/extrinsics/extrinsics_type.rs +++ b/historic/src/extrinsics/extrinsics_type.rs @@ -1,6 +1,6 @@ use super::extrinsic_call::ExtrinsicCall; use super::extrinsic_info::{AnyExtrinsicInfo, with_info}; -use super::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions; +use super::extrinsic_transaction_extensions::ExtrinsicExtrinsicParams; use crate::client::OfflineClientAtBlockT; use crate::config::Config; use crate::error::ExtrinsicsError; @@ -106,8 +106,8 @@ impl<'extrinsics, 'atblock> Extrinsic<'extrinsics, 'atblock> { /// Get information about the transaction extensions of this extrinsic. pub fn transaction_extensions( &self, - ) -> Option> { - ExtrinsicTransactionExtensions::new(self.bytes, self.info) + ) -> Option> { + ExtrinsicExtrinsicParams::new(self.bytes, self.info) } } diff --git a/historic/src/storage.rs b/historic/src/storage.rs index 14496445fa7..3cce7b2feed 100644 --- a/historic/src/storage.rs +++ b/historic/src/storage.rs @@ -5,16 +5,17 @@ mod storage_value; use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT}; use crate::config::Config; -use crate::error::{StorageEntryIsNotAMap, StorageEntryIsNotAPlainValue, StorageError}; +use crate::error::StorageError; use crate::storage::storage_info::with_info; use std::borrow::Cow; +use std::sync::Arc; use storage_info::AnyStorageInfo; pub use storage_entry::StorageEntry; pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart}; pub use storage_value::StorageValue; // We take how storage keys can be passed in from `frame-decode`, so re-export here. -pub use frame_decode::storage::{IntoStorageKeys, StorageKeys}; +pub use frame_decode::storage::{EncodableValues, IntoEncodableValues}; /// Work with storage. 
pub struct StorageClient<'atblock, Client, T> { @@ -33,44 +34,34 @@ impl<'atblock, Client, T> StorageClient<'atblock, Client, T> { } // Things that we can do offline with storage. -impl<'atblock, 'client: 'atblock, Client, T> StorageClient<'atblock, Client, T> +impl<'atblock, Client, T> StorageClient<'atblock, Client, T> where - T: Config + 'client, - Client: OfflineClientAtBlockT<'client, T>, + T: Config + 'atblock, + Client: OfflineClientAtBlockT<'atblock, T>, { /// Select the storage entry you'd like to work with. pub fn entry( &self, pallet_name: impl Into, - storage_name: impl Into, + entry_name: impl Into, ) -> Result, StorageError> { let pallet_name = pallet_name.into(); - let storage_name = storage_name.into(); + let entry_name = entry_name.into(); let storage_info = AnyStorageInfo::new( &pallet_name, - &storage_name, + &entry_name, self.client.metadata(), self.client.legacy_types(), )?; - if storage_info.is_map() { - Ok(StorageEntryClient::Map(StorageEntryMapClient { - client: self.client, - pallet_name, - storage_name, - info: storage_info, - marker: std::marker::PhantomData, - })) - } else { - Ok(StorageEntryClient::Plain(StorageEntryPlainClient { - client: self.client, - pallet_name, - storage_name, - info: storage_info, - marker: std::marker::PhantomData, - })) - } + Ok(StorageEntryClient { + client: self.client, + pallet_name, + entry_name, + info: Arc::new(storage_info), + marker: std::marker::PhantomData, + }) } /// Iterate over all of the storage entries listed in the metadata for the current block. This does **not** include well known @@ -78,6 +69,7 @@ where pub fn entries(&self) -> impl Iterator> { let client = self.client; let metadata = client.metadata(); + frame_decode::helpers::list_storage_entries_any(metadata).map(|entry| StorageEntriesItem { entry, client: self.client, @@ -88,24 +80,24 @@ where /// Working with a specific storage entry. 
pub struct StorageEntriesItem<'atblock, Client, T> { - entry: frame_decode::helpers::StorageEntry<'atblock>, + entry: frame_decode::storage::StorageEntry<'atblock>, client: &'atblock Client, marker: std::marker::PhantomData, } -impl<'atblock, 'client: 'atblock, Client, T> StorageEntriesItem<'atblock, Client, T> +impl<'atblock, Client, T> StorageEntriesItem<'atblock, Client, T> where - T: Config + 'client, - Client: OfflineClientAtBlockT<'client, T>, + T: Config + 'atblock, + Client: OfflineClientAtBlockT<'atblock, T>, { /// The pallet name. pub fn pallet_name(&self) -> &str { - self.entry.pallet() + &self.entry.pallet_name } /// The storage entry name. - pub fn storage_name(&self) -> &str { - self.entry.entry() + pub fn entry_name(&self) -> &str { + &self.entry.storage_entry } /// Extract the relevant storage information so that we can work with this entry. @@ -115,87 +107,22 @@ where marker: std::marker::PhantomData, } .entry( - self.entry.pallet().to_owned(), - self.entry.entry().to_owned(), + self.entry.pallet_name.clone(), + self.entry.storage_entry.clone(), ) } } -/// A client for working with a specific storage entry. This is an enum because the storage entry -/// might be either a map or a plain value, and each has a different interface. -pub enum StorageEntryClient<'atblock, Client, T> { - Plain(StorageEntryPlainClient<'atblock, Client, T>), - Map(StorageEntryMapClient<'atblock, Client, T>), -} - -impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OfflineClientAtBlockT<'atblock, T>, -{ - /// Get the pallet name. - pub fn pallet_name(&self) -> &str { - match self { - StorageEntryClient::Plain(client) => &client.pallet_name, - StorageEntryClient::Map(client) => &client.pallet_name, - } - } - - /// Get the storage entry name. 
- pub fn storage_name(&self) -> &str { - match self { - StorageEntryClient::Plain(client) => &client.storage_name, - StorageEntryClient::Map(client) => &client.storage_name, - } - } - - /// Is the storage entry a plain value? - pub fn is_plain(&self) -> bool { - matches!(self, StorageEntryClient::Plain(_)) - } - - /// Is the storage entry a map? - pub fn is_map(&self) -> bool { - matches!(self, StorageEntryClient::Map(_)) - } - - /// If this storage entry is a plain value, return the client for working with it. Else return an error. - pub fn into_plain( - self, - ) -> Result, StorageEntryIsNotAPlainValue> { - match self { - StorageEntryClient::Plain(client) => Ok(client), - StorageEntryClient::Map(_) => Err(StorageEntryIsNotAPlainValue { - pallet_name: self.pallet_name().into(), - storage_name: self.storage_name().into(), - }), - } - } - - /// If this storage entry is a map, return the client for working with it. Else return an error. - pub fn into_map( - self, - ) -> Result, StorageEntryIsNotAMap> { - match self { - StorageEntryClient::Plain(_) => Err(StorageEntryIsNotAMap { - pallet_name: self.pallet_name().into(), - storage_name: self.storage_name().into(), - }), - StorageEntryClient::Map(client) => Ok(client), - } - } -} - -/// A client for working with a plain storage entry. -pub struct StorageEntryPlainClient<'atblock, Client, T> { +/// A client for working with a specific storage entry. +pub struct StorageEntryClient<'atblock, Client, T> { client: &'atblock Client, pallet_name: String, - storage_name: String, - info: AnyStorageInfo<'atblock>, + entry_name: String, + info: Arc>, marker: std::marker::PhantomData, } -impl<'atblock, Client, T> StorageEntryPlainClient<'atblock, Client, T> +impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T> where T: Config + 'atblock, Client: OfflineClientAtBlockT<'atblock, T>, @@ -206,134 +133,72 @@ where } /// Get the storage entry name. 
- pub fn storage_name(&self) -> &str { - &self.storage_name + pub fn entry_name(&self) -> &str { + &self.entry_name } - /// Return the default value for this storage entry, if there is one. Returns `None` if there - /// is no default value. - pub fn default(&self) -> Option> { - with_info!(info = &self.info => { - info.info.default_value.as_ref().map(|default_value| { - StorageValue::new(&self.info, default_value.clone()) - }) - }) - } -} - -impl<'atblock, Client, T> StorageEntryPlainClient<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OnlineClientAtBlockT<'atblock, T>, -{ - /// Fetch the value for this storage entry. - pub async fn fetch(&self) -> Result>, StorageError> { - let key_bytes = self.key(); - fetch(self.client, &key_bytes) - .await - .map(|v| v.map(|bytes| StorageValue::new(&self.info, Cow::Owned(bytes)))) - } - - /// Fetch the value for this storage entry as per [`StorageEntryPlainClient::fetch`], but return the default - /// value for the storage entry if one exists and the entry does not exist. - pub async fn fetch_or_default( - &self, - ) -> Result>, StorageError> { - self.fetch() - .await - .map(|option_val| option_val.or_else(|| self.default())) - } - - /// The key for this storage entry. - pub fn key(&self) -> [u8; 32] { + /// The key which points to this storage entry (but not necessarily any values within it). + pub fn key_prefix(&self) -> [u8; 32] { let pallet_name = &*self.pallet_name; - let storage_name = &*self.storage_name; + let entry_name = &*self.entry_name; - frame_decode::storage::encode_prefix(pallet_name, storage_name) - } -} - -/// A client for working with a storage entry that is a map. 
-pub struct StorageEntryMapClient<'atblock, Client, T> { - client: &'atblock Client, - pallet_name: String, - storage_name: String, - info: AnyStorageInfo<'atblock>, - marker: std::marker::PhantomData, -} - -impl<'atblock, Client, T> StorageEntryMapClient<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OfflineClientAtBlockT<'atblock, T>, -{ - /// Get the pallet name. - pub fn pallet_name(&self) -> &str { - &self.pallet_name - } - - /// Get the storage entry name. - pub fn storage_name(&self) -> &str { - &self.storage_name + frame_decode::storage::encode_storage_key_prefix(pallet_name, entry_name) } /// Return the default value for this storage entry, if there is one. Returns `None` if there /// is no default value. - pub fn default(&self) -> Option> { - with_info!(info = &self.info => { + pub fn default_value(&self) -> Option> { + with_info!(info = &*self.info => { info.info.default_value.as_ref().map(|default_value| { - StorageValue::new(&self.info, default_value.clone()) + StorageValue::new(self.info.clone(), default_value.clone()) }) }) } } -impl<'atblock, Client, T> StorageEntryMapClient<'atblock, Client, T> +impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T> where T: Config + 'atblock, Client: OnlineClientAtBlockT<'atblock, T>, { /// Fetch a specific key in this map. If the number of keys provided is not equal /// to the number of keys required to fetch a single value from the map, then an error - /// will be emitted. - pub async fn fetch( + /// will be emitted. If no value exists but there is a default value for this storage + /// entry, then the default value will be returned. Else, `None` will be returned. 
+ pub async fn fetch( &self, keys: Keys, - ) -> Result>, StorageError> { - let expected_num_keys = with_info!(info = &self.info => { + ) -> Result>, StorageError> { + let expected_num_keys = with_info!(info = &*self.info => { info.info.keys.len() }); - if expected_num_keys != keys.num_keys() { - return Err(StorageError::WrongNumberOfKeysProvided { - num_keys_provided: keys.num_keys(), + // For fetching, we need exactly as many keys as exist for a storage entry. + if expected_num_keys != keys.num_encodable_values() { + return Err(StorageError::WrongNumberOfKeysProvidedForFetch { + num_keys_provided: keys.num_encodable_values(), num_keys_expected: expected_num_keys, }); } let key_bytes = self.key(keys)?; - fetch(self.client, &key_bytes) - .await - .map(|v| v.map(|bytes| StorageValue::new(&self.info, Cow::Owned(bytes)))) - } + let info = self.info.clone(); + let value = fetch(self.client, &key_bytes) + .await? + .map(|bytes| StorageValue::new(info, Cow::Owned(bytes))) + .or_else(|| self.default_value()); - /// Fetch a specific key in this map as per [`StorageEntryMapClient::fetch`], but return the default - /// value for the storage entry if one exists and the entry was not found. - pub async fn fetch_or_default( - &self, - keys: Keys, - ) -> Result>, StorageError> { - self.fetch(keys) - .await - .map(|option_val| option_val.or_else(|| self.default())) + Ok(value) } /// Iterate over the values underneath the provided keys. - pub async fn iter( + pub async fn iter( &self, keys: Keys, ) -> Result< - impl futures::Stream, StorageError>> + Unpin, + impl futures::Stream, StorageError>> + + Unpin + + use<'atblock, Client, T, Keys>, StorageError, > { use futures::stream::StreamExt; @@ -341,6 +206,19 @@ where ArchiveStorageEvent, StorageQuery, StorageQueryType, }; + let expected_num_keys = with_info!(info = &*self.info => { + info.info.keys.len() + }); + + // For iterating, we need at most one less key than the number that exists for a storage entry. 
+ // TODO: The error message will be confusing if == keys are provided! + if keys.num_encodable_values() >= expected_num_keys { + return Err(StorageError::TooManyKeysProvidedForIter { + num_keys_provided: keys.num_encodable_values(), + max_keys_expected: expected_num_keys - 1, + }); + } + let block_hash = self.client.block_hash(); let key_bytes = self.key(keys)?; @@ -356,23 +234,22 @@ where .await .map_err(|e| StorageError::RpcError { reason: e })?; - let sub = sub.filter_map(async |item| { - let item = match item { - Ok(ArchiveStorageEvent::Item(item)) => item, - Ok(ArchiveStorageEvent::Error(err)) => { - return Some(Err(StorageError::StorageEventError { reason: err.error })); - } - Ok(ArchiveStorageEvent::Done) => return None, - Err(e) => return Some(Err(StorageError::RpcError { reason: e })), - }; - - item.value.map(|value| { - Ok(StorageEntry::new( - &self.info, - item.key.0, - Cow::Owned(value.0), - )) - }) + let info = self.info.clone(); + let sub = sub.filter_map(move |item| { + let info = info.clone(); + async move { + let item = match item { + Ok(ArchiveStorageEvent::Item(item)) => item, + Ok(ArchiveStorageEvent::Error(err)) => { + return Some(Err(StorageError::StorageEventError { reason: err.error })); + } + Ok(ArchiveStorageEvent::Done) => return None, + Err(e) => return Some(Err(StorageError::RpcError { reason: e })), + }; + + item.value + .map(|value| Ok(StorageEntry::new(info, item.key.0, Cow::Owned(value.0)))) + } }); Ok(Box::pin(sub)) @@ -384,16 +261,14 @@ where // Dev note: We don't have any functions that can take an already-encoded key and fetch an entry from // it yet, so we don't expose this. If we did expose it, we might want to return some struct that wraps // the key bytes and some metadata about them. Or maybe just fetch_raw and iter_raw. 
- fn key(&self, keys: Keys) -> Result, StorageError> { - with_info!(info = &self.info => { - let mut key_bytes = Vec::new(); - frame_decode::storage::encode_storage_key_with_info_to( + fn key(&self, keys: Keys) -> Result, StorageError> { + with_info!(info = &*self.info => { + let key_bytes = frame_decode::storage::encode_storage_key_with_info( &self.pallet_name, - &self.storage_name, + &self.entry_name, keys, &info.info, info.resolver, - &mut key_bytes, ).map_err(|e| StorageError::KeyEncodeError { reason: e })?; Ok(key_bytes) }) diff --git a/historic/src/storage/storage_entry.rs b/historic/src/storage/storage_entry.rs index 736b092fb01..90aa0f68409 100644 --- a/historic/src/storage/storage_entry.rs +++ b/historic/src/storage/storage_entry.rs @@ -1,21 +1,21 @@ use super::storage_info::AnyStorageInfo; use super::storage_key::StorageKey; use super::storage_value::StorageValue; -use crate::error::{StorageKeyError, StorageValueError}; -use scale_decode::DecodeAsType; +use crate::error::StorageKeyError; use std::borrow::Cow; +use std::sync::Arc; /// This represents a storage entry, which is a key-value pair in the storage. -pub struct StorageEntry<'entry, 'atblock> { +pub struct StorageEntry<'atblock> { key: Vec, // This contains the storage information already: - value: StorageValue<'entry, 'atblock>, + value: StorageValue<'atblock>, } -impl<'entry, 'atblock> StorageEntry<'entry, 'atblock> { +impl<'atblock> StorageEntry<'atblock> { /// Create a new storage entry. pub fn new( - info: &'entry AnyStorageInfo<'atblock>, + info: Arc>, key: Vec, value: Cow<'atblock, [u8]>, ) -> Self { @@ -30,11 +30,6 @@ impl<'entry, 'atblock> StorageEntry<'entry, 'atblock> { &self.key } - /// Get the raw bytes for this storage entry's value. - pub fn value_bytes(&self) -> &[u8] { - self.value.bytes() - } - /// Consume this storage entry and return the raw bytes for the key and value. 
pub fn into_key_and_value_bytes(self) -> (Vec, Vec) { (self.key, self.value.into_bytes()) @@ -42,12 +37,12 @@ impl<'entry, 'atblock> StorageEntry<'entry, 'atblock> { /// Decode the key for this storage entry. This gives back a type from which we can /// decode specific parts of the key hash (where applicable). - pub fn decode_key(&'_ self) -> Result, StorageKeyError> { - StorageKey::new(self.value.info, &self.key) + pub fn key(&'_ self) -> Result, StorageKeyError> { + StorageKey::new(&self.value.info, &self.key) } - /// Decode this storage value. - pub fn decode_value(&self) -> Result { - self.value.decode::() + /// Return the storage value. + pub fn value(&self) -> &StorageValue<'atblock> { + &self.value } } diff --git a/historic/src/storage/storage_info.rs b/historic/src/storage/storage_info.rs index 4c660890b39..76909f6bddd 100644 --- a/historic/src/storage/storage_info.rs +++ b/historic/src/storage/storage_info.rs @@ -44,7 +44,7 @@ impl<'atblock> AnyStorageInfo<'atblock> { Resolver: scale_type_resolver::TypeResolver, AnyStorageInfo<'atblock>: From>, { - m.get_storage_info(pallet_name, entry_name) + m.storage_info(pallet_name, entry_name) .map(|frame_storage_info| { let info = StorageInfo { info: frame_storage_info, @@ -84,7 +84,7 @@ impl<'atblock> From> } } -pub struct StorageInfo<'atblock, TypeId, Resolver> { +pub struct StorageInfo<'atblock, TypeId: Clone, Resolver> { pub info: frame_decode::storage::StorageInfo<'atblock, TypeId>, pub resolver: &'atblock Resolver, } diff --git a/historic/src/storage/storage_key.rs b/historic/src/storage/storage_key.rs index ad72f7f37c6..cbabe0e6a06 100644 --- a/historic/src/storage/storage_key.rs +++ b/historic/src/storage/storage_key.rs @@ -3,7 +3,7 @@ use crate::{error::StorageKeyError, storage::storage_info::with_info}; use scale_info_legacy::{LookupName, TypeRegistrySet}; // This is part of our public interface. 
-pub use frame_decode::storage::StorageHasher; +pub use frame_decode::storage::{IntoDecodableValues, StorageHasher}; enum AnyStorageKeyInfo<'atblock> { Legacy(StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>), @@ -78,6 +78,23 @@ impl<'entry, 'atblock> StorageKey<'entry, 'atblock> { }) } + /// Attempt to decode the values contained within this storage key to the `Target` type + /// provided. This type is typically a tuple of types which each implement [`scale_decode::DecodeAsType`] + /// and correspond to each of the key types present, in order. + pub fn decode_as(&self) -> Result { + with_key_info!(info = &self.info => { + let values = frame_decode::storage::decode_storage_key_values( + self.bytes, + &info.info, + info.resolver + ).map_err(|e| { + StorageKeyError::DecodeKeyValueError { reason: e } + })?; + + Ok(values) + }) + } + /// Iterate over the parts of this storage key. Each part of a storage key corresponds to a /// single value that has been hashed. pub fn parts(&'_ self) -> impl ExactSizeIterator> { @@ -137,7 +154,7 @@ impl<'key, 'entry, 'atblock> StorageKeyPart<'key, 'entry, 'atblock> { /// is available as a part of the key hash, allowing us to decode it into anything /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a /// different hasher, this will return `None`. - pub fn decode(&self) -> Result, StorageKeyError> { + pub fn decode_as(&self) -> Result, StorageKeyError> { with_key_info!(info = &self.info => { let part_info = &info.info[self.index]; let Some(value_info) = part_info.value() else { diff --git a/historic/src/storage/storage_value.rs b/historic/src/storage/storage_value.rs index f4f2704e50b..16a502f7b95 100644 --- a/historic/src/storage/storage_value.rs +++ b/historic/src/storage/storage_value.rs @@ -3,16 +3,17 @@ use super::storage_info::with_info; use crate::error::StorageValueError; use scale_decode::DecodeAsType; use std::borrow::Cow; +use std::sync::Arc; /// This represents a storage value. 
-pub struct StorageValue<'entry, 'atblock> { - pub(crate) info: &'entry AnyStorageInfo<'atblock>, +pub struct StorageValue<'atblock> { + pub(crate) info: Arc>, bytes: Cow<'atblock, [u8]>, } -impl<'entry, 'atblock> StorageValue<'entry, 'atblock> { +impl<'atblock> StorageValue<'atblock> { /// Create a new storage value. - pub fn new(info: &'entry AnyStorageInfo<'atblock>, bytes: Cow<'atblock, [u8]>) -> Self { + pub fn new(info: Arc>, bytes: Cow<'atblock, [u8]>) -> Self { Self { info, bytes } } @@ -27,8 +28,8 @@ impl<'entry, 'atblock> StorageValue<'entry, 'atblock> { } /// Decode this storage value. - pub fn decode(&self) -> Result { - with_info!(info = &self.info => { + pub fn decode_as(&self) -> Result { + with_info!(info = &*self.info => { let cursor = &mut &*self.bytes; let value = T::decode_as_type( diff --git a/metadata/benches/bench.rs b/metadata/benches/bench.rs index c9adb411091..64f5d6e9d92 100644 --- a/metadata/benches/bench.rs +++ b/metadata/benches/bench.rs @@ -74,11 +74,9 @@ fn bench_get_storage_hash(c: &mut Criterion) { }; for storage in storage_entries.entries() { - let storage_name = storage.name(); - let bench_name = format!("{pallet_name}/{storage_name}"); - group.bench_function(&bench_name, |b| { - b.iter(|| pallet.storage_hash(storage_name)) - }); + let entry_name = storage.name(); + let bench_name = format!("{pallet_name}/{entry_name}"); + group.bench_function(&bench_name, |b| b.iter(|| pallet.storage_hash(entry_name))); } } } diff --git a/metadata/src/from/mod.rs b/metadata/src/from/mod.rs index 777b00451fd..16e0f816d17 100644 --- a/metadata/src/from/mod.rs +++ b/metadata/src/from/mod.rs @@ -33,6 +33,15 @@ pub enum TryFromError { /// Invalid type path. #[error("Type has an invalid path {0}")] InvalidTypePath(String), + /// Cannot decode storage entry information. + #[error("Error decoding storage entry information: {0}")] + StorageInfoError(#[from] frame_decode::storage::StorageInfoError<'static>), + /// Cannot decode Runtime API information. 
+ #[error("Error decoding Runtime API information: {0}")] + RuntimeInfoError(#[from] frame_decode::runtime_apis::RuntimeApiInfoError<'static>), + /// Cannot decode View Function information. + #[error("Error decoding View Function information: {0}")] + ViewFunctionInfoError(#[from] frame_decode::view_functions::ViewFunctionInfoError<'static>), } impl TryFrom for crate::Metadata { diff --git a/metadata/src/from/v14.rs b/metadata/src/from/v14.rs index 5326f158bce..894654c75ee 100644 --- a/metadata/src/from/v14.rs +++ b/metadata/src/from/v14.rs @@ -6,9 +6,8 @@ use super::TryFromError; use crate::utils::variant_index::VariantIndex; use crate::{ - ArcStr, ConstantMetadata, CustomMetadataInner, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, - PalletMetadataInner, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, - StorageHasher, StorageMetadata, TransactionExtensionMetadataInner, + ConstantMetadata, CustomMetadataInner, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, + PalletMetadataInner, StorageEntryMetadata, StorageMetadata, TransactionExtensionMetadataInner, utils::ordered_map::OrderedMap, }; use alloc::borrow::ToOwned; @@ -16,6 +15,7 @@ use alloc::collections::BTreeMap; use alloc::string::String; use alloc::vec::Vec; use alloc::{format, vec}; +use frame_decode::storage::StorageTypeInfo; use frame_metadata::v14; use hashbrown::HashMap; use scale_info::form::PortableForm; @@ -28,24 +28,38 @@ impl TryFrom for Metadata { let mut pallets = OrderedMap::new(); let mut pallets_by_index = HashMap::new(); - for (pos, p) in m.pallets.into_iter().enumerate() { - let name: ArcStr = p.name.into(); - - let storage = p.storage.map(|s| StorageMetadata { - prefix: s.prefix, - entries: s - .entries - .into_iter() - .map(|s| { - let name: ArcStr = s.name.clone().into(); - (name.clone(), from_storage_entry_metadata(name, s)) - }) - .collect(), - }); - let constants = p.constants.into_iter().map(|c| { - let name: ArcStr = c.name.clone().into(); - (name.clone(), 
from_constant_metadata(name, c)) - }); + for (pos, p) in m.pallets.iter().enumerate() { + let name: String = p.name.clone(); + + let storage = match &p.storage { + None => None, + Some(s) => Some(StorageMetadata { + prefix: s.prefix.clone(), + entries: s + .entries + .iter() + .map(|s| { + let entry_name: String = s.name.clone().into(); + let storage_info = m + .storage_info(&name, &entry_name) + .map_err(|e| e.into_owned())? + .into_owned(); + let storage_entry = StorageEntryMetadata { + name: entry_name, + info: storage_info, + docs: s.docs.clone().into(), + }; + + Ok::<_, TryFromError>((name.clone(), storage_entry)) + }) + .collect::>()?, + }), + }; + + let constants = p + .constants + .iter() + .map(|c| (name.clone(), from_constant_metadata(c.clone()))); let call_variant_index = VariantIndex::build(p.calls.as_ref().map(|c| c.ty.id), &m.types); @@ -58,14 +72,14 @@ impl TryFrom for Metadata { pallets.push_insert( name.clone(), PalletMetadataInner { - name, + name: name.clone(), index: p.index, storage, - call_ty: p.calls.map(|c| c.ty.id), + call_ty: p.calls.as_ref().map(|c| c.ty.id), call_variant_index, - event_ty: p.event.map(|e| e.ty.id), + event_ty: p.event.as_ref().map(|e| e.ty.id), event_variant_index, - error_ty: p.error.map(|e| e.ty.id), + error_ty: p.error.as_ref().map(|e| e.ty.id), error_variant_index, constants: constants.collect(), view_functions: Default::default(), @@ -135,59 +149,9 @@ fn from_extrinsic_metadata( } } -fn from_storage_hasher(value: v14::StorageHasher) -> StorageHasher { - match value { - v14::StorageHasher::Blake2_128 => StorageHasher::Blake2_128, - v14::StorageHasher::Blake2_256 => StorageHasher::Blake2_256, - v14::StorageHasher::Blake2_128Concat => StorageHasher::Blake2_128Concat, - v14::StorageHasher::Twox128 => StorageHasher::Twox128, - v14::StorageHasher::Twox256 => StorageHasher::Twox256, - v14::StorageHasher::Twox64Concat => StorageHasher::Twox64Concat, - v14::StorageHasher::Identity => StorageHasher::Identity, - } -} - -fn 
from_storage_entry_type(value: v14::StorageEntryType) -> StorageEntryType { - match value { - v14::StorageEntryType::Plain(ty) => StorageEntryType::Plain(ty.id), - v14::StorageEntryType::Map { - hashers, - key, - value, - } => StorageEntryType::Map { - hashers: hashers.into_iter().map(from_storage_hasher).collect(), - key_ty: key.id, - value_ty: value.id, - }, - } -} - -fn from_storage_entry_modifier(value: v14::StorageEntryModifier) -> StorageEntryModifier { - match value { - v14::StorageEntryModifier::Optional => StorageEntryModifier::Optional, - v14::StorageEntryModifier::Default => StorageEntryModifier::Default, - } -} - -fn from_storage_entry_metadata( - name: ArcStr, - s: v14::StorageEntryMetadata, -) -> StorageEntryMetadata { - StorageEntryMetadata { - name, - modifier: from_storage_entry_modifier(s.modifier), - entry_type: from_storage_entry_type(s.ty), - default: s.default, - docs: s.docs, - } -} - -fn from_constant_metadata( - name: ArcStr, - s: v14::PalletConstantMetadata, -) -> ConstantMetadata { +fn from_constant_metadata(s: v14::PalletConstantMetadata) -> ConstantMetadata { ConstantMetadata { - name, + name: s.name, ty: s.ty.id, value: s.value, docs: s.docs, diff --git a/metadata/src/from/v15.rs b/metadata/src/from/v15.rs index 3fc0f07db9a..d80b6c35739 100644 --- a/metadata/src/from/v15.rs +++ b/metadata/src/from/v15.rs @@ -6,14 +6,15 @@ use super::TryFromError; use crate::utils::variant_index::VariantIndex; use crate::{ - ArcStr, ConstantMetadata, ExtrinsicMetadata, Metadata, MethodParamMetadata, OuterEnumsMetadata, - PalletMetadataInner, RuntimeApiMetadataInner, RuntimeApiMethodMetadataInner, - StorageEntryMetadata, StorageEntryModifier, StorageEntryType, StorageHasher, StorageMetadata, + ConstantMetadata, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, PalletMetadataInner, + RuntimeApiMetadataInner, RuntimeApiMethodMetadataInner, StorageEntryMetadata, StorageMetadata, TransactionExtensionMetadataInner, utils::ordered_map::OrderedMap, }; use 
alloc::collections::BTreeMap; use alloc::vec; use alloc::vec::Vec; +use frame_decode::runtime_apis::RuntimeApiTypeInfo; +use frame_decode::storage::StorageTypeInfo; use frame_metadata::v15; use hashbrown::HashMap; use scale_info::form::PortableForm; @@ -23,23 +24,37 @@ impl TryFrom for Metadata { fn try_from(m: v15::RuntimeMetadataV15) -> Result { let mut pallets = OrderedMap::new(); let mut pallets_by_index = HashMap::new(); - for (pos, p) in m.pallets.into_iter().enumerate() { - let name: ArcStr = p.name.into(); - - let storage = p.storage.map(|s| StorageMetadata { - prefix: s.prefix, - entries: s - .entries - .into_iter() - .map(|s| { - let name: ArcStr = s.name.clone().into(); - (name.clone(), from_storage_entry_metadata(name, s)) - }) - .collect(), - }); - let constants = p.constants.into_iter().map(|c| { - let name: ArcStr = c.name.clone().into(); - (name.clone(), from_constant_metadata(name, c)) + for (pos, p) in m.pallets.iter().enumerate() { + let name = p.name.clone(); + + let storage = match &p.storage { + None => None, + Some(s) => Some(StorageMetadata { + prefix: s.prefix.clone(), + entries: s + .entries + .iter() + .map(|s| { + let entry_name = s.name.clone(); + let storage_info = m + .storage_info(&name, &entry_name) + .map_err(|e| e.into_owned())? 
+ .into_owned(); + let storage_entry = StorageEntryMetadata { + name: entry_name.clone(), + info: storage_info, + docs: s.docs.clone().into(), + }; + + Ok::<_, TryFromError>((entry_name, storage_entry)) + }) + .collect::>()?, + }), + }; + + let constants = p.constants.iter().map(|c| { + let name = c.name.clone(); + (name, from_constant_metadata(c.clone())) }); let call_variant_index = @@ -56,24 +71,50 @@ impl TryFrom for Metadata { name, index: p.index, storage, - call_ty: p.calls.map(|c| c.ty.id), + call_ty: p.calls.as_ref().map(|c| c.ty.id), call_variant_index, - event_ty: p.event.map(|e| e.ty.id), + event_ty: p.event.as_ref().map(|e| e.ty.id), event_variant_index, - error_ty: p.error.map(|e| e.ty.id), + error_ty: p.error.as_ref().map(|e| e.ty.id), error_variant_index, constants: constants.collect(), view_functions: Default::default(), associated_types: Default::default(), - docs: p.docs, + docs: p.docs.clone(), }, ); } - let apis = m.apis.into_iter().map(|api| { - let name: ArcStr = api.name.clone().into(); - (name.clone(), from_runtime_api_metadata(name, api)) - }); + let apis = m + .apis + .iter() + .map(|api| { + let trait_name = api.name.clone(); + let methods = api + .methods + .iter() + .map(|method| { + let method_name = method.name.clone(); + let method_info = RuntimeApiMethodMetadataInner { + info: m + .runtime_api_info(&trait_name, &method.name) + .map_err(|e| e.into_owned())? 
+ .into_owned(), + name: method.name.clone(), + docs: method.docs.clone(), + }; + Ok((method_name, method_info)) + }) + .collect::>()?; + + let runtime_api_metadata = RuntimeApiMetadataInner { + name: trait_name.clone(), + methods, + docs: api.docs.clone(), + }; + Ok((trait_name, runtime_api_metadata)) + }) + .collect::>()?; let dispatch_error_ty = m .types @@ -88,7 +129,7 @@ impl TryFrom for Metadata { pallets_by_index, extrinsic: from_extrinsic_metadata(m.extrinsic), dispatch_error_ty, - apis: apis.collect(), + apis: apis, outer_enums: OuterEnumsMetadata { call_enum_ty: m.outer_enums.call_enum_ty.id, event_enum_ty: m.outer_enums.event_enum_ty.id, @@ -130,104 +171,11 @@ fn from_extrinsic_metadata(value: v15::ExtrinsicMetadata) -> Extri } } -fn from_storage_hasher(value: v15::StorageHasher) -> StorageHasher { - match value { - v15::StorageHasher::Blake2_128 => StorageHasher::Blake2_128, - v15::StorageHasher::Blake2_256 => StorageHasher::Blake2_256, - v15::StorageHasher::Blake2_128Concat => StorageHasher::Blake2_128Concat, - v15::StorageHasher::Twox128 => StorageHasher::Twox128, - v15::StorageHasher::Twox256 => StorageHasher::Twox256, - v15::StorageHasher::Twox64Concat => StorageHasher::Twox64Concat, - v15::StorageHasher::Identity => StorageHasher::Identity, - } -} - -fn from_storage_entry_type(value: v15::StorageEntryType) -> StorageEntryType { - match value { - v15::StorageEntryType::Plain(ty) => StorageEntryType::Plain(ty.id), - v15::StorageEntryType::Map { - hashers, - key, - value, - } => StorageEntryType::Map { - hashers: hashers.into_iter().map(from_storage_hasher).collect(), - key_ty: key.id, - value_ty: value.id, - }, - } -} - -fn from_storage_entry_modifier(value: v15::StorageEntryModifier) -> StorageEntryModifier { - match value { - v15::StorageEntryModifier::Optional => StorageEntryModifier::Optional, - v15::StorageEntryModifier::Default => StorageEntryModifier::Default, - } -} - -fn from_storage_entry_metadata( - name: ArcStr, - s: 
v15::StorageEntryMetadata, -) -> StorageEntryMetadata { - StorageEntryMetadata { - name, - modifier: from_storage_entry_modifier(s.modifier), - entry_type: from_storage_entry_type(s.ty), - default: s.default, - docs: s.docs, - } -} - -fn from_constant_metadata( - name: ArcStr, - s: v15::PalletConstantMetadata, -) -> ConstantMetadata { +fn from_constant_metadata(s: v15::PalletConstantMetadata) -> ConstantMetadata { ConstantMetadata { - name, + name: s.name, ty: s.ty.id, value: s.value, docs: s.docs, } } - -fn from_runtime_api_metadata( - name: ArcStr, - s: v15::RuntimeApiMetadata, -) -> RuntimeApiMetadataInner { - RuntimeApiMetadataInner { - name, - docs: s.docs, - methods: s - .methods - .into_iter() - .map(|m| { - let name: ArcStr = m.name.clone().into(); - (name.clone(), from_runtime_api_method_metadata(name, m)) - }) - .collect(), - } -} - -fn from_runtime_api_method_metadata( - name: ArcStr, - s: v15::RuntimeApiMethodMetadata, -) -> RuntimeApiMethodMetadataInner { - RuntimeApiMethodMetadataInner { - name, - inputs: s - .inputs - .into_iter() - .map(from_runtime_api_method_param_metadata) - .collect(), - output_ty: s.output.id, - docs: s.docs, - } -} - -fn from_runtime_api_method_param_metadata( - s: v15::RuntimeApiMethodParamMetadata, -) -> MethodParamMetadata { - MethodParamMetadata { - name: s.name, - ty: s.ty.id, - } -} diff --git a/metadata/src/from/v16.rs b/metadata/src/from/v16.rs index 53a0d26a1fa..29354b01106 100644 --- a/metadata/src/from/v16.rs +++ b/metadata/src/from/v16.rs @@ -6,11 +6,13 @@ use super::TryFromError; use crate::utils::variant_index::VariantIndex; use crate::{ - ArcStr, ConstantMetadata, ExtrinsicMetadata, Metadata, MethodParamMetadata, OuterEnumsMetadata, - PalletMetadataInner, RuntimeApiMetadataInner, RuntimeApiMethodMetadataInner, - StorageEntryMetadata, StorageEntryModifier, StorageEntryType, StorageHasher, StorageMetadata, + ConstantMetadata, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, PalletMetadataInner, + 
RuntimeApiMetadataInner, RuntimeApiMethodMetadataInner, StorageEntryMetadata, StorageMetadata, TransactionExtensionMetadataInner, ViewFunctionMetadataInner, utils::ordered_map::OrderedMap, }; +use frame_decode::runtime_apis::RuntimeApiTypeInfo; +use frame_decode::storage::StorageTypeInfo; +use frame_decode::view_functions::ViewFunctionTypeInfo; use frame_metadata::{v15, v16}; use hashbrown::HashMap; use scale_info::form::PortableForm; @@ -18,31 +20,57 @@ use scale_info::form::PortableForm; impl TryFrom for Metadata { type Error = TryFromError; fn try_from(m: v16::RuntimeMetadataV16) -> Result { - let types = m.types; + let types = &m.types; let mut pallets = OrderedMap::new(); let mut pallets_by_index = HashMap::new(); - for (pos, p) in m.pallets.into_iter().enumerate() { - let name: ArcStr = p.name.into(); - - let storage = p.storage.map(|s| StorageMetadata { - prefix: s.prefix, - entries: s - .entries - .into_iter() - .map(|s| { - let name: ArcStr = s.name.clone().into(); - (name.clone(), from_storage_entry_metadata(name, s)) - }) - .collect(), - }); - let constants = p.constants.into_iter().map(|c| { - let name: ArcStr = c.name.clone().into(); - (name.clone(), from_constant_metadata(name, c)) - }); - let view_functions = p.view_functions.into_iter().map(|v| { - let name: ArcStr = v.name.clone().into(); - (name.clone(), from_view_function_metadata(name, v)) + for (pos, p) in m.pallets.iter().enumerate() { + let name = p.name.clone(); + + let storage = match &p.storage { + None => None, + Some(s) => Some(StorageMetadata { + prefix: s.prefix.clone(), + entries: s + .entries + .iter() + .map(|s| { + let entry_name = s.name.clone(); + let storage_info = m + .storage_info(&name, &entry_name) + .map_err(|e| e.into_owned())? 
+ .into_owned(); + let storage_entry = StorageEntryMetadata { + name: entry_name.clone(), + info: storage_info, + docs: s.docs.clone().into(), + }; + + Ok::<_, TryFromError>((entry_name, storage_entry)) + }) + .collect::>()?, + }), + }; + + let view_functions = p + .view_functions + .iter() + .map(|vf| { + let view_function_metadata = ViewFunctionMetadataInner { + name: vf.name.clone(), + info: m + .view_function_info(&name, &vf.name) + .map_err(|e| e.into_owned())? + .into_owned(), + docs: vf.docs.clone(), + }; + Ok((vf.name.clone(), view_function_metadata)) + }) + .collect::>()?; + + let constants = p.constants.iter().map(|c| { + let name = c.name.clone(); + (name, from_constant_metadata(c.clone())) }); let call_variant_index = VariantIndex::build(p.calls.as_ref().map(|c| c.ty.id), &types); @@ -53,8 +81,8 @@ impl TryFrom for Metadata { let associated_types = p .associated_types - .into_iter() - .map(|t| (t.name, t.ty.id)) + .iter() + .map(|t| (t.name.clone(), t.ty.id)) .collect(); pallets_by_index.insert(p.index, pos); @@ -64,24 +92,50 @@ impl TryFrom for Metadata { name, index: p.index, storage, - call_ty: p.calls.map(|c| c.ty.id), + call_ty: p.calls.as_ref().map(|c| c.ty.id), call_variant_index, - event_ty: p.event.map(|e| e.ty.id), + event_ty: p.event.as_ref().map(|e| e.ty.id), event_variant_index, - error_ty: p.error.map(|e| e.ty.id), + error_ty: p.error.as_ref().map(|e| e.ty.id), error_variant_index, constants: constants.collect(), - view_functions: view_functions.collect(), + view_functions, associated_types, - docs: p.docs, + docs: p.docs.clone(), }, ); } - let apis = m.apis.into_iter().map(|api| { - let name: ArcStr = api.name.clone().into(); - (name.clone(), from_runtime_api_metadata(name, api)) - }); + let apis = m + .apis + .iter() + .map(|api| { + let trait_name = api.name.clone(); + let methods = api + .methods + .iter() + .map(|method| { + let method_name = method.name.clone(); + let method_info = RuntimeApiMethodMetadataInner { + info: m + 
.runtime_api_info(&trait_name, &method.name) + .map_err(|e| e.into_owned())? + .into_owned(), + name: method.name.clone(), + docs: method.docs.clone(), + }; + Ok((method_name, method_info)) + }) + .collect::>()?; + + let runtime_api_metadata = RuntimeApiMetadataInner { + name: trait_name.clone(), + methods, + docs: api.docs.clone(), + }; + Ok((trait_name, runtime_api_metadata)) + }) + .collect::>()?; let custom_map = m .custom @@ -103,12 +157,12 @@ impl TryFrom for Metadata { .map(|ty| ty.id); Ok(Metadata { - types, + types: m.types, pallets, pallets_by_index, extrinsic: from_extrinsic_metadata(m.extrinsic), dispatch_error_ty, - apis: apis.collect(), + apis, outer_enums: OuterEnumsMetadata { call_enum_ty: m.outer_enums.call_enum_ty.id, event_enum_ty: m.outer_enums.event_enum_ty.id, @@ -147,118 +201,11 @@ fn from_extrinsic_metadata(value: v16::ExtrinsicMetadata) -> Extri } } -fn from_storage_hasher(value: v16::StorageHasher) -> StorageHasher { - match value { - v16::StorageHasher::Blake2_128 => StorageHasher::Blake2_128, - v16::StorageHasher::Blake2_256 => StorageHasher::Blake2_256, - v16::StorageHasher::Blake2_128Concat => StorageHasher::Blake2_128Concat, - v16::StorageHasher::Twox128 => StorageHasher::Twox128, - v16::StorageHasher::Twox256 => StorageHasher::Twox256, - v16::StorageHasher::Twox64Concat => StorageHasher::Twox64Concat, - v16::StorageHasher::Identity => StorageHasher::Identity, - } -} - -fn from_storage_entry_type(value: v16::StorageEntryType) -> StorageEntryType { - match value { - v16::StorageEntryType::Plain(ty) => StorageEntryType::Plain(ty.id), - v16::StorageEntryType::Map { - hashers, - key, - value, - } => StorageEntryType::Map { - hashers: hashers.into_iter().map(from_storage_hasher).collect(), - key_ty: key.id, - value_ty: value.id, - }, - } -} - -fn from_storage_entry_modifier(value: v16::StorageEntryModifier) -> StorageEntryModifier { - match value { - v16::StorageEntryModifier::Optional => StorageEntryModifier::Optional, - 
v16::StorageEntryModifier::Default => StorageEntryModifier::Default, - } -} - -fn from_storage_entry_metadata( - name: ArcStr, - s: v16::StorageEntryMetadata, -) -> StorageEntryMetadata { - StorageEntryMetadata { - name, - modifier: from_storage_entry_modifier(s.modifier), - entry_type: from_storage_entry_type(s.ty), - default: s.default, - docs: s.docs, - } -} - -fn from_constant_metadata( - name: ArcStr, - s: v16::PalletConstantMetadata, -) -> ConstantMetadata { +fn from_constant_metadata(s: v16::PalletConstantMetadata) -> ConstantMetadata { ConstantMetadata { - name, + name: s.name, ty: s.ty.id, value: s.value, docs: s.docs, } } - -fn from_runtime_api_metadata( - name: ArcStr, - s: v16::RuntimeApiMetadata, -) -> RuntimeApiMetadataInner { - RuntimeApiMetadataInner { - name, - docs: s.docs, - methods: s - .methods - .into_iter() - .map(|m| { - let name: ArcStr = m.name.clone().into(); - (name.clone(), from_runtime_api_method_metadata(name, m)) - }) - .collect(), - } -} - -fn from_runtime_api_method_metadata( - name: ArcStr, - s: v16::RuntimeApiMethodMetadata, -) -> RuntimeApiMethodMetadataInner { - RuntimeApiMethodMetadataInner { - name, - inputs: s - .inputs - .into_iter() - .map(|param| MethodParamMetadata { - name: param.name, - ty: param.ty.id, - }) - .collect(), - output_ty: s.output.id, - docs: s.docs, - } -} - -fn from_view_function_metadata( - name: ArcStr, - s: v16::PalletViewFunctionMetadata, -) -> ViewFunctionMetadataInner { - ViewFunctionMetadataInner { - name, - query_id: s.id, - inputs: s - .inputs - .into_iter() - .map(|param| MethodParamMetadata { - name: param.name, - ty: param.ty.id, - }) - .collect(), - output_ty: s.output.id, - docs: s.docs, - } -} diff --git a/metadata/src/lib.rs b/metadata/src/lib.rs index f680b36883c..fd395765825 100644 --- a/metadata/src/lib.rs +++ b/metadata/src/lib.rs @@ -24,13 +24,22 @@ mod utils; use alloc::borrow::Cow; use alloc::collections::BTreeMap; -use alloc::string::String; -use alloc::sync::Arc; +use 
alloc::string::{String, ToString}; use alloc::vec::Vec; +use frame_decode::constants::{Constant, ConstantInfo, ConstantInfoError}; +use frame_decode::custom_values::{CustomValue, CustomValueInfo, CustomValueInfoError}; use frame_decode::extrinsics::{ ExtrinsicCallInfo, ExtrinsicExtensionInfo, ExtrinsicInfoArg, ExtrinsicInfoError, ExtrinsicSignatureInfo, }; +use frame_decode::runtime_apis::{ + RuntimeApi, RuntimeApiInfo, RuntimeApiInfoError, RuntimeApiInput, +}; +use frame_decode::storage::{StorageEntry, StorageInfo, StorageInfoError, StorageKeyInfo}; +use frame_decode::view_functions::{ + ViewFunction, ViewFunctionInfo, ViewFunctionInfoError, ViewFunctionInput, +}; + use hashbrown::HashMap; use scale_info::{PortableRegistry, Variant, form::PortableForm}; use utils::{ @@ -39,8 +48,7 @@ use utils::{ variant_index::VariantIndex, }; -type ArcStr = Arc; - +pub use frame_decode::storage::StorageHasher; pub use from::SUPPORTED_METADATA_VERSIONS; pub use from::TryFromError; pub use utils::validation::MetadataHasher; @@ -55,7 +63,7 @@ pub struct Metadata { /// Type registry containing all types used in the metadata. types: PortableRegistry, /// Metadata of all the pallets. - pallets: OrderedMap, + pallets: OrderedMap, /// Find the location in the pallet Vec by pallet index. pallets_by_index: HashMap, /// Metadata of the extrinsic. @@ -65,7 +73,7 @@ pub struct Metadata { /// The type Id of the `DispatchError` type, which Subxt makes use of. dispatch_error_ty: Option, /// Details about each of the runtime API traits. - apis: OrderedMap, + apis: OrderedMap, /// Allows users to add custom types to the metadata. A map that associates a string key to a `CustomValueMetadata`. 
custom: CustomMetadataInner, } @@ -75,7 +83,7 @@ pub struct Metadata { impl frame_decode::extrinsics::ExtrinsicTypeInfo for Metadata { type TypeId = u32; - fn get_call_info( + fn extrinsic_call_info( &self, pallet_index: u8, call_index: u8, @@ -108,7 +116,7 @@ impl frame_decode::extrinsics::ExtrinsicTypeInfo for Metadata { }) } - fn get_signature_info( + fn extrinsic_signature_info( &self, ) -> Result, ExtrinsicInfoError<'_>> { Ok(ExtrinsicSignatureInfo { @@ -117,7 +125,7 @@ impl frame_decode::extrinsics::ExtrinsicTypeInfo for Metadata { }) } - fn get_extension_info( + fn extrinsic_extension_info( &self, extension_version: Option, ) -> Result, ExtrinsicInfoError<'_>> { @@ -142,6 +150,197 @@ impl frame_decode::extrinsics::ExtrinsicTypeInfo for Metadata { Ok(ExtrinsicExtensionInfo { extension_ids }) } } +impl frame_decode::storage::StorageTypeInfo for Metadata { + type TypeId = u32; + + fn storage_info( + &self, + pallet_name: &str, + storage_entry: &str, + ) -> Result, StorageInfoError<'_>> { + let pallet = + self.pallet_by_name(pallet_name) + .ok_or_else(|| StorageInfoError::PalletNotFound { + pallet_name: pallet_name.to_string(), + })?; + let entry = pallet + .storage() + .map(|storage| storage.entry_by_name(storage_entry)) + .flatten() + .ok_or_else(|| StorageInfoError::StorageNotFound { + name: storage_entry.to_string(), + pallet_name: Cow::Borrowed(pallet.name()), + })?; + + let info = StorageInfo { + keys: Cow::Borrowed(&*entry.info.keys), + value_id: entry.info.value_id, + default_value: entry + .info + .default_value + .as_ref() + .map(|def| Cow::Borrowed(&**def)), + }; + + Ok(info) + } + + fn storage_entries(&self) -> impl Iterator> { + self.pallets().flat_map(|pallet| { + let pallet_name = pallet.name(); + pallet.storage().into_iter().flat_map(|storage| { + storage.entries().iter().map(|entry| StorageEntry { + pallet_name: Cow::Borrowed(pallet_name), + storage_entry: Cow::Borrowed(entry.name()), + }) + }) + }) + } +} +impl 
frame_decode::runtime_apis::RuntimeApiTypeInfo for Metadata { + type TypeId = u32; + + fn runtime_api_info( + &self, + trait_name: &str, + method_name: &str, + ) -> Result, RuntimeApiInfoError<'_>> { + let api_trait = + self.apis + .get_by_key(trait_name) + .ok_or_else(|| RuntimeApiInfoError::TraitNotFound { + trait_name: trait_name.to_string(), + })?; + let api_method = api_trait.methods.get_by_key(method_name).ok_or_else(|| { + RuntimeApiInfoError::MethodNotFound { + trait_name: Cow::Borrowed(&api_trait.name), + method_name: method_name.to_string(), + } + })?; + + let info = RuntimeApiInfo { + inputs: Cow::Borrowed(&api_method.info.inputs), + output_id: api_method.info.output_id, + }; + + Ok(info) + } + + fn runtime_apis(&self) -> impl Iterator> { + self.runtime_api_traits().flat_map(|api_trait| { + let trait_name = api_trait.name(); + api_trait.methods().map(|method| RuntimeApi { + trait_name: Cow::Borrowed(trait_name), + method_name: Cow::Borrowed(method.name()), + }) + }) + } +} +impl frame_decode::view_functions::ViewFunctionTypeInfo for Metadata { + type TypeId = u32; + + fn view_function_info( + &self, + pallet_name: &str, + function_name: &str, + ) -> Result, ViewFunctionInfoError<'_>> { + let pallet = self.pallet_by_name(pallet_name).ok_or_else(|| { + ViewFunctionInfoError::PalletNotFound { + pallet_name: pallet_name.to_string(), + } + })?; + let function = pallet.view_function_by_name(function_name).ok_or_else(|| { + ViewFunctionInfoError::FunctionNotFound { + pallet_name: Cow::Borrowed(pallet.name()), + function_name: function_name.to_string(), + } + })?; + + let info = ViewFunctionInfo { + inputs: Cow::Borrowed(&function.inner.info.inputs), + output_id: function.inner.info.output_id, + query_id: *function.query_id(), + }; + + Ok(info) + } + + fn view_functions(&self) -> impl Iterator> { + self.pallets().flat_map(|pallet| { + let pallet_name = pallet.name(); + pallet.view_functions().map(|function| ViewFunction { + pallet_name: 
Cow::Borrowed(pallet_name), + function_name: Cow::Borrowed(function.name()), + }) + }) + } +} +impl frame_decode::constants::ConstantTypeInfo for Metadata { + type TypeId = u32; + + fn constant_info( + &self, + pallet_name: &str, + constant_name: &str, + ) -> Result, ConstantInfoError<'_>> { + let pallet = self.pallet_by_name("pallet_name").ok_or_else(|| { + ConstantInfoError::PalletNotFound { + pallet_name: pallet_name.to_string(), + } + })?; + let constant = pallet.constant_by_name(constant_name).ok_or_else(|| { + ConstantInfoError::ConstantNotFound { + pallet_name: Cow::Borrowed(pallet.name()), + constant_name: constant_name.to_string(), + } + })?; + + let info = ConstantInfo { + bytes: &constant.value, + type_id: constant.ty, + }; + + Ok(info) + } + + fn constants(&self) -> impl Iterator> { + self.pallets().flat_map(|pallet| { + let pallet_name = pallet.name(); + pallet.constants().map(|constant| Constant { + pallet_name: Cow::Borrowed(pallet_name), + constant_name: Cow::Borrowed(constant.name()), + }) + }) + } +} +impl frame_decode::custom_values::CustomValueTypeInfo for Metadata { + type TypeId = u32; + + fn custom_value_info( + &self, + name: &str, + ) -> Result, CustomValueInfoError> { + let custom_value = self + .custom() + .get(name) + .ok_or_else(|| CustomValueInfoError { + not_found: name.to_string(), + })?; + + let info = CustomValueInfo { + bytes: &custom_value.data, + type_id: custom_value.type_id, + }; + + Ok(info) + } + + fn custom_values(&self) -> impl Iterator> { + self.custom.map.iter().map(|(name, _)| CustomValue { + name: Cow::Borrowed(name), + }) + } +} impl Metadata { /// Access the underlying type registry. @@ -217,20 +416,6 @@ impl Metadata { }) } - /// Access a view function given its query ID, if any. 
- pub fn view_function_by_query_id( - &'_ self, - query_id: &[u8; 32], - ) -> Option> { - // Dev note: currently, we only have pallet view functions, and here - // we just do a naive thing of iterating over the pallets to find the one - // we're looking for. Eventually we should construct a separate map of view - // functions for easy querying here. - self.pallets() - .flat_map(|p| p.view_functions()) - .find(|vf| vf.query_id() == query_id) - } - /// Returns custom user defined types pub fn custom(&self) -> CustomMetadata<'_> { CustomMetadata { @@ -418,7 +603,7 @@ impl<'a> PalletMetadata<'a> { #[derive(Debug, Clone)] struct PalletMetadataInner { /// Pallet name. - name: ArcStr, + name: String, /// Pallet index. index: u8, /// Pallet storage metadata. @@ -436,9 +621,9 @@ struct PalletMetadataInner { /// Error variants by name/u8. error_variant_index: VariantIndex, /// Map from constant name to constant details. - constants: OrderedMap, + constants: OrderedMap, /// Details about each of the pallet view functions. - view_functions: OrderedMap, + view_functions: OrderedMap, /// Mapping from associated type to type ID describing its shape. associated_types: BTreeMap, /// Pallet documentation. @@ -451,7 +636,7 @@ pub struct StorageMetadata { /// The common prefix used by all storage entries. prefix: String, /// Map from storage entry name to details. - entries: OrderedMap, + entries: OrderedMap, } impl StorageMetadata { @@ -475,13 +660,9 @@ impl StorageMetadata { #[derive(Debug, Clone)] pub struct StorageEntryMetadata { /// Variable name of the storage entry. - name: ArcStr, - /// An `Option` modifier of that storage entry. - modifier: StorageEntryModifier, - /// Type of the value stored in the entry. - entry_type: StorageEntryType, - /// Default value (SCALE encoded). - default: Vec, + name: String, + /// Information about the storage entry. + info: StorageInfo<'static, u32>, /// Storage entry documentation. 
docs: Vec, } @@ -491,17 +672,18 @@ impl StorageEntryMetadata { pub fn name(&self) -> &str { &self.name } - /// Is the entry value optional or does it have a default value. - pub fn modifier(&self) -> StorageEntryModifier { - self.modifier + /// Keys in this storage entry. + pub fn keys(&self) -> impl ExactSizeIterator> { + let keys = &*self.info.keys; + keys.iter() } - /// Type of the storage entry. - pub fn entry_type(&self) -> &StorageEntryType { - &self.entry_type + /// Value type for this storage entry. + pub fn value_ty(&self) -> u32 { + self.info.value_id } - /// The SCALE encoded default value for this entry. - pub fn default_bytes(&self) -> &[u8] { - &self.default + /// The default value, if one exists, for this entry. + pub fn default_value(&self) -> Option<&[u8]> { + self.info.default_value.as_deref() } /// Storage entry documentation. pub fn docs(&self) -> &[String] { @@ -509,101 +691,11 @@ impl StorageEntryMetadata { } } -/// The type of a storage entry. -#[derive(Debug, Clone)] -pub enum StorageEntryType { - /// Plain storage entry (just the value). - Plain(u32), - /// A storage map. - Map { - /// One or more hashers, should be one hasher per key element. - hashers: Vec, - /// The type of the key, can be a tuple with elements for each of the hashers. - key_ty: u32, - /// The type of the value. - value_ty: u32, - }, -} - -impl StorageEntryType { - /// The type of the value. - pub fn value_ty(&self) -> u32 { - match self { - StorageEntryType::Map { value_ty, .. } | StorageEntryType::Plain(value_ty) => *value_ty, - } - } - - /// The type of the key, can be a tuple with elements for each of the hashers. None for a Plain storage entry. - pub fn key_ty(&self) -> Option { - match self { - StorageEntryType::Map { key_ty, .. } => Some(*key_ty), - StorageEntryType::Plain(_) => None, - } - } -} - -/// Hasher used by storage maps. -#[derive(Debug, Clone, Copy)] -pub enum StorageHasher { - /// 128-bit Blake2 hash. - Blake2_128, - /// 256-bit Blake2 hash. 
- Blake2_256, - /// Multiple 128-bit Blake2 hashes concatenated. - Blake2_128Concat, - /// 128-bit XX hash. - Twox128, - /// 256-bit XX hash. - Twox256, - /// Multiple 64-bit XX hashes concatenated. - Twox64Concat, - /// Identity hashing (no hashing). - Identity, -} - -impl StorageHasher { - /// The hash produced by a [`StorageHasher`] can have these two components, in order: - /// - /// 1. A fixed size hash. (not present for [`StorageHasher::Identity`]). - /// 2. The SCALE encoded key that was used as an input to the hasher (only present for - /// [`StorageHasher::Twox64Concat`], [`StorageHasher::Blake2_128Concat`] or [`StorageHasher::Identity`]). - /// - /// This function returns the number of bytes used to represent the first of these. - pub fn len_excluding_key(&self) -> usize { - match self { - StorageHasher::Blake2_128Concat => 16, - StorageHasher::Twox64Concat => 8, - StorageHasher::Blake2_128 => 16, - StorageHasher::Blake2_256 => 32, - StorageHasher::Twox128 => 16, - StorageHasher::Twox256 => 32, - StorageHasher::Identity => 0, - } - } - - /// Returns true if the key used to produce the hash is appended to the hash itself. - pub fn ends_with_key(&self) -> bool { - matches!( - self, - StorageHasher::Blake2_128Concat | StorageHasher::Twox64Concat | StorageHasher::Identity - ) - } -} - -/// Is the storage entry optional, or does it have a default value. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum StorageEntryModifier { - /// The storage entry returns an `Option`, with `None` if the key is not present. - Optional, - /// The storage entry returns `T::Default` if the key is not present. - Default, -} - /// Metadata for a single constant. #[derive(Debug, Clone)] pub struct ConstantMetadata { /// Name of the pallet constant. - name: ArcStr, + name: String, /// Type of the pallet constant. ty: u32, /// Value stored in the constant (SCALE encoded). 
@@ -816,9 +908,9 @@ impl<'a> RuntimeApiMetadata<'a> { #[derive(Debug, Clone)] struct RuntimeApiMetadataInner { /// Trait name. - name: ArcStr, + name: String, /// Trait methods. - methods: OrderedMap, + methods: OrderedMap, /// Trait documentation. docs: Vec, } @@ -841,12 +933,15 @@ impl<'a> RuntimeApiMethodMetadata<'a> { &self.inner.docs } /// Method inputs. - pub fn inputs(&self) -> impl ExactSizeIterator + use<'a> { - self.inner.inputs.iter() + pub fn inputs( + &self, + ) -> impl ExactSizeIterator> + use<'a> { + let inputs = &*self.inner.info.inputs; + inputs.iter() } /// Method return type. pub fn output_ty(&self) -> u32 { - self.inner.output_ty + self.inner.info.output_id } /// Return a hash for the method. pub fn hash(&self) -> [u8; HASH_LEN] { @@ -857,11 +952,9 @@ impl<'a> RuntimeApiMethodMetadata<'a> { #[derive(Debug, Clone)] struct RuntimeApiMethodMetadataInner { /// Method name. - name: ArcStr, - /// Method parameters. - inputs: Vec, - /// Method output type. - output_ty: u32, + name: String, + /// Info. + info: RuntimeApiInfo<'static, u32>, /// Method documentation. docs: Vec, } @@ -882,19 +975,22 @@ impl<'a> ViewFunctionMetadata<'a> { /// Query ID. This is used to query the function. Roughly, it is constructed by doing /// `twox_128(pallet_name) ++ twox_128("fn_name(fnarg_types) -> return_ty")` . pub fn query_id(&self) -> &'a [u8; 32] { - &self.inner.query_id + &self.inner.info.query_id } /// Method documentation. pub fn docs(&self) -> &'a [String] { &self.inner.docs } /// Method inputs. - pub fn inputs(&self) -> impl ExactSizeIterator + use<'a> { - self.inner.inputs.iter() + pub fn inputs( + &self, + ) -> impl ExactSizeIterator> + use<'a> { + let inputs = &*self.inner.info.inputs; + inputs.iter() } /// Method return type. pub fn output_ty(&self) -> u32 { - self.inner.output_ty + self.inner.info.output_id } /// Return a hash for the method. The query ID of a view function validates it to some /// degree, but only takes type _names_ into account. 
This hash takes into account the @@ -907,13 +1003,9 @@ impl<'a> ViewFunctionMetadata<'a> { #[derive(Debug, Clone)] struct ViewFunctionMetadataInner { /// View function name. - name: ArcStr, - /// View function query ID. - query_id: [u8; 32], - /// Input types. - inputs: Vec, - /// Output type. - output_ty: u32, + name: String, + /// Info. + info: ViewFunctionInfo<'static, u32>, /// Documentation. docs: Vec, } diff --git a/metadata/src/utils/validation.rs b/metadata/src/utils/validation.rs index 7ad72452051..f87ff74818c 100644 --- a/metadata/src/utils/validation.rs +++ b/metadata/src/utils/validation.rs @@ -6,8 +6,7 @@ use crate::{ CustomMetadata, CustomValueMetadata, ExtrinsicMetadata, Metadata, PalletMetadata, - RuntimeApiMetadata, RuntimeApiMethodMetadata, StorageEntryMetadata, StorageEntryType, - ViewFunctionMetadata, + RuntimeApiMetadata, RuntimeApiMethodMetadata, StorageEntryMetadata, ViewFunctionMetadata, }; use alloc::vec::Vec; use hashbrown::HashMap; @@ -290,29 +289,26 @@ fn get_extrinsic_hash(registry: &PortableRegistry, extrinsic: &ExtrinsicMetadata fn get_storage_entry_hash(registry: &PortableRegistry, entry: &StorageEntryMetadata) -> Hash { let mut bytes = concat_and_hash3( &hash(entry.name.as_bytes()), - // Cloning 'entry.modifier' should essentially be a copy. - &[entry.modifier as u8; HASH_LEN], - &hash(&entry.default), + &get_type_hash(registry, entry.info.value_id), + &hash( + entry + .info + .default_value + .as_ref() + .map(|b| &**b) + .unwrap_or_default(), + ), ); - match &entry.entry_type { - StorageEntryType::Plain(ty) => concat_and_hash2(&bytes, &get_type_hash(registry, *ty)), - StorageEntryType::Map { - hashers, - key_ty, - value_ty, - } => { - for hasher in hashers { - // Cloning the hasher should essentially be a copy. 
- bytes = concat_and_hash2(&bytes, &[*hasher as u8; HASH_LEN]); - } - concat_and_hash3( - &bytes, - &get_type_hash(registry, *key_ty), - &get_type_hash(registry, *value_ty), - ) - } + for key in &*entry.info.keys { + bytes = concat_and_hash3( + &bytes, + &[key.hasher as u8; HASH_LEN], + &get_type_hash(registry, key.key_id), + ) } + + bytes } fn get_custom_metadata_hash(custom_metadata: &CustomMetadata) -> Hash { @@ -382,7 +378,7 @@ pub fn get_runtime_api_hash(runtime_api: &RuntimeApiMethodMetadata) -> Hash { bytes = concat_and_hash3( &bytes, &hash(input.name.as_bytes()), - &get_type_hash(registry, input.ty), + &get_type_hash(registry, input.id), ); } @@ -419,7 +415,7 @@ pub fn get_view_function_hash(view_function: &ViewFunctionMetadata) -> Hash { bytes = concat_and_hash3( &bytes, &hash(input.name.as_bytes()), - &get_type_hash(registry, input.ty), + &get_type_hash(registry, input.id), ); } diff --git a/subxt/Cargo.toml b/subxt/Cargo.toml index 7caf8df4d00..7ae5336067a 100644 --- a/subxt/Cargo.toml +++ b/subxt/Cargo.toml @@ -90,6 +90,7 @@ sp-crypto-hashing = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } frame-metadata = { workspace = true } +frame-decode = { workspace = true } either = { workspace = true } web-time = { workspace = true } diff --git a/subxt/examples/block_decoding_dynamic.rs b/subxt/examples/block_decoding_dynamic.rs index eb45cf51b88..44ba483221c 100644 --- a/subxt/examples/block_decoding_dynamic.rs +++ b/subxt/examples/block_decoding_dynamic.rs @@ -21,10 +21,10 @@ async fn main() -> Result<(), Box> { continue; // we do not look at inherents in this example }; - let meta = ext.extrinsic_metadata()?; - let fields = ext.field_values()?; + // Decode the fields into our dynamic Value type to display: + let fields = ext.decode_as_fields::()?; - println!(" {}/{}", meta.pallet.name(), meta.variant.name); + println!(" {}/{}", ext.pallet_name(), ext.call_name()); println!(" Transaction Extensions:"); for signed_ext in 
transaction_extensions.iter() { // We only want to take a look at these 3 signed extensions, because the others all just have unit fields. diff --git a/subxt/examples/blocks_subscribing.rs b/subxt/examples/blocks_subscribing.rs index 05ebb69c1e3..f0f0a37d43e 100644 --- a/subxt/examples/blocks_subscribing.rs +++ b/subxt/examples/blocks_subscribing.rs @@ -42,7 +42,7 @@ async fn main() -> Result<(), Box> { let evt = evt?; let pallet_name = evt.pallet_name(); let event_name = evt.variant_name(); - let event_values = evt.field_values()?; + let event_values = evt.decode_as_fields::()?; println!(" {pallet_name}_{event_name}"); println!(" {event_values}"); diff --git a/subxt/examples/constants_dynamic.rs b/subxt/examples/constants_dynamic.rs index b44db292eab..b001ad6fff6 100644 --- a/subxt/examples/constants_dynamic.rs +++ b/subxt/examples/constants_dynamic.rs @@ -1,4 +1,5 @@ #![allow(missing_docs)] +use subxt::dynamic::Value; use subxt::{OnlineClient, PolkadotConfig}; #[tokio::main] @@ -7,12 +8,15 @@ async fn main() -> Result<(), Box> { let api = OnlineClient::::new().await?; // A dynamic query to obtain some constant: - let constant_query = subxt::dynamic::constant("System", "BlockLength"); + let constant_query = subxt::dynamic::constant::("System", "BlockLength"); - // Obtain the value: + // Obtain the decoded constant: let value = api.constants().at(&constant_query)?; - println!("Constant bytes: {:?}", value.encoded()); - println!("Constant value: {}", value.to_value()?); + // Or obtain the bytes for the constant: + let bytes = api.constants().bytes_at(&constant_query)?; + + println!("Constant bytes: {:?}", bytes); + println!("Constant value: {}", value); Ok(()) } diff --git a/subxt/examples/constants_static.rs b/subxt/examples/constants_static.rs index 3343dd36f86..2bb1aecbf6b 100644 --- a/subxt/examples/constants_static.rs +++ b/subxt/examples/constants_static.rs @@ -15,6 +15,10 @@ async fn main() -> Result<(), Box> { // Obtain the value: let value = 
api.constants().at(&constant_query)?; + // Or obtain the bytes: + let bytes = api.constants().bytes_at(&constant_query)?; + + println!("Encoded block length: {bytes:?}"); println!("Block length: {value:?}"); Ok(()) } diff --git a/subxt/examples/events.rs b/subxt/examples/events.rs index aaa9c1e3191..9861c9238ef 100644 --- a/subxt/examples/events.rs +++ b/subxt/examples/events.rs @@ -19,7 +19,7 @@ async fn main() -> Result<(), Box> { let pallet = event.pallet_name(); let variant = event.variant_name(); - let field_values = event.field_values()?; + let field_values = event.decode_as_fields::()?; println!("{pallet}::{variant}: {field_values}"); } diff --git a/subxt/examples/runtime_apis_dynamic.rs b/subxt/examples/runtime_apis_dynamic.rs index f69ea34be11..ef9c4ac0714 100644 --- a/subxt/examples/runtime_apis_dynamic.rs +++ b/subxt/examples/runtime_apis_dynamic.rs @@ -1,5 +1,5 @@ #![allow(missing_docs)] -use subxt::dynamic::Value; +use subxt::utils::AccountId32; use subxt::{OnlineClient, config::PolkadotConfig}; use subxt_signer::sr25519::dev; @@ -8,14 +8,14 @@ async fn main() -> Result<(), Box> { // Create a client to use: let api = OnlineClient::::new().await?; - // Create a dynamically runtime API payload that calls the - // `AccountNonceApi_account_nonce` function. - let account = dev::alice().public_key(); - let runtime_api_call = subxt::dynamic::runtime_api_call( - "AccountNonceApi", - "account_nonce", - vec![Value::from_bytes(account)], - ); + // Create a "dynamic" runtime API payload that calls the + // `AccountNonceApi_account_nonce` function. We could use the + // `scale_value::Value` type as output, and a vec of those as inputs, + // but since we know the input + return types we can pass them directly. + // There is one input argument, so the inputs are a tuple of one element. 
+ let account: AccountId32 = dev::alice().public_key().into(); + let runtime_api_call = + subxt::dynamic::runtime_api_call::<_, u64>("AccountNonceApi", "account_nonce", (account,)); // Submit the call to get back a result. let nonce = api @@ -25,6 +25,6 @@ async fn main() -> Result<(), Box> { .call(runtime_api_call) .await?; - println!("Account nonce: {:#?}", nonce.to_value()); + println!("Account nonce: {:#?}", nonce); Ok(()) } diff --git a/subxt/examples/storage_fetch.rs b/subxt/examples/storage_fetch.rs index e29a87dccaa..1fe491898c3 100644 --- a/subxt/examples/storage_fetch.rs +++ b/subxt/examples/storage_fetch.rs @@ -10,22 +10,23 @@ pub mod polkadot {} async fn main() -> Result<(), Box> { // Create a new API client, configured to talk to Polkadot nodes. let api = OnlineClient::::new().await?; + let account = dev::alice().public_key().into(); // Build a storage query to access account information. - let account = dev::alice().public_key().into(); - let storage_query = polkadot::storage().system().account(account); + let storage_query = polkadot::storage().system().account(); - // Use that query to `fetch` a result. This returns an `Option<_>`, which will be - // `None` if no value exists at the given address. You can also use `fetch_default` - // where applicable, which will return the default value if none exists. - let result = api - .storage() - .at_latest() + // Use that query to access a storage entry, fetch a result and decode the value. + // The static address knows that fetching requires a tuple of one value, an + // AccountId32. + let client_at = api.storage().at_latest().await?; + let account_info = client_at + .entry(storage_query)? + .fetch((account,)) .await? - .fetch(&storage_query) - .await?; + .decode()?; - let v = result.unwrap().data.free; - println!("Alice: {v}"); + // The static address that we got from the subxt macro knows the expected input + // and return types, so it is decoded into a static type for us. 
+ println!("Alice: {account_info:?}"); Ok(()) } diff --git a/subxt/examples/storage_fetch_dynamic.rs b/subxt/examples/storage_fetch_dynamic.rs index 17bf1de785d..61a81fef98a 100644 --- a/subxt/examples/storage_fetch_dynamic.rs +++ b/subxt/examples/storage_fetch_dynamic.rs @@ -1,5 +1,6 @@ #![allow(missing_docs)] use subxt::dynamic::{At, Value}; +use subxt::utils::AccountId32; use subxt::{OnlineClient, PolkadotConfig}; use subxt_signer::sr25519::dev; @@ -9,20 +10,25 @@ async fn main() -> Result<(), Box> { let api = OnlineClient::::new().await?; // Build a dynamic storage query to access account information. - let account = dev::alice().public_key(); - let storage_query = - subxt::dynamic::storage("System", "Account", vec![Value::from_bytes(account)]); + // here, we assume that there is one value to provide at this entry + // to access a value; an AccountId32. In this example we don't know the + // return type and so we set it to `Value`, which anything can decode into. + let account: AccountId32 = dev::alice().public_key().into(); + let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); - // Use that query to `fetch` a result. Because the query is dynamic, we don't know what the result - // type will be either, and so we get a type back that can be decoded into a dynamic Value type. - let result = api - .storage() - .at_latest() + // Use that query to access a storage entry, fetch a result and decode the value. + let client_at = api.storage().at_latest().await?; + let account_info = client_at + .entry(storage_query)? + .fetch((account,)) .await? - .fetch(&storage_query) - .await?; - let value = result.unwrap().to_value()?; + .decode()?; - println!("Alice has free balance: {:?}", value.at("data").at("free")); + // With out `Value` type we can dig in to find what we want using the `At` + // trait and `.at()` method that this provides on the Value. 
+ println!( + "Alice has free balance: {}", + account_info.at("data").at("free").unwrap() + ); Ok(()) } diff --git a/subxt/examples/storage_iterating.rs b/subxt/examples/storage_iterating.rs index f64fad6c8d9..3ff74029bd9 100644 --- a/subxt/examples/storage_iterating.rs +++ b/subxt/examples/storage_iterating.rs @@ -1,7 +1,9 @@ #![allow(missing_docs)] +use subxt::ext::futures::StreamExt; use subxt::{OnlineClient, PolkadotConfig}; -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] pub mod polkadot {} #[tokio::main] @@ -9,17 +11,31 @@ async fn main() -> Result<(), Box> { // Create a new API client, configured to talk to Polkadot nodes. let api = OnlineClient::::new().await?; - // Build a storage query to iterate over account information. - let storage_query = polkadot::storage().system().account_iter(); + // Build a storage query to access account information. Same as if we were + // fetching a single value from this entry. + let storage_query = polkadot::storage().system().account(); - // Get back an iterator of results (here, we are fetching 10 items at - // a time from the node, but we always iterate over one at a time). - let mut results = api.storage().at_latest().await?.iter(storage_query).await?; + // Use that query to access a storage entry, iterate over it and decode values. + let client_at = api.storage().at_latest().await?; - while let Some(Ok(kv)) = results.next().await { - println!("Keys decoded: {:?}", kv.keys); - println!("Key: 0x{}", hex::encode(&kv.key_bytes)); - println!("Value: {:?}", kv.value); + // We provide an empty tuple when iterating. If the storage entry had been an N map with + // multiple keys, then we could provide any prefix of those keys to iterate over. 
This is + // statically type checked, so only a valid number/type of keys in the tuple is accepted. + let mut values = client_at.entry(storage_query)?.iter(()).await?; + + while let Some(kv) = values.next().await { + let kv = kv?; + + // The key decodes into the type that the static address knows about, in this case a + // tuple of one entry, because the only part of the key that we can decode is the + // AccountId32 for each user. + let (account_id32,) = kv.key()?.decode()?; + + // The value decodes into a statically generated type which holds account information. + let value = kv.value().decode()?; + + let value_data = value.data; + println!("{account_id32}:\n {value_data:?}"); } Ok(()) diff --git a/subxt/examples/storage_iterating_dynamic.rs b/subxt/examples/storage_iterating_dynamic.rs index 8cae0f8d7db..443c977eef2 100644 --- a/subxt/examples/storage_iterating_dynamic.rs +++ b/subxt/examples/storage_iterating_dynamic.rs @@ -1,23 +1,41 @@ #![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig, dynamic::Value}; +use subxt::ext::futures::StreamExt; +use subxt::utils::AccountId32; +use subxt::{ + OnlineClient, PolkadotConfig, + dynamic::{At, Value}, +}; #[tokio::main] async fn main() -> Result<(), Box> { // Create a new API client, configured to talk to Polkadot nodes. let api = OnlineClient::::new().await?; - // Build a dynamic storage query to iterate account information. - // With a dynamic query, we can just provide an empty vector as the keys to iterate over all entries. - let keys: Vec = vec![]; - let storage_query = subxt::dynamic::storage("System", "Account", keys); + // Build a dynamic storage query to access account information. + // here, we assume that there is one value to provide at this entry + // to access a value; an AccountId32. In this example we don't know the + // return type and so we set it to `Value`, which anything can decode into. 
+ let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); - // Use that query to return an iterator over the results. - let mut results = api.storage().at_latest().await?.iter(storage_query).await?; + // Use that query to access a storage entry, iterate over it and decode values. + let client_at = api.storage().at_latest().await?; + let mut values = client_at.entry(storage_query)?.iter(()).await?; - while let Some(Ok(kv)) = results.next().await { - println!("Keys decoded: {:?}", kv.keys); - println!("Key: 0x{}", hex::encode(&kv.key_bytes)); - println!("Value: {:?}", kv.value.to_value()?); + while let Some(kv) = values.next().await { + let kv = kv?; + + // The key decodes into the first type we provided in the address. Since there's just + // one key, it is a tuple of one entry, an AccountId32. If we didn't know how many + // keys or their type, we could set the key to `Vec` instead. + let (account_id32,) = kv.key()?.decode()?; + + // The value decodes into the second type we provided in the address. In this example, + // we just decode it into our `Value` type and then look at the "data" field in this + // (which implicitly assumes we get a struct shaped thing back with such a field). 
+ let value = kv.value().decode()?; + + let value_data = value.at("data").unwrap(); + println!("{account_id32}:\n {value_data}"); } Ok(()) diff --git a/subxt/examples/storage_iterating_partial.rs b/subxt/examples/storage_iterating_partial.rs deleted file mode 100644 index dc901d8f6a0..00000000000 --- a/subxt/examples/storage_iterating_partial.rs +++ /dev/null @@ -1,77 +0,0 @@ -#![allow(missing_docs)] -use polkadot::multisig::events::NewMultisig; -use polkadot::runtime_types::{ - frame_system::pallet::Call, rococo_runtime::RuntimeCall, sp_weights::weight_v2::Weight, -}; -use subxt::utils::AccountId32; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::{Keypair, dev}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Prepare the chain to have 3 open multisig requests (2 of them are alice + bob): - let alice_signer = dev::alice(); - let bob = AccountId32(dev::bob().public_key().0); - let charlie = AccountId32(dev::charlie().public_key().0); - - let new_multisig_1 = submit_remark_as_multi(&alice_signer, &bob, b"Hello", &api).await?; - let new_multisig_2 = submit_remark_as_multi(&alice_signer, &bob, b"Hi", &api).await?; - let new_multisig_3 = submit_remark_as_multi(&alice_signer, &charlie, b"Hello", &api).await?; - - // Note: the NewMultisig event contains the multisig address we need to use for the storage queries: - assert_eq!(new_multisig_1.multisig, new_multisig_2.multisig); - assert_ne!(new_multisig_1.multisig, new_multisig_3.multisig); - - // Build a storage query to iterate over open multisig extrinsics from - // new_multisig_1.multisig which is the AccountId of the alice + bob multisig account - let alice_bob_account_id = new_multisig_1.multisig; - let storage_query = polkadot::storage() - .multisig() - 
.multisigs_iter1(alice_bob_account_id); - - // Get back an iterator of results. - let mut results = api.storage().at_latest().await?.iter(storage_query).await?; - - while let Some(Ok(kv)) = results.next().await { - println!("Keys decoded: {:?}", kv.keys); - println!("Key: 0x{}", hex::encode(&kv.key_bytes)); - println!("Value: {:?}", kv.value); - } - Ok(()) -} - -async fn submit_remark_as_multi( - signer: &Keypair, - other: &AccountId32, - remark: &[u8], - api: &OnlineClient, -) -> Result> { - let multisig_remark_tx = polkadot::tx().multisig().as_multi( - 2, - vec![other.clone()], - None, - RuntimeCall::System(Call::remark { - remark: remark.to_vec(), - }), - Weight { - ref_time: 0, - proof_size: 0, - }, - ); - let events = api - .tx() - .sign_and_submit_then_watch_default(&multisig_remark_tx, signer) - .await? - .wait_for_finalized_success() - .await?; - let new_multisig = events - .find_first::()? - .expect("should contain event"); - Ok(new_multisig) -} diff --git a/subxt/src/backend/chain_head/follow_stream.rs b/subxt/src/backend/chain_head/follow_stream.rs index e82e8cb9c48..f06b5d06776 100644 --- a/subxt/src/backend/chain_head/follow_stream.rs +++ b/subxt/src/backend/chain_head/follow_stream.rs @@ -3,7 +3,7 @@ // see LICENSE for license details. use crate::config::{Config, HashFor}; -use crate::error::Error; +use crate::error::BackendError; use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; use std::future::Future; use std::pin::Pin; @@ -34,12 +34,16 @@ pub type FollowEventStreamGetter = Box FollowEventStreamFut /// The future which will return a stream of follow events and the subscription ID for it. pub type FollowEventStreamFut = Pin< - Box, String), Error>> + Send + 'static>, + Box< + dyn Future, String), BackendError>> + + Send + + 'static, + >, >; /// The stream of follow events. 
pub type FollowEventStream = - Pin, Error>> + Send + 'static>>; + Pin, BackendError>> + Send + 'static>>; /// Either a ready message with the current subscription ID, or /// an event from the stream itself. @@ -108,7 +112,7 @@ impl FollowStream { let stream = methods.chainhead_v1_follow(true).await?; // Extract the subscription ID: let Some(sub_id) = stream.subscription_id().map(ToOwned::to_owned) else { - return Err(Error::Other( + return Err(BackendError::Other( "Subscription ID expected for chainHead_follow response, but not given" .to_owned(), )); @@ -128,7 +132,7 @@ impl FollowStream { impl std::marker::Unpin for FollowStream {} impl Stream for FollowStream { - type Item = Result, Error>; + type Item = Result, BackendError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -228,7 +232,7 @@ pub(super) mod test_utils { where Hash: Send + 'static, F: Fn() -> I + Send + 'static, - I: IntoIterator, Error>>, + I: IntoIterator, BackendError>>, { let start_idx = Arc::new(AtomicUsize::new(0)); @@ -307,7 +311,7 @@ pub mod test { Ok(FollowEvent::Stop), Ok(ev_new_block(1, 2)), // Nothing should be emitted after an error: - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), Ok(ev_new_block(2, 3)), ] }); diff --git a/subxt/src/backend/chain_head/follow_stream_driver.rs b/subxt/src/backend/chain_head/follow_stream_driver.rs index 3fd2d1af410..f1ff507729e 100644 --- a/subxt/src/backend/chain_head/follow_stream_driver.rs +++ b/subxt/src/backend/chain_head/follow_stream_driver.rs @@ -4,7 +4,7 @@ use super::follow_stream_unpin::{BlockRef, FollowStreamMsg, FollowStreamUnpin}; use crate::config::Hash; -use crate::error::{Error, RpcError}; +use crate::error::{BackendError, RpcError}; use futures::stream::{Stream, StreamExt}; use std::collections::{HashMap, HashSet, VecDeque}; use std::ops::DerefMut; @@ -42,7 +42,7 @@ impl FollowStreamDriver { } impl Stream for FollowStreamDriver { - type Item = 
Result<(), Error>; + type Item = Result<(), BackendError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.inner.poll_next_unpin(cx) { @@ -421,7 +421,7 @@ where H: Hash, F: Fn(FollowEvent>) -> Vec>, { - type Item = Result<(String, Vec>), Error>; + type Item = Result<(String, Vec>), BackendError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self.is_done { @@ -500,7 +500,7 @@ mod test_utils { where H: Hash + 'static, F: Fn() -> I + Send + 'static, - I: IntoIterator, Error>>, + I: IntoIterator, BackendError>>, { let (stream, _) = test_unpin_stream_getter(events, max_life); FollowStreamDriver::new(stream) @@ -537,7 +537,7 @@ mod test { Ok(ev_new_block(0, 1)), Ok(ev_best_block(1)), Ok(ev_finalized([1], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -580,7 +580,7 @@ mod test { Ok(ev_finalized([1], [])), Ok(ev_new_block(1, 2)), Ok(ev_new_block(2, 3)), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -630,7 +630,7 @@ mod test { Ok(ev_new_block(1, 2)), Ok(ev_new_block(2, 3)), Ok(ev_finalized([1], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -668,7 +668,7 @@ mod test { Ok(FollowEvent::Stop), Ok(ev_initialized(1)), Ok(ev_finalized([2], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -714,7 +714,7 @@ mod test { // Emulate that we missed some blocks. 
Ok(ev_initialized(13)), Ok(ev_finalized([14], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -742,7 +742,7 @@ mod test { ) ); assert!( - matches!(&evs[1], Err(Error::Rpc(RpcError::ClientError(subxt_rpcs::Error::DisconnectedWillReconnect(e)))) if e.contains("Missed at least one block when the connection was lost")) + matches!(&evs[1], Err(BackendError::Rpc(RpcError::ClientError(subxt_rpcs::Error::DisconnectedWillReconnect(e)))) if e.contains("Missed at least one block when the connection was lost")) ); assert_eq!( evs[2].as_ref().unwrap(), diff --git a/subxt/src/backend/chain_head/follow_stream_unpin.rs b/subxt/src/backend/chain_head/follow_stream_unpin.rs index 303ef6e32f2..db0995f5740 100644 --- a/subxt/src/backend/chain_head/follow_stream_unpin.rs +++ b/subxt/src/backend/chain_head/follow_stream_unpin.rs @@ -5,7 +5,7 @@ use super::ChainHeadRpcMethods; use super::follow_stream::FollowStream; use crate::config::{Config, Hash, HashFor}; -use crate::error::Error; +use crate::error::BackendError; use futures::stream::{FuturesUnordered, Stream, StreamExt}; use subxt_rpcs::methods::chain_head::{ BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, @@ -71,7 +71,7 @@ pub type UnpinFut = Pin + Send + 'static>>; impl std::marker::Unpin for FollowStreamUnpin {} impl Stream for FollowStreamUnpin { - type Item = Result>, Error>; + type Item = Result>, BackendError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.as_mut(); @@ -482,7 +482,7 @@ pub(super) mod test_utils { where H: Hash + 'static, F: Fn() -> I + Send + 'static, - I: IntoIterator, Error>>, + I: IntoIterator, BackendError>>, { // Unpin requests will come here so that we can look out for them. 
let (unpin_tx, unpin_rx) = std::sync::mpsc::channel(); @@ -567,7 +567,7 @@ mod test { Ok(ev_new_block(0, 1)), Ok(ev_new_block(1, 2)), Ok(ev_new_block(2, 3)), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -593,7 +593,7 @@ mod test { [ Ok(ev_initialized(0)), Ok(ev_finalized([1], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 3, @@ -624,7 +624,7 @@ mod test { Ok(ev_finalized([3], [])), Ok(ev_finalized([4], [])), Ok(ev_finalized([5], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 3, @@ -663,7 +663,7 @@ mod test { Ok(ev_new_block(1, 2)), Ok(ev_finalized([1], [])), Ok(ev_finalized([2], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -711,7 +711,7 @@ mod test { Ok(ev_finalized([1], [])), Ok(ev_finalized([2], [3])), Ok(ev_finalized([4], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, @@ -771,7 +771,7 @@ mod test { Ok(ev_best_block(1)), Ok(ev_finalized([1], [])), Ok(ev_finalized([2], [])), - Err(Error::Other("ended".to_owned())), + Err(BackendError::Other("ended".to_owned())), ] }, 10, diff --git a/subxt/src/backend/chain_head/mod.rs b/subxt/src/backend/chain_head/mod.rs index 0d0024a4ee9..18521ce08ff 100644 --- a/subxt/src/backend/chain_head/mod.rs +++ b/subxt/src/backend/chain_head/mod.rs @@ -22,7 +22,7 @@ use crate::backend::{ TransactionStatus, utils::retry, }; use crate::config::{Config, Hash, HashFor}; -use crate::error::{Error, RpcError}; +use crate::error::{BackendError, RpcError}; use async_trait::async_trait; use follow_stream_driver::{FollowStreamDriver, FollowStreamDriverHandle}; use futures::future::Either; @@ -229,7 +229,7 @@ impl ChainHeadBackend { async fn stream_headers( &self, f: F, - ) -> Result>)>, Error> + ) -> Result>)>, BackendError> where F: Fn( FollowEvent>>, @@ -290,7 +290,7 @@ 
impl Backend for ChainHeadBackend { &self, keys: Vec>, at: HashFor, - ) -> Result, Error> { + ) -> Result, BackendError> { retry(|| async { let queries = keys.iter().map(|key| StorageQuery { key: &**key, @@ -325,7 +325,7 @@ impl Backend for ChainHeadBackend { &self, key: Vec, at: HashFor, - ) -> Result>, Error> { + ) -> Result>, BackendError> { retry(|| async { // Ask for hashes, and then just ignore them and return the keys that come back. let query = StorageQuery { @@ -351,7 +351,7 @@ impl Backend for ChainHeadBackend { &self, key: Vec, at: HashFor, - ) -> Result, Error> { + ) -> Result, BackendError> { retry(|| async { let query = StorageQuery { key: &*key, @@ -386,7 +386,7 @@ impl Backend for ChainHeadBackend { .await } - async fn genesis_hash(&self) -> Result, Error> { + async fn genesis_hash(&self) -> Result, BackendError> { retry(|| async { let genesis_hash = self.methods.chainspec_v1_genesis_hash().await?; Ok(genesis_hash) @@ -394,7 +394,7 @@ impl Backend for ChainHeadBackend { .await } - async fn block_header(&self, at: HashFor) -> Result, Error> { + async fn block_header(&self, at: HashFor) -> Result, BackendError> { retry(|| async { let sub_id = get_subscription_id(&self.follow_handle).await?; let header = self.methods.chainhead_v1_header(&sub_id, at).await?; @@ -403,7 +403,7 @@ impl Backend for ChainHeadBackend { .await } - async fn block_body(&self, at: HashFor) -> Result>>, Error> { + async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { retry(|| async { let sub_id = get_subscription_id(&self.follow_handle).await?; @@ -432,7 +432,7 @@ impl Backend for ChainHeadBackend { .await } - async fn latest_finalized_block_ref(&self) -> Result>, Error> { + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { let next_ref: Option>> = self .follow_handle .subscribe() @@ -452,17 +452,19 @@ impl Backend for ChainHeadBackend { next_ref.ok_or_else(|| RpcError::SubscriptionDropped.into()) } - async fn current_runtime_version(&self) -> 
Result { + async fn current_runtime_version(&self) -> Result { // Just start a stream of version infos, and return the first value we get from it. let runtime_version = self.stream_runtime_version().await?.next().await; match runtime_version { - None => Err(Error::Rpc(RpcError::SubscriptionDropped)), + None => Err(BackendError::Rpc(RpcError::SubscriptionDropped)), Some(Err(e)) => Err(e), Some(Ok(version)) => Ok(version), } } - async fn stream_runtime_version(&self) -> Result, Error> { + async fn stream_runtime_version( + &self, + ) -> Result, BackendError> { // Keep track of runtime details announced in new blocks, and then when blocks // are finalized, find the latest of these that has runtime details, and clear the rest. let mut runtimes = HashMap::new(); @@ -526,7 +528,7 @@ impl Backend for ChainHeadBackend { let runtime_details = match runtime_event { RuntimeEvent::Invalid(err) => { - return std::future::ready(Some(Err(Error::Other(err.error)))) + return std::future::ready(Some(Err(BackendError::Other(format!("Invalid runtime error using chainHead RPCs: {}", err.error))))) } RuntimeEvent::Valid(ev) => ev, }; @@ -544,7 +546,7 @@ impl Backend for ChainHeadBackend { async fn stream_all_block_headers( &self, _hasher: T::Hasher, - ) -> Result>)>, Error> { + ) -> Result>)>, BackendError> { // TODO: https://github.com/paritytech/subxt/issues/1568 // // It's possible that blocks may be silently missed if @@ -562,7 +564,7 @@ impl Backend for ChainHeadBackend { async fn stream_best_block_headers( &self, _hasher: T::Hasher, - ) -> Result>)>, Error> { + ) -> Result>)>, BackendError> { // TODO: https://github.com/paritytech/subxt/issues/1568 // // It's possible that blocks may be silently missed if @@ -578,7 +580,7 @@ impl Backend for ChainHeadBackend { async fn stream_finalized_block_headers( &self, _hasher: T::Hasher, - ) -> Result>)>, Error> { + ) -> Result>)>, BackendError> { self.stream_headers(|ev| match ev { FollowEvent::Initialized(init) => 
init.finalized_block_hashes, FollowEvent::Finalized(ev) => ev.finalized_block_hashes, @@ -590,12 +592,12 @@ impl Backend for ChainHeadBackend { async fn submit_transaction( &self, extrinsic: &[u8], - ) -> Result>>, Error> { + ) -> Result>>, BackendError> { // Submit a transaction. This makes no attempt to sync with follow events, async fn submit_transaction_ignoring_follow_events( extrinsic: &[u8], methods: &ChainHeadRpcMethods, - ) -> Result>>, Error> { + ) -> Result>>, BackendError> { let tx_progress = methods .transactionwatch_v1_submit_and_watch(extrinsic) .await? @@ -637,7 +639,7 @@ impl Backend for ChainHeadBackend { transaction_timeout_secs: u64, methods: &ChainHeadRpcMethods, follow_handle: &FollowStreamDriverHandle>, - ) -> Result>>, Error> { + ) -> Result>>, BackendError> { // We care about new and finalized block hashes. enum SeenBlockMarker { New, @@ -664,7 +666,7 @@ impl Backend for ChainHeadBackend { let start_instant = web_time::Instant::now(); // A quick helper to return a generic error. - let err_other = |s: &str| Some(Err(Error::Other(s.into()))); + let err_other = |s: &str| Some(Err(BackendError::Other(s.into()))); // Now we can attempt to associate tx events with pinned blocks. let tx_stream = futures::stream::poll_fn(move |cx| { @@ -828,7 +830,7 @@ impl Backend for ChainHeadBackend { method: &str, call_parameters: Option<&[u8]>, at: HashFor, - ) -> Result, Error> { + ) -> Result, BackendError> { retry(|| async { let sub_id = get_subscription_id(&self.follow_handle).await?; @@ -867,7 +869,7 @@ impl Backend for ChainHeadBackend { /// A helper to obtain a subscription ID. 
async fn get_subscription_id( follow_handle: &FollowStreamDriverHandle, -) -> Result { +) -> Result { let Some(sub_id) = follow_handle.subscribe().subscription_id().await else { return Err(RpcError::SubscriptionDropped.into()); }; diff --git a/subxt/src/backend/chain_head/storage_items.rs b/subxt/src/backend/chain_head/storage_items.rs index 5a108b64985..6519e63a677 100644 --- a/subxt/src/backend/chain_head/storage_items.rs +++ b/subxt/src/backend/chain_head/storage_items.rs @@ -5,7 +5,7 @@ use super::follow_stream_driver::FollowStreamDriverHandle; use super::follow_stream_unpin::BlockRef; use crate::config::{Config, HashFor}; -use crate::error::{Error, RpcError}; +use crate::error::{BackendError, RpcError}; use futures::{FutureExt, Stream, StreamExt}; use std::collections::VecDeque; use std::future::Future; @@ -36,7 +36,7 @@ impl StorageItems { at: HashFor, follow_handle: &FollowStreamDriverHandle>, methods: ChainHeadRpcMethods, - ) -> Result { + ) -> Result { let sub_id = super::get_subscription_id(follow_handle).await?; // Subscribe to events and make the initial request to get an operation ID. @@ -92,10 +92,10 @@ impl StorageItems { pub type FollowEventStream = Pin>> + Send + 'static>>; pub type ContinueFutGetter = Box ContinueFut + Send + 'static>; -pub type ContinueFut = Pin> + Send + 'static>>; +pub type ContinueFut = Pin> + Send + 'static>>; impl Stream for StorageItems { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { @@ -157,7 +157,7 @@ impl Stream for StorageItems { FollowEvent::OperationError(err) if err.operation_id == *self.operation_id => { // Something went wrong obtaining storage items; mark as done and return the error. self.done = true; - return Poll::Ready(Some(Err(Error::Other(err.error)))); + return Poll::Ready(Some(Err(BackendError::Other(err.error)))); } _ => { // We don't care about this event; wait for the next. 
diff --git a/subxt/src/backend/legacy.rs b/subxt/src/backend/legacy.rs index 34d5a0ffaea..d6edcdbd7d7 100644 --- a/subxt/src/backend/legacy.rs +++ b/subxt/src/backend/legacy.rs @@ -11,10 +11,8 @@ use crate::backend::{ Backend, BlockRef, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults, TransactionStatus, }; -use crate::{ - Error, - config::{Config, HashFor, Header}, -}; +use crate::config::{Config, HashFor, Header}; +use crate::error::BackendError; use async_trait::async_trait; use futures::TryStreamExt; use futures::{Future, FutureExt, Stream, StreamExt, future, future::Either, stream}; @@ -101,12 +99,12 @@ impl Backend for LegacyBackend { &self, keys: Vec>, at: HashFor, - ) -> Result, Error> { + ) -> Result, BackendError> { fn get_entry( key: Vec, at: HashFor, methods: LegacyRpcMethods, - ) -> impl Future, Error>> { + ) -> impl Future, BackendError>> { retry(move || { let methods = methods.clone(); let key = key.clone(); @@ -138,7 +136,7 @@ impl Backend for LegacyBackend { &self, key: Vec, at: HashFor, - ) -> Result>, Error> { + ) -> Result>, BackendError> { let keys = StorageFetchDescendantKeysStream { at, key, @@ -169,7 +167,7 @@ impl Backend for LegacyBackend { &self, key: Vec, at: HashFor, - ) -> Result, Error> { + ) -> Result, BackendError> { let keys_stream = StorageFetchDescendantKeysStream { at, key, @@ -187,7 +185,7 @@ impl Backend for LegacyBackend { }))) } - async fn genesis_hash(&self) -> Result, Error> { + async fn genesis_hash(&self) -> Result, BackendError> { retry(|| async { let hash = self.methods.genesis_hash().await?; Ok(hash) @@ -195,7 +193,7 @@ impl Backend for LegacyBackend { .await } - async fn block_header(&self, at: HashFor) -> Result, Error> { + async fn block_header(&self, at: HashFor) -> Result, BackendError> { retry(|| async { let header = self.methods.chain_get_header(Some(at)).await?; Ok(header) @@ -203,7 +201,7 @@ impl Backend for LegacyBackend { .await } - async fn block_body(&self, at: HashFor) -> Result>>, Error> { + 
async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { retry(|| async { let Some(details) = self.methods.chain_get_block(Some(at)).await? else { return Ok(None); @@ -215,7 +213,7 @@ impl Backend for LegacyBackend { .await } - async fn latest_finalized_block_ref(&self) -> Result>, Error> { + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { retry(|| async { let hash = self.methods.chain_get_finalized_head().await?; Ok(BlockRef::from_hash(hash)) @@ -223,7 +221,7 @@ impl Backend for LegacyBackend { .await } - async fn current_runtime_version(&self) -> Result { + async fn current_runtime_version(&self) -> Result { retry(|| async { let details = self.methods.state_get_runtime_version(None).await?; Ok(RuntimeVersion { @@ -234,7 +232,9 @@ impl Backend for LegacyBackend { .await } - async fn stream_runtime_version(&self) -> Result, Error> { + async fn stream_runtime_version( + &self, + ) -> Result, BackendError> { let methods = self.methods.clone(); let retry_sub = retry_stream(move || { @@ -274,7 +274,7 @@ impl Backend for LegacyBackend { async fn stream_all_block_headers( &self, hasher: T::Hasher, - ) -> Result>)>, Error> { + ) -> Result>)>, BackendError> { let methods = self.methods.clone(); let retry_sub = retry_stream(move || { let methods = methods.clone(); @@ -297,7 +297,7 @@ impl Backend for LegacyBackend { async fn stream_best_block_headers( &self, hasher: T::Hasher, - ) -> Result>)>, Error> { + ) -> Result>)>, BackendError> { let methods = self.methods.clone(); let retry_sub = retry_stream(move || { @@ -321,7 +321,7 @@ impl Backend for LegacyBackend { async fn stream_finalized_block_headers( &self, hasher: T::Hasher, - ) -> Result>)>, Error> { + ) -> Result>)>, BackendError> { let this = self.clone(); let retry_sub = retry_stream(move || { @@ -361,7 +361,7 @@ impl Backend for LegacyBackend { async fn submit_transaction( &self, extrinsic: &[u8], - ) -> Result>>, Error> { + ) -> Result>>, BackendError> { let sub = self .methods 
.author_submit_and_watch_extrinsic(extrinsic) @@ -423,7 +423,7 @@ impl Backend for LegacyBackend { method: &str, call_parameters: Option<&[u8]>, at: HashFor, - ) -> Result, Error> { + ) -> Result, BackendError> { retry(|| async { let res = self .methods @@ -442,11 +442,11 @@ pub fn subscribe_to_block_headers_filling_in_gaps( methods: LegacyRpcMethods, sub: S, mut last_block_num: Option, -) -> impl Stream> + Send +) -> impl Stream> + Send where T: Config, S: Stream> + Send, - E: Into + Send + 'static, + E: Into + Send + 'static, { sub.flat_map(move |s| { // Get the header, or return a stream containing just the error. @@ -470,7 +470,7 @@ where async move { let hash = methods.chain_get_block_hash(Some(n.into())).await?; let header = methods.chain_get_header(hash).await?; - Ok::<_, Error>(header) + Ok::<_, BackendError>(header) } }) .filter_map(async |h| h.transpose()); @@ -495,7 +495,8 @@ pub struct StorageFetchDescendantKeysStream { // What key do we start paginating from? None = from the beginning. 
pagination_start_key: Option>, // Keys, future and cached: - keys_fut: Option>, Error>> + Send + 'static>>>, + keys_fut: + Option>, BackendError>> + Send + 'static>>>, // Set to true when we're done: done: bool, } @@ -503,7 +504,7 @@ pub struct StorageFetchDescendantKeysStream { impl std::marker::Unpin for StorageFetchDescendantKeysStream {} impl Stream for StorageFetchDescendantKeysStream { - type Item = Result>, Error>; + type Item = Result>, BackendError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.as_mut(); loop { @@ -584,7 +585,7 @@ pub struct StorageFetchDescendantValuesStream { results_fut: Option< Pin< Box< - dyn Future, Vec)>>, Error>> + dyn Future, Vec)>>, BackendError>> + Send + 'static, >, @@ -595,7 +596,7 @@ pub struct StorageFetchDescendantValuesStream { } impl Stream for StorageFetchDescendantValuesStream { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.as_mut(); loop { diff --git a/subxt/src/backend/mod.rs b/subxt/src/backend/mod.rs index 72549fc35c8..95395a2a297 100644 --- a/subxt/src/backend/mod.rs +++ b/subxt/src/backend/mod.rs @@ -11,14 +11,14 @@ pub mod legacy; pub mod utils; use crate::config::{Config, HashFor}; -use crate::error::Error; -use crate::metadata::Metadata; +use crate::error::BackendError; use async_trait::async_trait; use codec::{Decode, Encode}; use futures::{Stream, StreamExt}; use std::pin::Pin; use std::sync::Arc; use subxt_core::client::RuntimeVersion; +use subxt_metadata::Metadata; /// Some re-exports from the [`subxt_rpcs`] crate, also accessible in full via [`crate::ext::subxt_rpcs`]. pub mod rpc { @@ -82,66 +82,67 @@ pub trait Backend: sealed::Sealed + Send + Sync + 'static { &self, keys: Vec>, at: HashFor, - ) -> Result, Error>; + ) -> Result, BackendError>; /// Fetch keys underneath the given key from storage. 
async fn storage_fetch_descendant_keys( &self, key: Vec, at: HashFor, - ) -> Result>, Error>; + ) -> Result>, BackendError>; /// Fetch values underneath the given key from storage. async fn storage_fetch_descendant_values( &self, key: Vec, at: HashFor, - ) -> Result, Error>; + ) -> Result, BackendError>; /// Fetch the genesis hash - async fn genesis_hash(&self) -> Result, Error>; + async fn genesis_hash(&self) -> Result, BackendError>; /// Get a block header - async fn block_header(&self, at: HashFor) -> Result, Error>; + async fn block_header(&self, at: HashFor) -> Result, BackendError>; /// Return the extrinsics found in the block. Each extrinsic is represented /// by a vector of bytes which has _not_ been SCALE decoded (in other words, the /// first bytes in the vector will decode to the compact encoded length of the extrinsic) - async fn block_body(&self, at: HashFor) -> Result>>, Error>; + async fn block_body(&self, at: HashFor) -> Result>>, BackendError>; /// Get the most recent finalized block hash. /// Note: needed only in blocks client for finalized block stream; can prolly be removed. - async fn latest_finalized_block_ref(&self) -> Result>, Error>; + async fn latest_finalized_block_ref(&self) -> Result>, BackendError>; /// Get information about the current runtime. - async fn current_runtime_version(&self) -> Result; + async fn current_runtime_version(&self) -> Result; /// A stream of all new runtime versions as they occur. - async fn stream_runtime_version(&self) -> Result, Error>; + async fn stream_runtime_version(&self) + -> Result, BackendError>; /// A stream of all new block headers as they arrive. async fn stream_all_block_headers( &self, hasher: T::Hasher, - ) -> Result>)>, Error>; + ) -> Result>)>, BackendError>; /// A stream of best block headers. async fn stream_best_block_headers( &self, hasher: T::Hasher, - ) -> Result>)>, Error>; + ) -> Result>)>, BackendError>; /// A stream of finalized block headers. 
async fn stream_finalized_block_headers( &self, hasher: T::Hasher, - ) -> Result>)>, Error>; + ) -> Result>)>, BackendError>; /// Submit a transaction. This will return a stream of events about it. async fn submit_transaction( &self, bytes: &[u8], - ) -> Result>>, Error>; + ) -> Result>>, BackendError>; /// Make a call to some runtime API. async fn call( @@ -149,7 +150,7 @@ pub trait Backend: sealed::Sealed + Send + Sync + 'static { method: &str, call_parameters: Option<&[u8]>, at: HashFor, - ) -> Result, Error>; + ) -> Result, BackendError>; } /// helpful utility methods derived from those provided on [`Backend`] @@ -160,7 +161,7 @@ pub trait BackendExt: Backend { &self, key: Vec, at: HashFor, - ) -> Result>, Error> { + ) -> Result>, BackendError> { self.storage_fetch_values(vec![key], at) .await? .next() @@ -176,32 +177,39 @@ pub trait BackendExt: Backend { method: &str, call_parameters: Option<&[u8]>, at: HashFor, - ) -> Result { + ) -> Result { let bytes = self.call(method, call_parameters, at).await?; - let res = D::decode(&mut &*bytes)?; + let res = + D::decode(&mut &*bytes).map_err(BackendError::CouldNotScaleDecodeRuntimeResponse)?; Ok(res) } /// Return the metadata at some version. - async fn metadata_at_version(&self, version: u32, at: HashFor) -> Result { + async fn metadata_at_version( + &self, + version: u32, + at: HashFor, + ) -> Result { let param = version.encode(); let opaque: Option = self .call_decoding("Metadata_metadata_at_version", Some(¶m), at) .await?; let Some(opaque) = opaque else { - return Err(Error::Other("Metadata version not found".into())); + return Err(BackendError::MetadataVersionNotFound(version)); }; - let metadata: Metadata = Decode::decode(&mut &opaque.0[..])?; + let metadata: Metadata = + Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; Ok(metadata) } /// Return V14 metadata from the legacy `Metadata_metadata` call. 
- async fn legacy_metadata(&self, at: HashFor) -> Result { + async fn legacy_metadata(&self, at: HashFor) -> Result { let opaque: frame_metadata::OpaqueMetadata = self.call_decoding("Metadata_metadata", None, at).await?; - let metadata: Metadata = Decode::decode(&mut &opaque.0[..])?; + let metadata: Metadata = + Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; Ok(metadata) } } @@ -325,8 +333,8 @@ impl StreamOf { } } -/// A stream of [`Result`]. -pub type StreamOfResults = StreamOf>; +/// A stream of [`Result`]. +pub type StreamOfResults = StreamOf>; /// The status of the transaction. /// @@ -541,7 +549,7 @@ mod test { /// - `call` /// The test covers them because they follow the simple pattern of: /// ```rust,no_run,standalone_crate - /// async fn THE_THING(&self) -> Result, Error> { + /// async fn THE_THING(&self) -> Result, BackendError> { /// retry(|| ).await /// } /// ``` @@ -574,7 +582,7 @@ mod test { /// ```rust,no_run,standalone_crate /// async fn stream_the_thing( /// &self, - /// ) -> Result>)>, Error> { + /// ) -> Result>)>, BackendError> { /// let methods = self.methods.clone(); /// let retry_sub = retry_stream(move || { /// let methods = methods.clone(); @@ -635,7 +643,7 @@ mod test { ); assert!(matches!( results.next().await.unwrap(), - Err(Error::Rpc(RpcError::ClientError( + Err(BackendError::Rpc(RpcError::ClientError( subxt_rpcs::Error::Client(_) ))) )); @@ -644,7 +652,6 @@ mod test { } mod unstable_backend { - use crate::error::RpcError; use subxt_rpcs::methods::chain_head::{ self, Bytes, Initialized, MethodResponse, MethodResponseStarted, OperationError, OperationId, OperationStorageItems, RuntimeSpec, RuntimeVersionEvent, @@ -858,7 +865,7 @@ mod test { .next() .await .unwrap() - .is_err_and(|e| matches!(e, Error::Other(e) if e == "error")) + .is_err_and(|e| matches!(e, BackendError::Other(e) if e == "error")) ); assert!(response.next().await.is_none()); } @@ -1047,7 +1054,7 @@ mod test { let response = backend 
.storage_fetch_values(["ID1".into()].into(), random_hash()) .await; - assert!(matches!(response, Err(Error::Rpc(RpcError::LimitReached)))); + assert!(matches!(response, Err(e) if e.is_rpc_limit_reached())); // Advance the driver until a new chainHead_follow subscription has been started up. let _ = driver.next().await.unwrap(); diff --git a/subxt/src/backend/utils.rs b/subxt/src/backend/utils.rs index f18a9cf929a..5ead7056f99 100644 --- a/subxt/src/backend/utils.rs +++ b/subxt/src/backend/utils.rs @@ -1,7 +1,7 @@ //! RPC utils. use super::{StreamOf, StreamOfResults}; -use crate::error::Error; +use crate::error::BackendError; use futures::future::BoxFuture; use futures::{FutureExt, Stream, StreamExt}; use std::{future::Future, pin::Pin, task::Poll}; @@ -10,10 +10,11 @@ use std::{future::Future, pin::Pin, task::Poll}; type ResubscribeGetter = Box ResubscribeFuture + Send>; /// Future that resolves to a subscription stream. -type ResubscribeFuture = Pin, Error>> + Send>>; +type ResubscribeFuture = + Pin, BackendError>> + Send>>; pub(crate) enum PendingOrStream { - Pending(BoxFuture<'static, Result, Error>>), + Pending(BoxFuture<'static, Result, BackendError>>), Stream(StreamOfResults), } @@ -35,7 +36,7 @@ struct RetrySubscription { impl std::marker::Unpin for RetrySubscription {} impl Stream for RetrySubscription { - type Item = Result; + type Item = Result; fn poll_next( mut self: Pin<&mut Self>, @@ -92,7 +93,7 @@ impl Stream for RetrySubscription { /// ```rust,no_run,standalone_crate /// use subxt::backend::utils::retry; /// -/// async fn some_future() -> Result<(), subxt::error::Error> { +/// async fn some_future() -> Result<(), subxt::error::BackendError> { /// Ok(()) /// } /// @@ -101,10 +102,10 @@ impl Stream for RetrySubscription { /// let result = retry(|| some_future()).await; /// } /// ``` -pub async fn retry(mut retry_future: F) -> Result +pub async fn retry(mut retry_future: F) -> Result where F: FnMut() -> T, - T: Future>, + T: Future>, { const 
REJECTED_MAX_RETRIES: usize = 10; let mut rejected_retries = 0; @@ -163,7 +164,7 @@ where /// }).await; /// } /// ``` -pub async fn retry_stream(sub_stream: F) -> Result, Error> +pub async fn retry_stream(sub_stream: F) -> Result, BackendError> where F: FnMut() -> ResubscribeFuture + Send + 'static + Clone, R: Send + 'static, @@ -187,12 +188,12 @@ mod tests { use super::*; use crate::backend::StreamOf; - fn disconnect_err() -> Error { - Error::Rpc(subxt_rpcs::Error::DisconnectedWillReconnect(String::new()).into()) + fn disconnect_err() -> BackendError { + BackendError::Rpc(subxt_rpcs::Error::DisconnectedWillReconnect(String::new()).into()) } - fn custom_err() -> Error { - Error::Other(String::new()) + fn custom_err() -> BackendError { + BackendError::Other(String::new()) } #[tokio::test] @@ -213,7 +214,7 @@ mod tests { let result = retry_stream .take(5) - .collect::>>() + .collect::>>() .await; assert!(matches!(result[0], Ok(r) if r == 1)); @@ -270,6 +271,6 @@ mod tests { assert!(matches!(result[0], Ok(r) if r == 1)); assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); - assert!(matches!(result[2], Err(ref e) if matches!(e, Error::Other(_)))); + assert!(matches!(result[2], Err(ref e) if matches!(e, BackendError::Other(_)))); } } diff --git a/subxt/src/blocks/block_types.rs b/subxt/src/blocks/block_types.rs index 00326d7f4d7..cac88b254a4 100644 --- a/subxt/src/blocks/block_types.rs +++ b/subxt/src/blocks/block_types.rs @@ -7,10 +7,10 @@ use crate::{ blocks::Extrinsics, client::{OfflineClientT, OnlineClientT}, config::{Config, HashFor, Header}, - error::{BlockError, DecodeError, Error}, + error::{AccountNonceError, BlockError, EventsError, ExtrinsicError}, events, runtime_api::RuntimeApi, - storage::Storage, + storage::StorageClientAt, }; use codec::{Decode, Encode}; @@ -84,38 +84,51 @@ where C: OnlineClientT, { /// Return the events associated with the block, fetching them from the node if necessary. 
- pub async fn events(&self) -> Result, Error> { + pub async fn events(&self) -> Result, EventsError> { get_events(&self.client, self.hash(), &self.cached_events).await } /// Fetch and return the extrinsics in the block body. - pub async fn extrinsics(&self) -> Result, Error> { + pub async fn extrinsics(&self) -> Result, ExtrinsicError> { let block_hash = self.hash(); - let Some(extrinsics) = self.client.backend().block_body(block_hash).await? else { - return Err(BlockError::not_found(block_hash).into()); - }; - Extrinsics::new( + let extrinsics = self + .client + .backend() + .block_body(block_hash) + .await + .map_err(ExtrinsicError::CannotGetBlockBody)? + .ok_or_else(|| ExtrinsicError::BlockNotFound(block_hash.into()))?; + + let extrinsics = Extrinsics::new( self.client.clone(), extrinsics, self.cached_events.clone(), block_hash, - ) + )?; + + Ok(extrinsics) } /// Work with storage. - pub fn storage(&self) -> Storage { - Storage::new(self.client.clone(), self.block_ref.clone()) + pub fn storage(&self) -> StorageClientAt { + StorageClientAt::new(self.client.clone(), self.block_ref.clone()) } /// Execute a runtime API call at this block. - pub async fn runtime_api(&self) -> Result, Error> { - Ok(RuntimeApi::new(self.client.clone(), self.block_ref.clone())) + pub async fn runtime_api(&self) -> RuntimeApi { + RuntimeApi::new(self.client.clone(), self.block_ref.clone()) } /// Get the account nonce for a given account ID at this block. 
- pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { - get_account_nonce(&self.client, account_id, self.hash()).await + pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { + get_account_nonce(&self.client, account_id, self.hash()) + .await + .map_err(|e| BlockError::AccountNonceError { + block_hash: self.hash().into(), + account_id: account_id.encode().into(), + reason: e, + }) } } @@ -124,7 +137,7 @@ pub(crate) async fn get_events( client: &C, block_hash: HashFor, cached_events: &AsyncMutex>>, -) -> Result, Error> +) -> Result, EventsError> where T: Config, C: OnlineClientT, @@ -152,7 +165,7 @@ pub(crate) async fn get_account_nonce( client: &C, account_id: &T::AccountId, block_hash: HashFor, -) -> Result +) -> Result where C: OnlineClientT, T: Config, @@ -173,10 +186,9 @@ where 4 => u32::decode(cursor)?.into(), 8 => u64::decode(cursor)?, _ => { - return Err(Error::Decode(DecodeError::custom_string(format!( - "state call AccountNonceApi_account_nonce returned an unexpected number of bytes: {} (expected 2, 4 or 8)", - account_nonce_bytes.len() - )))); + return Err(AccountNonceError::WrongNumberOfBytes( + account_nonce_bytes.len(), + )); } }; Ok(account_nonce) diff --git a/subxt/src/blocks/blocks_client.rs b/subxt/src/blocks/blocks_client.rs index 62cad074d3b..87e5f556b56 100644 --- a/subxt/src/blocks/blocks_client.rs +++ b/subxt/src/blocks/blocks_client.rs @@ -7,7 +7,7 @@ use crate::{ backend::{BlockRef, StreamOfResults}, client::OnlineClientT, config::{Config, HashFor}, - error::{BlockError, Error}, + error::BlockError, utils::PhantomDataSendSync, }; use derive_where::derive_where; @@ -15,7 +15,7 @@ use futures::StreamExt; use std::future::Future; type BlockStream = StreamOfResults; -type BlockStreamRes = Result, Error>; +type BlockStreamRes = Result, BlockError>; /// A client for working with blocks. 
#[derive_where(Clone; Client)] @@ -49,14 +49,14 @@ where pub fn at( &self, block_ref: impl Into>>, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, BlockError>> + Send + 'static { self.at_or_latest(Some(block_ref.into())) } /// Obtain block details of the latest finalized block. pub fn at_latest( &self, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, BlockError>> + Send + 'static { self.at_or_latest(None) } @@ -65,18 +65,35 @@ where fn at_or_latest( &self, block_ref: Option>>, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, BlockError>> + Send + 'static { let client = self.client.clone(); async move { // If a block ref isn't provided, we'll get the latest finalized ref to use. let block_ref = match block_ref { Some(r) => r, - None => client.backend().latest_finalized_block_ref().await?, + None => client + .backend() + .latest_finalized_block_ref() + .await + .map_err(BlockError::CouldNotGetLatestBlock)?, }; - let block_header = match client.backend().block_header(block_ref.hash()).await? { + let maybe_block_header = client + .backend() + .block_header(block_ref.hash()) + .await + .map_err(|e| BlockError::CouldNotGetBlockHeader { + block_hash: block_ref.hash().into(), + reason: e, + })?; + + let block_header = match maybe_block_header { Some(header) => header, - None => return Err(BlockError::not_found(block_ref.hash()).into()), + None => { + return Err(BlockError::BlockNotFound { + block_hash: block_ref.hash().into(), + }); + } }; Ok(Block::new(block_header, block_ref, client)) @@ -89,14 +106,18 @@ where /// the time. 
pub fn subscribe_all( &self, - ) -> impl Future>, Error>> + Send + 'static + ) -> impl Future>, BlockError>> + Send + 'static where Client: Send + Sync + 'static, { let client = self.client.clone(); let hasher = client.hasher(); header_sub_fut_to_block_sub(self.clone(), async move { - let stream = client.backend().stream_all_block_headers(hasher).await?; + let stream = client + .backend() + .stream_all_block_headers(hasher) + .await + .map_err(BlockError::CouldNotSubscribeToAllBlocks)?; BlockStreamRes::Ok(stream) }) } @@ -107,14 +128,18 @@ where /// the time. pub fn subscribe_best( &self, - ) -> impl Future>, Error>> + Send + 'static + ) -> impl Future>, BlockError>> + Send + 'static where Client: Send + Sync + 'static, { let client = self.client.clone(); let hasher = client.hasher(); header_sub_fut_to_block_sub(self.clone(), async move { - let stream = client.backend().stream_best_block_headers(hasher).await?; + let stream = client + .backend() + .stream_best_block_headers(hasher) + .await + .map_err(BlockError::CouldNotSubscribeToBestBlocks)?; BlockStreamRes::Ok(stream) }) } @@ -122,7 +147,7 @@ where /// Subscribe to finalized blocks. 
pub fn subscribe_finalized( &self, - ) -> impl Future>, Error>> + Send + 'static + ) -> impl Future>, BlockError>> + Send + 'static where Client: Send + Sync + 'static, { @@ -132,7 +157,8 @@ where let stream = client .backend() .stream_finalized_block_headers(hasher) - .await?; + .await + .map_err(BlockError::CouldNotSubscribeToFinalizedBlocks)?; BlockStreamRes::Ok(stream) }) } @@ -143,10 +169,10 @@ where async fn header_sub_fut_to_block_sub( blocks_client: BlocksClient, sub: S, -) -> Result>, Error> +) -> Result>, BlockError> where T: Config, - S: Future>)>, Error>> + S: Future>)>, BlockError>> + Send + 'static, Client: OnlineClientT + Send + Sync + 'static, diff --git a/subxt/src/blocks/extrinsic_types.rs b/subxt/src/blocks/extrinsic_types.rs index 4ba491a7ba7..11f55e75eef 100644 --- a/subxt/src/blocks/extrinsic_types.rs +++ b/subxt/src/blocks/extrinsic_types.rs @@ -6,18 +6,16 @@ use crate::{ blocks::block_types::{CachedEvents, get_events}, client::{OfflineClientT, OnlineClientT}, config::{Config, HashFor}, - error::Error, + error::{EventsError, ExtrinsicDecodeErrorAt, ExtrinsicError}, events, }; - use derive_where::derive_where; -use scale_decode::DecodeAsType; +use scale_decode::{DecodeAsFields, DecodeAsType}; use subxt_core::blocks::{ExtrinsicDetails as CoreExtrinsicDetails, Extrinsics as CoreExtrinsics}; // Re-export anything that's directly returned/used in the APIs below. pub use subxt_core::blocks::{ - ExtrinsicMetadataDetails, ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, - StaticExtrinsic, + ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, StaticExtrinsic, }; /// The body of a block. @@ -38,7 +36,7 @@ where extrinsics: Vec>, cached_events: CachedEvents, hash: HashFor, - ) -> Result { + ) -> Result { let inner = CoreExtrinsics::decode_from(extrinsics, client.metadata())?; Ok(Self { inner, @@ -81,10 +79,10 @@ where /// If an error occurs, all subsequent iterations return `None`. 
pub fn find( &self, - ) -> impl Iterator, Error>> { + ) -> impl Iterator, ExtrinsicError>> { self.inner.find::().map(|res| { match res { - Err(e) => Err(Error::from(e)), + Err(e) => Err(ExtrinsicError::from(e)), Ok(ext) => { // Wrap details from subxt-core into what we want here: let details = ExtrinsicDetails::new( @@ -105,18 +103,22 @@ where /// Iterate through the extrinsics using metadata to dynamically decode and skip /// them, and return the first extrinsic found which decodes to the provided `E` type. - pub fn find_first(&self) -> Result>, Error> { + pub fn find_first( + &self, + ) -> Result>, ExtrinsicError> { self.find::().next().transpose() } /// Iterate through the extrinsics using metadata to dynamically decode and skip /// them, and return the last extrinsic found which decodes to the provided `Ev` type. - pub fn find_last(&self) -> Result>, Error> { + pub fn find_last( + &self, + ) -> Result>, ExtrinsicError> { self.find::().last().transpose() } /// Find an extrinsics that decodes to the type provided. Returns true if it was found. - pub fn has(&self) -> Result { + pub fn has(&self) -> Result { Ok(self.find::().next().transpose()?.is_some()) } } @@ -213,32 +215,27 @@ where } /// See [`subxt_core::blocks::ExtrinsicDetails::pallet_name()`]. - pub fn pallet_name(&self) -> Result<&str, Error> { - self.inner.pallet_name().map_err(Into::into) - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::variant_name()`]. - pub fn variant_name(&self) -> Result<&str, Error> { - self.inner.variant_name().map_err(Into::into) + pub fn pallet_name(&self) -> &str { + self.inner.pallet_name() } - /// See [`subxt_core::blocks::ExtrinsicDetails::extrinsic_metadata()`]. - pub fn extrinsic_metadata(&self) -> Result, Error> { - self.inner.extrinsic_metadata().map_err(Into::into) + /// See [`subxt_core::blocks::ExtrinsicDetails::call_name()`]. + pub fn call_name(&self) -> &str { + self.inner.call_name() } - /// See [`subxt_core::blocks::ExtrinsicDetails::field_values()`]. 
- pub fn field_values(&self) -> Result, Error> { - self.inner.field_values().map_err(Into::into) + /// See [`subxt_core::blocks::ExtrinsicDetails::decode_as_fields()`]. + pub fn decode_as_fields(&self) -> Result { + self.inner.decode_as_fields().map_err(Into::into) } /// See [`subxt_core::blocks::ExtrinsicDetails::as_extrinsic()`]. - pub fn as_extrinsic(&self) -> Result, Error> { + pub fn as_extrinsic(&self) -> Result, ExtrinsicError> { self.inner.as_extrinsic::().map_err(Into::into) } /// See [`subxt_core::blocks::ExtrinsicDetails::as_root_extrinsic()`]. - pub fn as_root_extrinsic(&self) -> Result { + pub fn as_root_extrinsic(&self) -> Result { self.inner.as_root_extrinsic::().map_err(Into::into) } } @@ -249,7 +246,7 @@ where C: OnlineClientT, { /// The events associated with the extrinsic. - pub async fn events(&self) -> Result, Error> { + pub async fn events(&self) -> Result, EventsError> { let events = get_events(&self.client, self.block_hash, &self.cached_events).await?; let ext_hash = self.inner.hash(); Ok(ExtrinsicEvents::new(ext_hash, self.index(), events)) @@ -308,7 +305,7 @@ impl ExtrinsicEvents { /// /// This works in the same way that [`events::Events::iter()`] does, with the /// exception that it filters out events not related to the submitted extrinsic. - pub fn iter(&self) -> impl Iterator, Error>> { + pub fn iter(&self) -> impl Iterator, EventsError>> { self.events.iter().filter(|ev| { ev.as_ref() .map(|ev| ev.phase() == events::Phase::ApplyExtrinsic(self.idx)) @@ -320,7 +317,7 @@ impl ExtrinsicEvents { /// /// This works in the same way that [`events::Events::find()`] does, with the /// exception that it filters out events not related to the submitted extrinsic. 
- pub fn find(&self) -> impl Iterator> { + pub fn find(&self) -> impl Iterator> { self.iter() .filter_map(|ev| ev.and_then(|ev| ev.as_event::()).transpose()) } @@ -330,7 +327,7 @@ impl ExtrinsicEvents { /// /// This works in the same way that [`events::Events::find_first()`] does, with the /// exception that it ignores events not related to the submitted extrinsic. - pub fn find_first(&self) -> Result, Error> { + pub fn find_first(&self) -> Result, EventsError> { self.find::().next().transpose() } @@ -339,7 +336,7 @@ impl ExtrinsicEvents { /// /// This works in the same way that [`events::Events::find_last()`] does, with the /// exception that it ignores events not related to the submitted extrinsic. - pub fn find_last(&self) -> Result, Error> { + pub fn find_last(&self) -> Result, EventsError> { self.find::().last().transpose() } @@ -347,7 +344,7 @@ impl ExtrinsicEvents { /// /// This works in the same way that [`events::Events::has()`] does, with the /// exception that it ignores events not related to the submitted extrinsic. - pub fn has(&self) -> Result { + pub fn has(&self) -> Result { Ok(self.find::().next().transpose()?.is_some()) } } diff --git a/subxt/src/book/usage/blocks.rs b/subxt/src/book/usage/blocks.rs index e6e714a649c..6bf3394b0a5 100644 --- a/subxt/src/book/usage/blocks.rs +++ b/subxt/src/book/usage/blocks.rs @@ -64,7 +64,7 @@ //! get only [the first one](crate::blocks::Extrinsics::find_first), or [the last one](crate::blocks::Extrinsics::find_last). //! //! The following example monitors `TransferKeepAlive` extrinsics on the Polkadot network. -//! We statically decode them and access the [tip](crate::blocks::ExtrinsicTransactionExtensions::tip()) and [account nonce](crate::blocks::ExtrinsicTransactionExtensions::nonce()) +//! We statically decode them and access the [tip](crate::blocks::ExtrinsicExtrinsicParams::tip()) and [account nonce](crate::blocks::ExtrinsicExtrinsicParams::nonce()) //! transaction extensions. //! //! 
```rust,ignore @@ -90,10 +90,10 @@ //! The [Config](crate::Config) implementation for your chain defines which transaction extensions you expect. //! Once you get hold of the [ExtrinsicDetails](crate::blocks::ExtrinsicDetails) for an extrinsic you are interested in, //! you can try to [get its transaction extensions](crate::blocks::ExtrinsicDetails::transaction_extensions()). -//! These are only available on V4 signed extrinsics or V5 general extrinsics. You can try to [find a specific transaction extension](crate::blocks::ExtrinsicTransactionExtensions::find), -//! in the returned [transaction extensions](crate::blocks::ExtrinsicTransactionExtensions). +//! These are only available on V4 signed extrinsics or V5 general extrinsics. You can try to [find a specific transaction extension](crate::blocks::ExtrinsicExtrinsicParams::find), +//! in the returned [transaction extensions](crate::blocks::ExtrinsicExtrinsicParams). //! -//! Subxt also provides utility functions to get the [tip](crate::blocks::ExtrinsicTransactionExtensions::tip()) and the -//! [account nonce](crate::blocks::ExtrinsicTransactionExtensions::tip()) associated with an extrinsic, given its transaction extensions. +//! Subxt also provides utility functions to get the [tip](crate::blocks::ExtrinsicExtrinsicParams::tip()) and the +//! [account nonce](crate::blocks::ExtrinsicExtrinsicParams::nonce()) associated with an extrinsic, given its transaction extensions. //! If you prefer to do things dynamically you can get the data of the transaction extension as a [scale value](crate::blocks::ExtrinsicTransactionExtension::value()). //! diff --git a/subxt/src/book/usage/storage.rs b/subxt/src/book/usage/storage.rs index a76003c4a94..53eb1a7ff5b 100644 --- a/subxt/src/book/usage/storage.rs +++ b/subxt/src/book/usage/storage.rs @@ -113,13 +113,6 @@ #![doc = include_str!("../../../examples/storage_iterating_dynamic.rs")] //! ``` //! -//! Here is an example of iterating over partial keys.
In this example some multi-signature operations -//! are sent to the node. We can iterate over the pending multisig operations of a single multisig account: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/storage_iterating_partial.rs")] -//! ``` -//! //! ### Advanced //! //! For more advanced use cases, have a look at [`crate::storage::Storage::fetch_raw`] and diff --git a/subxt/src/client/mod.rs b/subxt/src/client/mod.rs index 3ca69b11576..8b9c917eec6 100644 --- a/subxt/src/client/mod.rs +++ b/subxt/src/client/mod.rs @@ -13,6 +13,6 @@ mod online_client; pub use offline_client::{OfflineClient, OfflineClientT}; pub use online_client::{ - ClientRuntimeUpdater, OnlineClient, OnlineClientT, RuntimeUpdaterStream, Update, UpgradeError, + ClientRuntimeUpdater, OnlineClient, OnlineClientT, RuntimeUpdaterStream, Update, }; pub use subxt_core::client::{ClientState, RuntimeVersion}; diff --git a/subxt/src/client/online_client.rs b/subxt/src/client/online_client.rs index 849b006b698..1361558811f 100644 --- a/subxt/src/client/online_client.rs +++ b/subxt/src/client/online_client.rs @@ -10,7 +10,7 @@ use crate::{ blocks::{BlockRef, BlocksClient}, config::{Config, HashFor}, constants::ConstantsClient, - error::Error, + error::{BackendError, OnlineClientError, RuntimeUpdateeApplyError, RuntimeUpdaterError}, events::EventsClient, runtime_api::RuntimeApiClient, storage::StorageClient, @@ -18,6 +18,7 @@ use crate::{ view_functions::ViewFunctionsClient, }; use derive_where::derive_where; +use futures::TryFutureExt; use futures::future; use std::sync::{Arc, RwLock}; use subxt_core::client::{ClientState, RuntimeVersion}; @@ -60,13 +61,13 @@ impl std::fmt::Debug for OnlineClient { impl OnlineClient { /// Construct a new [`OnlineClient`] using default settings which /// point to a locally running node on `ws://127.0.0.1:9944`. 
- pub async fn new() -> Result, Error> { + pub async fn new() -> Result, OnlineClientError> { let url = "ws://127.0.0.1:9944"; OnlineClient::from_url(url).await } /// Construct a new [`OnlineClient`], providing a URL to connect to. - pub async fn from_url(url: impl AsRef) -> Result, Error> { + pub async fn from_url(url: impl AsRef) -> Result, OnlineClientError> { subxt_rpcs::utils::validate_url_is_secure(url.as_ref())?; OnlineClient::from_insecure_url(url).await } @@ -74,7 +75,9 @@ impl OnlineClient { /// Construct a new [`OnlineClient`], providing a URL to connect to. /// /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). - pub async fn from_insecure_url(url: impl AsRef) -> Result, Error> { + pub async fn from_insecure_url( + url: impl AsRef, + ) -> Result, OnlineClientError> { let client = RpcClient::from_insecure_url(url).await?; let backend = LegacyBackend::builder().build(client); OnlineClient::from_backend(Arc::new(backend)).await @@ -86,7 +89,7 @@ impl OnlineClient { /// This will use the current default [`Backend`], which may change in future releases. pub async fn from_rpc_client( rpc_client: impl Into, - ) -> Result, Error> { + ) -> Result, OnlineClientError> { let rpc_client = rpc_client.into(); let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); OnlineClient::from_backend(backend).await @@ -110,7 +113,7 @@ impl OnlineClient { runtime_version: RuntimeVersion, metadata: impl Into, rpc_client: impl Into, - ) -> Result, Error> { + ) -> Result, OnlineClientError> { let rpc_client = rpc_client.into(); let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); OnlineClient::from_backend_with(genesis_hash, runtime_version, metadata, backend) @@ -118,13 +121,23 @@ impl OnlineClient { /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`] /// implementation to power it. Other details will be obtained from the chain. 
- pub async fn from_backend>(backend: Arc) -> Result, Error> { - let latest_block = backend.latest_finalized_block_ref().await?; + pub async fn from_backend>( + backend: Arc, + ) -> Result, OnlineClientError> { + let latest_block = backend + .latest_finalized_block_ref() + .await + .map_err(OnlineClientError::CannotGetLatestFinalizedBlock)?; let (genesis_hash, runtime_version, metadata) = future::join3( - backend.genesis_hash(), - backend.current_runtime_version(), - OnlineClient::fetch_metadata(&*backend, latest_block.hash()), + backend + .genesis_hash() + .map_err(OnlineClientError::CannotGetGenesisHash), + backend + .current_runtime_version() + .map_err(OnlineClientError::CannotGetCurrentRuntimeVersion), + OnlineClient::fetch_metadata(&*backend, latest_block.hash()) + .map_err(OnlineClientError::CannotFetchMetadata), ) .await; @@ -148,7 +161,7 @@ impl OnlineClient { runtime_version: RuntimeVersion, metadata: impl Into, backend: Arc, - ) -> Result, Error> { + ) -> Result, OnlineClientError> { use subxt_core::config::Hasher; let metadata = metadata.into(); @@ -169,7 +182,7 @@ impl OnlineClient { async fn fetch_metadata( backend: &dyn Backend, block_hash: HashFor, - ) -> Result { + ) -> Result { #[cfg(feature = "unstable-metadata")] { /// The unstable metadata version number. @@ -194,7 +207,7 @@ impl OnlineClient { async fn fetch_latest_stable_metadata( backend: &dyn Backend, block_hash: HashFor, - ) -> Result { + ) -> Result { // The metadata versions we support in Subxt, from newest to oldest. use subxt_metadata::SUPPORTED_METADATA_VERSIONS; @@ -416,9 +429,9 @@ impl ClientRuntimeUpdater { } /// Tries to apply a new update. 
- pub fn apply_update(&self, update: Update) -> Result<(), UpgradeError> { + pub fn apply_update(&self, update: Update) -> Result<(), RuntimeUpdateeApplyError> { if !self.is_runtime_version_different(&update.runtime_version) { - return Err(UpgradeError::SameVersion); + return Err(RuntimeUpdateeApplyError::SameVersion); } self.do_update(update); @@ -430,12 +443,12 @@ impl ClientRuntimeUpdater { /// /// *Note:* This will run indefinitely until it errors, so the typical usage /// would be to run it in a separate background task. - pub async fn perform_runtime_updates(&self) -> Result<(), Error> { + pub async fn perform_runtime_updates(&self) -> Result<(), RuntimeUpdaterError> { // Obtain an update subscription to further detect changes in the runtime version of the node. let mut runtime_version_stream = self.runtime_updates().await?; - while let Some(update) = runtime_version_stream.next().await { - let update = update?; + loop { + let update = runtime_version_stream.next().await?; // This only fails if received the runtime version is the same the current runtime version // which might occur because that runtime subscriptions in substrate sends out the initial @@ -443,8 +456,6 @@ impl ClientRuntimeUpdater { // Thus, fine to ignore here as it strictly speaking isn't really an error let _ = self.apply_update(update); } - - Ok(()) } /// Low-level API to get runtime updates as a stream but it's doesn't check if the @@ -452,9 +463,16 @@ impl ClientRuntimeUpdater { /// /// Instead that's up to the user of this API to decide when to update and /// to perform the actual updating. 
- pub async fn runtime_updates(&self) -> Result, Error> { + pub async fn runtime_updates(&self) -> Result, RuntimeUpdaterError> { + let stream = self + .0 + .backend() + .stream_runtime_version() + .await + .map_err(RuntimeUpdaterError::CannotStreamRuntimeVersion)?; + Ok(RuntimeUpdaterStream { - stream: self.0.backend().stream_runtime_version().await?, + stream, client: self.0.clone(), }) } @@ -468,38 +486,27 @@ pub struct RuntimeUpdaterStream { impl RuntimeUpdaterStream { /// Wait for the next runtime update. - pub async fn next(&mut self) -> Option> { - let runtime_version = match self.stream.next().await? { - Ok(runtime_version) => runtime_version, - Err(err) => return Some(Err(err)), - }; - - let at = - match wait_runtime_upgrade_in_finalized_block(&self.client, &runtime_version).await? { - Ok(at) => at, - Err(err) => return Some(Err(err)), - }; - - let metadata = match OnlineClient::fetch_metadata(self.client.backend(), at.hash()).await { - Ok(metadata) => metadata, - Err(err) => return Some(Err(err)), - }; - - Some(Ok(Update { + pub async fn next(&mut self) -> Result { + let runtime_version = self + .stream + .next() + .await + .ok_or(RuntimeUpdaterError::UnexpectedEndOfUpdateStream)? + .map_err(RuntimeUpdaterError::CannotGetNextRuntimeVersion)?; + + let at = wait_runtime_upgrade_in_finalized_block(&self.client, &runtime_version).await?; + + let metadata = OnlineClient::fetch_metadata(self.client.backend(), at.hash()) + .await + .map_err(RuntimeUpdaterError::CannotFetchNewMetadata)?; + + Ok(Update { metadata, runtime_version, - })) + }) } } -/// Error that can occur during upgrade. -#[non_exhaustive] -#[derive(Debug, Clone)] -pub enum UpgradeError { - /// The version is the same as the current version. - SameVersion, -} - /// Represents the state when a runtime upgrade occurred. 
pub struct Update { runtime_version: RuntimeVersion, @@ -522,64 +529,52 @@ impl Update { async fn wait_runtime_upgrade_in_finalized_block( client: &OnlineClient, runtime_version: &RuntimeVersion, -) -> Option>, Error>> { - use scale_value::At; - +) -> Result>, RuntimeUpdaterError> { let hasher = client .inner .read() .expect("Lock shouldn't be poisoned") .hasher; - let mut block_sub = match client + let mut block_sub = client .backend() .stream_finalized_block_headers(hasher) .await - { - Ok(s) => s, - Err(err) => return Some(Err(err)), - }; + .map_err(RuntimeUpdaterError::CannotStreamFinalizedBlocks)?; let block_ref = loop { - let (_, block_ref) = match block_sub.next().await? { - Ok(n) => n, - Err(err) => return Some(Err(err)), - }; - - let key: Vec = vec![]; - let addr = crate::dynamic::storage("System", "LastRuntimeUpgrade", key); - - let chunk = match client.storage().at(block_ref.hash()).fetch(&addr).await { - Ok(Some(v)) => v, - Ok(None) => { - // The storage `system::lastRuntimeUpgrade` should always exist. - // - unreachable!("The storage item `system::lastRuntimeUpgrade` should always exist") - } - Err(e) => return Some(Err(e)), - }; - - let scale_val = match chunk.to_value() { - Ok(v) => v, - Err(e) => return Some(Err(e.into())), - }; - - let Some(Ok(spec_version)) = scale_val - .at("spec_version") - .and_then(|v| v.as_u128()) - .map(u32::try_from) - else { - return Some(Err(Error::Other( - "Decoding `RuntimeVersion::spec_version` as u32 failed".to_string(), - ))); - }; + let (_, block_ref) = block_sub + .next() + .await + .ok_or(RuntimeUpdaterError::UnexpectedEndOfBlockStream)? + .map_err(RuntimeUpdaterError::CannotGetNextFinalizedBlock)?; + + let addr = + crate::dynamic::storage::<(), scale_value::Value>("System", "LastRuntimeUpgrade"); + + let client_at = client.storage().at(block_ref.hash()); + let value = client_at + .entry(addr) + // The storage `system::lastRuntimeUpgrade` should always exist. 
+ // + .map_err(|_| RuntimeUpdaterError::CantFindSystemLastRuntimeUpgrade)? + .fetch(()) + .await + .map_err(RuntimeUpdaterError::CantFetchLastRuntimeUpgrade)? + .decode_as::() + .map_err(RuntimeUpdaterError::CannotDecodeLastRuntimeUpgrade)?; + + #[derive(scale_decode::DecodeAsType)] + struct LastRuntimeUpgrade { + spec_version: u32, + } // We are waiting for the chain to have the same spec version // as sent out via the runtime subscription. - if spec_version == runtime_version.spec_version { + if value.spec_version == runtime_version.spec_version { break block_ref; } }; - Some(Ok(block_ref)) + Ok(block_ref) } diff --git a/subxt/src/constants/constants_client.rs b/subxt/src/constants/constants_client.rs index b098e0684a6..516dc3dd699 100644 --- a/subxt/src/constants/constants_client.rs +++ b/subxt/src/constants/constants_client.rs @@ -2,7 +2,7 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use crate::{Config, client::OfflineClientT, error::Error}; +use crate::{Config, client::OfflineClientT, error::ConstantError}; use derive_where::derive_where; use subxt_core::constants::address::Address; @@ -28,16 +28,22 @@ impl> ConstantsClient { /// if the address is valid (or if it's not possible to check since the address has no validation hash). /// Return an error if the address was not valid or something went wrong trying to validate it (ie /// the pallet or constant in question do not exist at all). - pub fn validate(&self, address: &Addr) -> Result<(), Error> { + pub fn validate(&self, address: &Addr) -> Result<(), ConstantError> { let metadata = self.client.metadata(); - subxt_core::constants::validate(address, &metadata).map_err(Error::from) + subxt_core::constants::validate(address, &metadata) } /// Access the constant at the address given, returning the type defined by this address. /// This is probably used with addresses given from static codegen, although you can manually /// construct your own, too. 
- pub fn at(&self, address: &Addr) -> Result { + pub fn at(&self, address: &Addr) -> Result { let metadata = self.client.metadata(); - subxt_core::constants::get(address, &metadata).map_err(Error::from) + subxt_core::constants::get(address, &metadata) + } + + /// Access the bytes of a constant by the address it is registered under. + pub fn bytes_at(&self, address: &Addr) -> Result, ConstantError> { + let metadata = self.client.metadata(); + subxt_core::constants::get_bytes(address, &metadata) } } diff --git a/subxt/src/constants/mod.rs b/subxt/src/constants/mod.rs index dd7b45138fe..b9b3e9380af 100644 --- a/subxt/src/constants/mod.rs +++ b/subxt/src/constants/mod.rs @@ -7,6 +7,4 @@ mod constants_client; pub use constants_client::ConstantsClient; -pub use subxt_core::constants::address::{ - Address, DefaultAddress, DynamicAddress, StaticAddress, dynamic, -}; +pub use subxt_core::constants::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/subxt/src/custom_values/custom_values_client.rs b/subxt/src/custom_values/custom_values_client.rs index 5141f150c31..ed2525a3bf5 100644 --- a/subxt/src/custom_values/custom_values_client.rs +++ b/subxt/src/custom_values/custom_values_client.rs @@ -1,8 +1,8 @@ use crate::client::OfflineClientT; -use crate::{Config, Error}; +use crate::{Config, error::CustomValueError}; use derive_where::derive_where; -use subxt_core::custom_values::address::{Address, Yes}; +use subxt_core::custom_values::address::{Address, Maybe}; /// A client for accessing custom values stored in the metadata. #[derive_where(Clone; Client)] @@ -24,29 +24,32 @@ impl CustomValuesClient { impl> CustomValuesClient { /// Access a custom value by the address it is registered under. This can be just a [str] to get back a dynamic value, /// or a static address from the generated static interface to get a value of a static type returned. 
- pub fn at + ?Sized>( + pub fn at + ?Sized>( &self, address: &Addr, - ) -> Result { - subxt_core::custom_values::get(address, &self.client.metadata()).map_err(Into::into) + ) -> Result { + subxt_core::custom_values::get(address, &self.client.metadata()) } /// Access the bytes of a custom value by the address it is registered under. - pub fn bytes_at(&self, address: &Addr) -> Result, Error> { - subxt_core::custom_values::get_bytes(address, &self.client.metadata()).map_err(Into::into) + pub fn bytes_at( + &self, + address: &Addr, + ) -> Result, CustomValueError> { + subxt_core::custom_values::get_bytes(address, &self.client.metadata()) } /// Run the validation logic against some custom value address you'd like to access. Returns `Ok(())` /// if the address is valid (or if it's not possible to check since the address has no validation hash). /// Returns an error if the address was not valid (wrong name, type or raw bytes) - pub fn validate(&self, address: &Addr) -> Result<(), Error> { - subxt_core::custom_values::validate(address, &self.client.metadata()).map_err(Into::into) + pub fn validate(&self, address: &Addr) -> Result<(), CustomValueError> { + subxt_core::custom_values::validate(address, &self.client.metadata()) } } #[cfg(test)] mod tests { - use crate::custom_values::CustomValuesClient; + use crate::custom_values::{self, CustomValuesClient}; use crate::{Metadata, OfflineClient, SubstrateConfig}; use codec::Encode; use scale_decode::DecodeAsType; @@ -117,10 +120,12 @@ mod tests { }, mock_metadata(), ); + let custom_value_client = CustomValuesClient::new(client); assert!(custom_value_client.at("No one").is_err()); - let person_decoded_value_thunk = custom_value_client.at("Person").unwrap(); - let person: Person = person_decoded_value_thunk.as_type().unwrap(); + + let person_addr = custom_values::dynamic::("Person"); + let person = custom_value_client.at(&person_addr).unwrap(); assert_eq!( person, Person { diff --git a/subxt/src/custom_values/mod.rs 
b/subxt/src/custom_values/mod.rs index 25103e9c31b..e1f5d3a0d06 100644 --- a/subxt/src/custom_values/mod.rs +++ b/subxt/src/custom_values/mod.rs @@ -7,4 +7,4 @@ mod custom_values_client; pub use custom_values_client::CustomValuesClient; -pub use subxt_core::custom_values::address::{Address, StaticAddress, Yes}; +pub use subxt_core::custom_values::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/subxt/src/error/dispatch_error.rs b/subxt/src/error/dispatch_error.rs index 0d307e7f449..7b1123aa59d 100644 --- a/subxt/src/error/dispatch_error.rs +++ b/subxt/src/error/dispatch_error.rs @@ -5,14 +5,12 @@ //! A representation of the dispatch error; an error returned when //! something fails in trying to submit/execute a transaction. -use crate::metadata::{DecodeWithMetadata, Metadata}; +use super::{DispatchErrorDecodeError, ModuleErrorDecodeError, ModuleErrorDetailsError}; +use crate::metadata::Metadata; use core::fmt::Debug; use scale_decode::{DecodeAsType, TypeResolver, visitor::DecodeAsTypeResult}; - use std::{borrow::Cow, marker::PhantomData}; -use super::{Error, MetadataError}; - /// An error dispatching a transaction. #[derive(Debug, thiserror::Error, PartialEq, Eq)] #[non_exhaustive] @@ -169,11 +167,19 @@ impl std::fmt::Display for ModuleError { impl ModuleError { /// Return more details about this error. 
- pub fn details(&self) -> Result, MetadataError> { - let pallet = self.metadata.pallet_by_index_err(self.pallet_index())?; + pub fn details(&self) -> Result, ModuleErrorDetailsError> { + let pallet = self.metadata.pallet_by_index(self.pallet_index()).ok_or( + ModuleErrorDetailsError::PalletNotFound { + pallet_index: self.pallet_index(), + }, + )?; + let variant = pallet .error_variant_by_index(self.error_index()) - .ok_or_else(|| MetadataError::VariantIndexNotFound(self.error_index()))?; + .ok_or_else(|| ModuleErrorDetailsError::ErrorVariantNotFound { + pallet_name: pallet.name().into(), + error_index: self.error_index(), + })?; Ok(ModuleErrorDetails { pallet, variant }) } @@ -209,12 +215,13 @@ impl ModuleError { } /// Attempts to decode the ModuleError into the top outer Error enum. - pub fn as_root_error(&self) -> Result { + pub fn as_root_error(&self) -> Result { let decoded = E::decode_as_type( &mut &self.bytes[..], self.metadata.outer_enums().error_enum_ty(), self.metadata.types(), - )?; + ) + .map_err(ModuleErrorDecodeError)?; Ok(decoded) } @@ -223,7 +230,7 @@ impl ModuleError { /// Details about the module error. pub struct ModuleErrorDetails<'a> { /// The pallet that the error is in - pub pallet: crate::metadata::types::PalletMetadata<'a>, + pub pallet: subxt_metadata::PalletMetadata<'a>, /// The variant representing the error pub variant: &'a scale_info::Variant, } @@ -234,11 +241,11 @@ impl DispatchError { pub fn decode_from<'a>( bytes: impl Into>, metadata: Metadata, - ) -> Result { + ) -> Result { let bytes = bytes.into(); let dispatch_error_ty_id = metadata .dispatch_error_ty() - .ok_or(MetadataError::DispatchErrorNotFound)?; + .ok_or(DispatchErrorDecodeError::DispatchErrorTypeIdNotFound)?; // The aim is to decode our bytes into roughly this shape. 
This is copied from // `sp_runtime::DispatchError`; we need the variant names and any inner variant @@ -290,11 +297,12 @@ impl DispatchError { } // Decode into our temporary error: - let decoded_dispatch_err = DecodedDispatchError::decode_with_metadata( + let decoded_dispatch_err = DecodedDispatchError::decode_as_type( &mut &*bytes, dispatch_error_ty_id, - &metadata, - )?; + metadata.types(), + ) + .map_err(DispatchErrorDecodeError::CouldNotDecodeDispatchError)?; // Convert into the outward-facing error, mainly by handling the Module variant. let dispatch_error = match decoded_dispatch_err { @@ -333,7 +341,9 @@ impl DispatchError { "Can't decode error sp_runtime::DispatchError: bytes do not match known shapes" ); // Return _all_ of the bytes; every "unknown" return should be consistent. - return Err(super::Error::Unknown(bytes.to_vec())); + return Err(DispatchErrorDecodeError::CouldNotDecodeModuleError { + bytes: bytes.to_vec(), + }); }; // And return our outward-facing version: diff --git a/subxt/src/error/hex.rs b/subxt/src/error/hex.rs new file mode 100644 index 00000000000..01d67a998ea --- /dev/null +++ b/subxt/src/error/hex.rs @@ -0,0 +1,15 @@ +/// Display hex strings. +#[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] +pub struct Hex(String); + +impl> From for Hex { + fn from(value: T) -> Self { + Hex(hex::encode(value.as_ref())) + } +} + +impl std::fmt::Display for Hex { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} diff --git a/subxt/src/error/mod.rs b/subxt/src/error/mod.rs index d19bdba1988..b64826d7d55 100644 --- a/subxt/src/error/mod.rs +++ b/subxt/src/error/mod.rs @@ -5,8 +5,7 @@ //! Types representing the errors that can be returned. mod dispatch_error; - -use subxt_core::error::{BlockError as CoreBlockError, Error as CoreError}; +mod hex; crate::macros::cfg_unstable_light_client! 
{ pub use subxt_lightclient::LightClientError; @@ -18,120 +17,159 @@ pub use dispatch_error::{ }; // Re-expose the errors we use from other crates here: -pub use crate::metadata::Metadata; +pub use crate::Metadata; +pub use hex::Hex; pub use scale_decode::Error as DecodeError; pub use scale_encode::Error as EncodeError; -pub use subxt_core::error::{ExtrinsicError, MetadataError, StorageAddressError}; pub use subxt_metadata::TryFromError as MetadataTryFromError; -/// The underlying error enum, generic over the type held by the `Runtime` -/// variant. Prefer to use the [`Error`] and [`Error`] aliases over -/// using this type directly. +// Re-export core error types we're just reusing. +pub use subxt_core::error::{ + ConstantError, + CustomValueError, + EventsError as CoreEventsError, + // These errors are exposed as-is: + ExtrinsicDecodeErrorAt, + // These errors are wrapped: + ExtrinsicError as CoreExtrinsicError, + RuntimeApiError as CoreRuntimeApiError, + StorageError as CoreStorageError, + StorageKeyError, + StorageValueError, + ViewFunctionError as CoreViewFunctionError, +}; + +/// A global error type. Any of the errors exposed here can convert into this +/// error via `.into()`, but this error isn't itself exposed from anything. #[derive(Debug, thiserror::Error)] #[non_exhaustive] +#[allow(missing_docs)] pub enum Error { - /// Io error. - #[error("Io error: {0}")] - Io(#[from] std::io::Error), - /// Codec error. - #[error("Scale codec error: {0}")] - Codec(#[from] codec::Error), - /// Rpc error. #[error(transparent)] - Rpc(#[from] RpcError), - /// Serde serialization error - #[error("Serde json error: {0}")] - Serialization(#[from] serde_json::error::Error), - /// Error working with metadata. - #[error("Metadata error: {0}")] - Metadata(#[from] MetadataError), - /// Error decoding metadata. - #[error("Metadata Decoding error: {0}")] - MetadataDecoding(#[from] MetadataTryFromError), - /// Runtime error. 
- #[error("Runtime error: {0}")] - Runtime(#[from] DispatchError), - /// Error decoding to a [`crate::dynamic::Value`]. - #[error("Error decoding into dynamic value: {0}")] - Decode(#[from] DecodeError), - /// Error encoding from a [`crate::dynamic::Value`]. - #[error("Error encoding from dynamic value: {0}")] - Encode(#[from] EncodeError), - /// Transaction progress error. - #[error("Transaction error: {0}")] - Transaction(#[from] TransactionError), - /// Error constructing the appropriate extrinsic params. - #[error("Extrinsic params error: {0}")] - Extrinsic(#[from] ExtrinsicError), - /// Block related error. - #[error("Block error: {0}")] - Block(#[from] BlockError), - /// An error encoding a storage address. - #[error("Error encoding storage address: {0}")] - StorageAddress(#[from] StorageAddressError), - /// The bytes representing an error that we were unable to decode. - #[error("An error occurred but it could not be decoded: {0:?}")] - Unknown(Vec), - /// Light client error. - #[cfg(feature = "unstable-light-client")] - #[cfg_attr(docsrs, doc(cfg(feature = "unstable-light-client")))] - #[error("An error occurred but it could not be decoded: {0}")] - LightClient(#[from] LightClientError), - /// Other error. 
- #[error("Other error: {0}")] - Other(String), -} - -impl From for Error { - fn from(value: CoreError) -> Self { - match value { - CoreError::Codec(e) => Error::Codec(e), - CoreError::Metadata(e) => Error::Metadata(e), - CoreError::StorageAddress(e) => Error::StorageAddress(e), - CoreError::Decode(e) => Error::Decode(e), - CoreError::Encode(e) => Error::Encode(e), - CoreError::Extrinsic(e) => Error::Extrinsic(e), - CoreError::Block(e) => Error::Block(e.into()), - } - } + ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), + #[error(transparent)] + ConstantError(#[from] ConstantError), + #[error(transparent)] + CustomValueError(#[from] CustomValueError), + #[error(transparent)] + StorageKeyError(#[from] StorageKeyError), + #[error(transparent)] + StorageValueError(#[from] StorageValueError), + #[error(transparent)] + BackendError(#[from] BackendError), + #[error(transparent)] + BlockError(#[from] BlockError), + #[error(transparent)] + AccountNonceError(#[from] AccountNonceError), + #[error(transparent)] + OnlineClientError(#[from] OnlineClientError), + #[error(transparent)] + RuntimeUpdaterError(#[from] RuntimeUpdaterError), + #[error(transparent)] + RuntimeUpdateeApplyError(#[from] RuntimeUpdateeApplyError), + #[error(transparent)] + RuntimeApiError(#[from] RuntimeApiError), + #[error(transparent)] + EventsError(#[from] EventsError), + #[error(transparent)] + ExtrinsicError(#[from] ExtrinsicError), + #[error(transparent)] + ViewFunctionError(#[from] ViewFunctionError), + #[error(transparent)] + TransactionProgressError(#[from] TransactionProgressError), + #[error(transparent)] + TransactionStatusError(#[from] TransactionStatusError), + #[error(transparent)] + TransactionEventsError(#[from] TransactionEventsError), + #[error(transparent)] + TransactionFinalizedSuccessError(#[from] TransactionFinalizedSuccessError), + #[error(transparent)] + ModuleErrorDetailsError(#[from] ModuleErrorDetailsError), + #[error(transparent)] + ModuleErrorDecodeError(#[from] 
ModuleErrorDecodeError), + #[error(transparent)] + DispatchErrorDecodeError(#[from] DispatchErrorDecodeError), + #[error(transparent)] + StorageError(#[from] StorageError), } -impl<'a> From<&'a str> for Error { - fn from(error: &'a str) -> Self { - Error::Other(error.into()) +impl From for Error { + fn from(value: std::convert::Infallible) -> Self { + match value {} } } -impl From for Error { - fn from(error: String) -> Self { - Error::Other(error) +impl Error { + /// Checks whether the error was caused by a RPC re-connection. + pub fn is_disconnected_will_reconnect(&self) -> bool { + matches!( + self.backend_error(), + Some(BackendError::Rpc(RpcError::ClientError( + subxt_rpcs::Error::DisconnectedWillReconnect(_) + ))) + ) } -} -impl From for Error { - fn from(value: std::convert::Infallible) -> Self { - match value {} + /// Checks whether the error was caused by a RPC request being rejected. + pub fn is_rpc_limit_reached(&self) -> bool { + matches!( + self.backend_error(), + Some(BackendError::Rpc(RpcError::LimitReached)) + ) } -} -impl From for Error { - fn from(value: scale_decode::visitor::DecodeError) -> Self { - Error::Decode(value.into()) + fn backend_error(&self) -> Option<&BackendError> { + match self { + Error::ExtrinsicDecodeErrorAt(_) + | Error::ConstantError(_) + | Error::CustomValueError(_) + | Error::StorageKeyError(_) + | Error::StorageValueError(_) + | Error::BackendError(_) + | Error::RuntimeUpdateeApplyError(_) + | Error::TransactionStatusError(_) + | Error::ModuleErrorDetailsError(_) + | Error::ModuleErrorDecodeError(_) + | Error::DispatchErrorDecodeError(_) => None, + Error::BlockError(e) => e.backend_error(), + Error::AccountNonceError(e) => e.backend_error(), + Error::OnlineClientError(e) => e.backend_error(), + Error::RuntimeUpdaterError(e) => e.backend_error(), + Error::RuntimeApiError(e) => e.backend_error(), + Error::EventsError(e) => e.backend_error(), + Error::ExtrinsicError(e) => e.backend_error(), + Error::ViewFunctionError(e) => 
e.backend_error(), + Error::TransactionProgressError(e) => e.backend_error(), + Error::TransactionEventsError(e) => e.backend_error(), + Error::TransactionFinalizedSuccessError(e) => e.backend_error(), + Error::StorageError(e) => e.backend_error(), + } } } -impl From for Error { - fn from(value: subxt_rpcs::Error) -> Self { - Error::Rpc(value.into()) - } +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum BackendError { + #[error("Backend error: RPC error: {0}")] + Rpc(#[from] RpcError), + #[error("Backend error: Could not find metadata version {0}")] + MetadataVersionNotFound(u32), + #[error("Backend error: Could not codec::Decode Runtime API response: {0}")] + CouldNotScaleDecodeRuntimeResponse(codec::Error), + #[error("Backend error: Could not codec::Decode metadata bytes into subxt::Metadata: {0}")] + CouldNotDecodeMetadata(codec::Error), + // This is for errors in `Backend` implementations which aren't any of the "pre-defined" set above: + #[error("Custom backend error: {0}")] + Other(String), } -impl Error { +impl BackendError { /// Checks whether the error was caused by a RPC re-connection. pub fn is_disconnected_will_reconnect(&self) -> bool { matches!( self, - Error::Rpc(RpcError::ClientError( + BackendError::Rpc(RpcError::ClientError( subxt_rpcs::Error::DisconnectedWillReconnect(_) )) ) @@ -139,7 +177,13 @@ impl Error { /// Checks whether the error was caused by a RPC request being rejected. pub fn is_rpc_limit_reached(&self) -> bool { - matches!(self, Error::Rpc(RpcError::LimitReached)) + matches!(self, BackendError::Rpc(RpcError::LimitReached)) + } +} + +impl From for BackendError { + fn from(value: subxt_rpcs::Error) -> Self { + BackendError::Rpc(RpcError::ClientError(value)) } } @@ -148,8 +192,6 @@ impl Error { #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum RpcError { - // Dev note: We need the error to be safely sent between threads - // for `subscribe_to_block_headers_filling_in_gaps` and friends. 
/// Error related to the RPC client. #[error("RPC error: {0}")] ClientError(#[from] subxt_rpcs::Error), @@ -157,77 +199,318 @@ pub enum RpcError { /// which is not technically an RPC error but is treated as an error in our own APIs. #[error("RPC error: limit reached")] LimitReached, - /// The RPC subscription dropped. + /// The RPC subscription was dropped. #[error("RPC error: subscription dropped.")] SubscriptionDropped, } /// Block error -#[derive(Clone, Debug, thiserror::Error)] +#[derive(Debug, thiserror::Error)] #[non_exhaustive] +#[allow(missing_docs)] pub enum BlockError { - /// An error containing the hash of the block that was not found. - #[error("Could not find a block with hash {0} (perhaps it was on a non-finalized fork?)")] - NotFound(String), - /// Leftover bytes found after decoding the extrinsic. #[error( - "After decoding the exntrinsic at index {extrinsic_index}, {num_leftover_bytes} bytes were left, suggesting that decoding may have failed" + "Could not find the block body with hash {block_hash} (perhaps it was on a non-finalized fork?)" )] - LeftoverBytes { - /// Index of the extrinsic that failed to decode. - extrinsic_index: usize, - /// Number of bytes leftover after decoding the extrinsic. - num_leftover_bytes: usize, + BlockNotFound { block_hash: Hex }, + #[error("Could not download the block header with hash {block_hash}: {reason}")] + CouldNotGetBlockHeader { + block_hash: Hex, + reason: BackendError, }, - /// Decoding error. - #[error("Cannot decode extrinsic at index {extrinsic_index}: {error}")] - ExtrinsicDecodeError { - /// Index of the extrinsic that failed to decode. - extrinsic_index: usize, - /// The decode error. 
- error: subxt_core::error::ExtrinsicDecodeError, + #[error("Could not download the latest block header: {0}")] + CouldNotGetLatestBlock(BackendError), + #[error("Could not subscribe to all blocks: {0}")] + CouldNotSubscribeToAllBlocks(BackendError), + #[error("Could not subscribe to best blocks: {0}")] + CouldNotSubscribeToBestBlocks(BackendError), + #[error("Could not subscribe to finalized blocks: {0}")] + CouldNotSubscribeToFinalizedBlocks(BackendError), + #[error("Error getting account nonce at block {block_hash}: {reason}")] + AccountNonceError { + block_hash: Hex, + account_id: Hex, + reason: AccountNonceError, + }, } -impl From for BlockError { - fn from(value: CoreBlockError) -> Self { - match value { - CoreBlockError::LeftoverBytes { - extrinsic_index, - num_leftover_bytes, - } => BlockError::LeftoverBytes { - extrinsic_index, - num_leftover_bytes, - }, - CoreBlockError::ExtrinsicDecodeError { - extrinsic_index, - error, - } => BlockError::ExtrinsicDecodeError { - extrinsic_index, - error, - }, +impl BlockError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + BlockError::CouldNotGetBlockHeader { reason: e, .. } + | BlockError::CouldNotGetLatestBlock(e) + | BlockError::CouldNotSubscribeToAllBlocks(e) + | BlockError::CouldNotSubscribeToBestBlocks(e) + | BlockError::CouldNotSubscribeToFinalizedBlocks(e) => Some(e), + _ => None, + } + } } -impl BlockError { - /// Produce an error that a block with the given hash cannot be found.
- pub fn not_found(hash: impl AsRef<[u8]>) -> BlockError { - let hash = format!("0x{}", hex::encode(hash)); - BlockError::NotFound(hash) +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum AccountNonceError { + #[error("Could not retrieve account nonce: {0}")] + CouldNotRetrieve(#[from] BackendError), + #[error("Could not decode account nonce: {0}")] + CouldNotDecode(#[from] codec::Error), + #[error("Wrong number of account nonce bytes returned: {0} (expected 2, 4 or 8)")] + WrongNumberOfBytes(usize), +} + +impl AccountNonceError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + AccountNonceError::CouldNotRetrieve(e) => Some(e), + _ => None, + } } } -/// Transaction error. -#[derive(Clone, Debug, Eq, thiserror::Error, PartialEq)] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum OnlineClientError { + #[error("Cannot construct OnlineClient: {0}")] + RpcError(#[from] subxt_rpcs::Error), + #[error( + "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")] + CannotGetGenesisHash(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")] + CannotGetCurrentRuntimeVersion(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")] + CannotFetchMetadata(BackendError), +} + +impl OnlineClientError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + OnlineClientError::CannotGetLatestFinalizedBlock(e) + | OnlineClientError::CannotGetGenesisHash(e) + | OnlineClientError::CannotGetCurrentRuntimeVersion(e) + | OnlineClientError::CannotFetchMetadata(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum RuntimeUpdaterError { + #[error("Error 
subscribing to runtime updates: The update stream ended unexpectedly")] + UnexpectedEndOfUpdateStream, + #[error("Error subscribing to runtime updates: The finalized block stream ended unexpectedly")] + UnexpectedEndOfBlockStream, + #[error("Error subscribing to runtime updates: Can't stream runtime version: {0}")] + CannotStreamRuntimeVersion(BackendError), + #[error("Error subscribing to runtime updates: Can't get next runtime version in stream: {0}")] + CannotGetNextRuntimeVersion(BackendError), + #[error("Error subscribing to runtime updates: Cannot stream finalized blocks: {0}")] + CannotStreamFinalizedBlocks(BackendError), + #[error("Error subscribing to runtime updates: Cannot get next finalized block in stream: {0}")] + CannotGetNextFinalizedBlock(BackendError), + #[error("Cannot fetch new metadata for runtime update: {0}")] + CannotFetchNewMetadata(BackendError), + #[error( + "Error subscribing to runtime updates: Cannot find the System.LastRuntimeUpgrade storage entry" + )] + CantFindSystemLastRuntimeUpgrade, + #[error("Error subscribing to runtime updates: Cannot fetch last runtime upgrade: {0}")] + CantFetchLastRuntimeUpgrade(StorageError), + #[error("Error subscribing to runtime updates: Cannot decode last runtime upgrade: {0}")] + CannotDecodeLastRuntimeUpgrade(StorageValueError), +} + +impl RuntimeUpdaterError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + RuntimeUpdaterError::CannotStreamRuntimeVersion(e) + | RuntimeUpdaterError::CannotGetNextRuntimeVersion(e) + | RuntimeUpdaterError::CannotStreamFinalizedBlocks(e) + | RuntimeUpdaterError::CannotGetNextFinalizedBlock(e) + | RuntimeUpdaterError::CannotFetchNewMetadata(e) => Some(e), + _ => None, + } + } +} + +/// Error that can occur during upgrade. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RuntimeUpdateeApplyError { + #[error("The proposed runtime update is the same as the current version")] + SameVersion, +} + +/// Error working with Runtime APIs +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RuntimeApiError { + #[error("Cannot access Runtime APIs at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error("{0}")] + OfflineError(#[from] CoreRuntimeApiError), + #[error("Cannot call the Runtime API: {0}")] + CannotCallApi(BackendError), +} + +impl RuntimeApiError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + RuntimeApiError::CannotGetLatestFinalizedBlock(e) + | RuntimeApiError::CannotCallApi(e) => Some(e), + _ => None, + } + } +} + +/// Error working with events. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum EventsError { + #[error("{0}")] + OfflineError(#[from] CoreEventsError), + #[error("Cannot access events at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot fetch event bytes: {0}")] + CannotFetchEventBytes(BackendError), +} + +impl EventsError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + EventsError::CannotGetLatestFinalizedBlock(e) + | EventsError::CannotFetchEventBytes(e) => Some(e), + _ => None, + } + } +} + +/// Error working with extrinsics. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ExtrinsicError { + #[error("{0}")] + OfflineError(#[from] CoreExtrinsicError), + #[error("Could not download block body to extract extrinsics from: {0}")] + CannotGetBlockBody(BackendError), + #[error("Block not found: {0}")] + BlockNotFound(Hex), + #[error("{0}")] + CouldNotDecodeExtrinsics(#[from] ExtrinsicDecodeErrorAt), + #[error( + "Extrinsic submission error: Cannot get latest finalized block to grab account nonce at: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot find block header for block {block_hash}")] + CannotFindBlockHeader { block_hash: Hex }, + #[error("Error getting account nonce at block {block_hash}: {reason}")] + AccountNonceError { + block_hash: Hex, + account_id: Hex, + reason: AccountNonceError, + }, + #[error("Cannot submit extrinsic: {0}")] + ErrorSubmittingTransaction(BackendError), + #[error("A transaction status error was returned while submitting the extrinsic: {0}")] + TransactionStatusError(TransactionStatusError), + #[error( + "The transaction status stream encountered an error while submitting the extrinsic: {0}" + )] + TransactionStatusStreamError(BackendError), + #[error( + "The transaction status stream unexpectedly ended, so we don't know the status of the submitted extrinsic" + )] + UnexpectedEndOfTransactionStatusStream, + #[error("Cannot get fee info from Runtime API: {0}")] + CannotGetFeeInfo(BackendError), + #[error("Cannot get validation info from Runtime API: {0}")] + CannotGetValidationInfo(BackendError), + #[error("Cannot decode ValidationResult bytes: {0}")] + CannotDecodeValidationResult(codec::Error), + #[error("ValidationResult bytes could not be decoded")] + UnexpectedValidationResultBytes(Vec), +} + +impl ExtrinsicError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + ExtrinsicError::CannotGetBlockBody(e) + | ExtrinsicError::CannotGetLatestFinalizedBlock(e) + |
ExtrinsicError::ErrorSubmittingTransaction(e) + | ExtrinsicError::TransactionStatusStreamError(e) + | ExtrinsicError::CannotGetFeeInfo(e) + | ExtrinsicError::CannotGetValidationInfo(e) => Some(e), + ExtrinsicError::AccountNonceError { reason, .. } => reason.backend_error(), + _ => None, + } + } +} + +/// Error working with View Functions. #[non_exhaustive] -pub enum TransactionError { - /// The block hash that the transaction was added to could not be found. - /// This is probably because the block was retracted before being finalized. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ViewFunctionError { + #[error("{0}")] + OfflineError(#[from] CoreViewFunctionError), #[error( - "The block containing the transaction can no longer be found (perhaps it was on a non-finalized fork?)" + "Cannot access View Functions at latest block: Cannot fetch latest finalized block: {0}" )] - BlockNotFound, + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot call the View Function Runtime API: {0}")] + CannotCallApi(BackendError), +} + +impl ViewFunctionError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + ViewFunctionError::CannotGetLatestFinalizedBlock(e) + | ViewFunctionError::CannotCallApi(e) => Some(e), + _ => None, + } + } +} + +/// Error during the transaction progress. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum TransactionProgressError { + #[error("Cannot get the next transaction progress update: {0}")] + CannotGetNextProgressUpdate(BackendError), + #[error("Error during transaction progress: {0}")] + TransactionStatusError(#[from] TransactionStatusError), + #[error( + "The transaction status stream unexpectedly ended, so we have no further transaction progress updates" + )] + UnexpectedEndOfTransactionStatusStream, +} + +impl TransactionProgressError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionProgressError::CannotGetNextProgressUpdate(e) => Some(e), + TransactionProgressError::TransactionStatusError(_) => None, + TransactionProgressError::UnexpectedEndOfTransactionStatusStream => None, + } + } +} + +/// An error emitted as the result of a transaction progress update. +#[derive(Clone, Debug, Eq, thiserror::Error, PartialEq)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum TransactionStatusError { /// An error happened on the node that the transaction was submitted to. #[error("Error handling transaction: {0}")] Error(String), @@ -238,3 +521,156 @@ pub enum TransactionError { #[error("The transaction was dropped: {0}")] Dropped(String), } + +/// Error fetching events for a just-submitted transaction +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum TransactionEventsError { + #[error( + "The block containing the submitted transaction ({block_hash}) could not be downloaded: {error}" + )] + CannotFetchBlockBody { + block_hash: Hex, + error: BackendError, + }, + #[error( + "Cannot find the submitted transaction (hash: {transaction_hash}) in the block (hash: {block_hash}) it is supposed to be in."
+ )] + CannotFindTransactionInBlock { + block_hash: Hex, + transaction_hash: Hex, + }, + #[error("The block containing the submitted transaction ({block_hash}) could not be found")] + BlockNotFound { block_hash: Hex }, + #[error( + "Could not decode event at index {event_index} for the submitted transaction at block {block_hash}: {error}" + )] + CannotDecodeEventInBlock { + event_index: usize, + block_hash: Hex, + error: EventsError, + }, + #[error("Could not fetch events for the submitted transaction: {error}")] + CannotFetchEventsForTransaction { + block_hash: Hex, + transaction_hash: Hex, + error: EventsError, + }, + #[error("The transaction led to a DispatchError, but we failed to decode it: {error}")] + CannotDecodeDispatchError { + error: DispatchErrorDecodeError, + bytes: Vec, + }, + #[error("The transaction failed with the following dispatch error: {0}")] + ExtrinsicFailed(#[from] DispatchError), +} + +impl TransactionEventsError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionEventsError::CannotFetchBlockBody { error, .. } => Some(error), + TransactionEventsError::CannotDecodeEventInBlock { error, .. } + | TransactionEventsError::CannotFetchEventsForTransaction { error, .. } => { + error.backend_error() + } + _ => None, + } + } +} + +/// Error waiting for the transaction to be finalized and successful. 
+#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum TransactionFinalizedSuccessError { + #[error("Could not finalize the transaction: {0}")] + FinalizationError(#[from] TransactionProgressError), + #[error("The transaction did not succeed: {0}")] + SuccessError(#[from] TransactionEventsError), +} + +impl TransactionFinalizedSuccessError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionFinalizedSuccessError::FinalizationError(e) => e.backend_error(), + TransactionFinalizedSuccessError::SuccessError(e) => e.backend_error(), + } + } +} + +/// Error getting the details for a module error +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ModuleErrorDetailsError { + #[error( + "Could not get details for the DispatchError: could not find pallet index {pallet_index}" + )] + PalletNotFound { pallet_index: u8 }, + #[error( + "Could not get details for the DispatchError: could not find error index {error_index} in pallet {pallet_name}" + )] + ErrorVariantNotFound { + pallet_name: String, + error_index: u8, + }, +} + +/// Error decoding the [`ModuleError`] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +#[error("Could not decode the DispatchError::Module payload into the given type: {0}")] +pub struct ModuleErrorDecodeError(scale_decode::Error); + +/// Error decoding the [`DispatchError`] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum DispatchErrorDecodeError { + #[error( + "Could not decode the DispatchError: could not find the corresponding type ID in the metadata" + )] + DispatchErrorTypeIdNotFound, + #[error("Could not decode the DispatchError: {0}")] + CouldNotDecodeDispatchError(scale_decode::Error), + #[error("Could not decode the DispatchError::Module variant")] + CouldNotDecodeModuleError { + /// The bytes corresponding to the Module variant we were unable to decode: + bytes: Vec, + }, +} + +///
Error working with storage. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageError { + #[error("{0}")] + Offline(#[from] CoreStorageError), + #[error("Cannot access storage at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error( + "No storage value found at the given address, and no default value to fall back to using." + )] + NoValueFound, + #[error("Cannot fetch the storage value: {0}")] + CannotFetchValue(BackendError), + #[error("Cannot iterate storage values: {0}")] + CannotIterateValues(BackendError), + #[error("Encountered an error iterating over storage values: {0}")] + StreamFailure(BackendError), +} + +impl StorageError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + StorageError::CannotGetLatestFinalizedBlock(e) + | StorageError::CannotFetchValue(e) + | StorageError::CannotIterateValues(e) + | StorageError::StreamFailure(e) => Some(e), + _ => None, + } + } +} diff --git a/subxt/src/events/events_client.rs b/subxt/src/events/events_client.rs index 354e93840a0..dc0ee1924c1 100644 --- a/subxt/src/events/events_client.rs +++ b/subxt/src/events/events_client.rs @@ -6,7 +6,7 @@ use crate::backend::{Backend, BackendExt, BlockRef}; use crate::{ client::OnlineClientT, config::{Config, HashFor}, - error::Error, + error::EventsError, events::Events, }; use derive_where::derive_where; @@ -44,12 +44,14 @@ where pub fn at( &self, block_ref: impl Into>>, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, EventsError>> + Send + 'static { self.at_or_latest(Some(block_ref.into())) } /// Obtain events for the latest finalized block. 
- pub fn at_latest(&self) -> impl Future, Error>> + Send + 'static { + pub fn at_latest( + &self, + ) -> impl Future, EventsError>> + Send + 'static { self.at_or_latest(None) } @@ -57,7 +59,7 @@ where fn at_or_latest( &self, block_ref: Option>>, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, EventsError>> + Send + 'static { // Clone and pass the client in like this so that we can explicitly // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); @@ -65,7 +67,11 @@ where // If a block ref isn't provided, we'll get the latest finalized block to use. let block_ref = match block_ref { Some(r) => r, - None => client.backend().latest_finalized_block_ref().await?, + None => client + .backend() + .latest_finalized_block_ref() + .await + .map_err(EventsError::CannotGetLatestFinalizedBlock)?, }; let event_bytes = get_event_bytes(client.backend(), block_ref.hash()).await?; @@ -88,9 +94,11 @@ fn system_events_key() -> [u8; 32] { pub(crate) async fn get_event_bytes( backend: &dyn Backend, block_hash: HashFor, -) -> Result, Error> { - Ok(backend +) -> Result, EventsError> { + let bytes = backend .storage_fetch_value(system_events_key().to_vec(), block_hash) - .await? - .unwrap_or_default()) + .await + .map_err(EventsError::CannotFetchEventBytes)? 
+ .unwrap_or_default(); + Ok(bytes) } diff --git a/subxt/src/events/events_type.rs b/subxt/src/events/events_type.rs index b3bc52c094a..04b706656e6 100644 --- a/subxt/src/events/events_type.rs +++ b/subxt/src/events/events_type.rs @@ -1,9 +1,10 @@ use crate::{ - Error, Metadata, + Metadata, config::{Config, HashFor}, + error::EventsError, }; use derive_where::derive_where; -use scale_decode::DecodeAsType; +use scale_decode::{DecodeAsFields, DecodeAsType}; use subxt_core::events::{EventDetails as CoreEventDetails, Events as CoreEvents}; pub use subxt_core::events::{EventMetadataDetails, Phase, StaticEvent}; @@ -49,7 +50,7 @@ impl Events { // use of it with our `FilterEvents` stuff. pub fn iter( &self, - ) -> impl Iterator, Error>> + Send + Sync + 'static { + ) -> impl Iterator, EventsError>> + Send + Sync + 'static { self.inner .iter() .map(|item| item.map(|e| EventDetails { inner: e }).map_err(Into::into)) @@ -58,24 +59,24 @@ impl Events { /// Iterate through the events using metadata to dynamically decode and skip /// them, and return only those which should decode to the provided `Ev` type. /// If an error occurs, all subsequent iterations return `None`. - pub fn find(&self) -> impl Iterator> { + pub fn find(&self) -> impl Iterator> { self.inner.find::().map(|item| item.map_err(Into::into)) } /// Iterate through the events using metadata to dynamically decode and skip /// them, and return the first event found which decodes to the provided `Ev` type. - pub fn find_first(&self) -> Result, Error> { + pub fn find_first(&self) -> Result, EventsError> { self.inner.find_first::().map_err(Into::into) } /// Iterate through the events using metadata to dynamically decode and skip /// them, and return the last event found which decodes to the provided `Ev` type. - pub fn find_last(&self) -> Result, Error> { + pub fn find_last(&self) -> Result, EventsError> { self.inner.find_last::().map_err(Into::into) } /// Find an event that decodes to the type provided. 
Returns true if it was found. - pub fn has(&self) -> Result { + pub fn has(&self) -> Result { self.inner.has::().map_err(Into::into) } } @@ -138,20 +139,20 @@ impl EventDetails { /// Decode and provide the event fields back in the form of a [`scale_value::Composite`] /// type which represents the named or unnamed fields that were present in the event. - pub fn field_values(&self) -> Result, Error> { - self.inner.field_values().map_err(Into::into) + pub fn decode_as_fields(&self) -> Result { + self.inner.decode_as_fields().map_err(Into::into) } /// Attempt to decode these [`EventDetails`] into a type representing the event fields. /// Such types are exposed in the codegen as `pallet_name::events::EventName` types. - pub fn as_event(&self) -> Result, Error> { + pub fn as_event(&self) -> Result, EventsError> { self.inner.as_event::().map_err(Into::into) } /// Attempt to decode these [`EventDetails`] into a root event type (which includes /// the pallet and event enum variants as well as the event fields). A compatible /// type for this is exposed via static codegen as a root level `Event` type. 
- pub fn as_root_event(&self) -> Result { + pub fn as_root_event(&self) -> Result { self.inner.as_root_event::().map_err(Into::into) } diff --git a/subxt/src/events/mod.rs b/subxt/src/events/mod.rs index e9a807ee126..185cafa2508 100644 --- a/subxt/src/events/mod.rs +++ b/subxt/src/events/mod.rs @@ -9,8 +9,8 @@ mod events_client; mod events_type; -use crate::Error; use crate::client::OnlineClientT; +use crate::error::EventsError; use subxt_core::{ Metadata, config::{Config, HashFor}, @@ -24,7 +24,7 @@ pub async fn new_events_from_client( metadata: Metadata, block_hash: HashFor, client: C, -) -> Result, Error> +) -> Result, EventsError> where T: Config, C: OnlineClientT, diff --git a/subxt/src/lib.rs b/subxt/src/lib.rs index b6b872952bc..cad61c9b993 100644 --- a/subxt/src/lib.rs +++ b/subxt/src/lib.rs @@ -68,16 +68,13 @@ pub mod config { /// Types representing the metadata obtained from a node. pub mod metadata { - pub use subxt_core::metadata::{DecodeWithMetadata, EncodeWithMetadata, Metadata}; - // Expose metadata types under a sub module in case somebody needs to reference them: - pub use subxt_metadata as types; + pub use subxt_metadata::*; } /// Submit dynamic transactions. 
pub mod dynamic { pub use subxt_core::dynamic::{ - At, DecodedValue, DecodedValueThunk, Value, constant, runtime_api_call, storage, tx, - view_function_call, + At, DecodedValue, Value, constant, runtime_api_call, storage, tx, view_function_call, }; } diff --git a/subxt/src/runtime_api/mod.rs b/subxt/src/runtime_api/mod.rs index 3c58c5b2352..f6dafb31cba 100644 --- a/subxt/src/runtime_api/mod.rs +++ b/subxt/src/runtime_api/mod.rs @@ -9,6 +9,4 @@ mod runtime_types; pub use runtime_client::RuntimeApiClient; pub use runtime_types::RuntimeApi; -pub use subxt_core::runtime_api::payload::{ - DefaultPayload, DynamicPayload, Payload, StaticPayload, dynamic, -}; +pub use subxt_core::runtime_api::payload::{DynamicPayload, Payload, StaticPayload, dynamic}; diff --git a/subxt/src/runtime_api/runtime_client.rs b/subxt/src/runtime_api/runtime_client.rs index 9597b031a1b..6412468be76 100644 --- a/subxt/src/runtime_api/runtime_client.rs +++ b/subxt/src/runtime_api/runtime_client.rs @@ -8,7 +8,7 @@ use crate::{ backend::BlockRef, client::OnlineClientT, config::{Config, HashFor}, - error::Error, + error::RuntimeApiError, }; use derive_where::derive_where; use std::{future::Future, marker::PhantomData}; @@ -43,13 +43,17 @@ where /// Obtain a runtime API interface at the latest finalized block. pub fn at_latest( &self, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, RuntimeApiError>> + Send + 'static { // Clone and pass the client in like this so that we can explicitly // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); async move { // get the ref for the latest finalized block and use that. 
- let block_ref = client.backend().latest_finalized_block_ref().await?; + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(RuntimeApiError::CannotGetLatestFinalizedBlock)?; Ok(RuntimeApi::new(client, block_ref)) } diff --git a/subxt/src/runtime_api/runtime_types.rs b/subxt/src/runtime_api/runtime_types.rs index 86254b359d2..51d5ac601ff 100644 --- a/subxt/src/runtime_api/runtime_types.rs +++ b/subxt/src/runtime_api/runtime_types.rs @@ -7,7 +7,7 @@ use crate::{ backend::BlockRef, client::OnlineClientT, config::{Config, HashFor}, - error::Error, + error::RuntimeApiError, }; use derive_where::derive_where; use std::{future::Future, marker::PhantomData}; @@ -40,7 +40,7 @@ where /// if the payload is valid (or if it's not possible to check since the payload has no validation hash). /// Return an error if the payload was not valid or something went wrong trying to validate it (ie /// the runtime API in question do not exist at all) - pub fn validate(&self, payload: &Call) -> Result<(), Error> { + pub fn validate(&self, payload: &Call) -> Result<(), RuntimeApiError> { subxt_core::runtime_api::validate(payload, &self.client.metadata()).map_err(Into::into) } @@ -50,7 +50,7 @@ where &self, function: &'a str, call_parameters: Option<&'a [u8]>, - ) -> impl Future, Error>> + use<'a, Client, T> { + ) -> impl Future, RuntimeApiError>> + use<'a, Client, T> { let client = self.client.clone(); let block_hash = self.block_ref.hash(); // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), @@ -59,7 +59,8 @@ where let data = client .backend() .call(function, call_parameters, block_hash) - .await?; + .await + .map_err(RuntimeApiError::CannotCallApi)?; Ok(data) } } @@ -68,7 +69,8 @@ where pub fn call( &self, payload: Call, - ) -> impl Future> + use { + ) -> impl Future> + use + { let client = self.client.clone(); let block_hash = self.block_ref.hash(); // Ensure that the returned future doesn't have a lifetime tied to 
api.runtime_api(), @@ -87,7 +89,8 @@ where let bytes = client .backend() .call(&call_name, Some(call_args.as_slice()), block_hash) - .await?; + .await + .map_err(RuntimeApiError::CannotCallApi)?; // Decode the response. let value = subxt_core::runtime_api::decode_value(&mut &*bytes, &payload, &metadata)?; diff --git a/subxt/src/storage/mod.rs b/subxt/src/storage/mod.rs index e7867c8b1c1..437441c7a76 100644 --- a/subxt/src/storage/mod.rs +++ b/subxt/src/storage/mod.rs @@ -5,10 +5,11 @@ //! Types associated with accessing and working with storage items. mod storage_client; -mod storage_type; +mod storage_client_at; +// mod storage_value; +// mod storage_key_value; +// mod storage_key; pub use storage_client::StorageClient; -pub use storage_type::{Storage, StorageKeyValuePair}; -pub use subxt_core::storage::address::{ - Address, DefaultAddress, DynamicAddress, StaticAddress, StaticStorageKey, StorageKey, dynamic, -}; +pub use storage_client_at::StorageClientAt; +pub use subxt_core::storage::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/subxt/src/storage/storage_client.rs b/subxt/src/storage/storage_client.rs index c02a4ba1577..ca6fcffa57a 100644 --- a/subxt/src/storage/storage_client.rs +++ b/subxt/src/storage/storage_client.rs @@ -2,12 +2,12 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use super::storage_type::Storage; +use super::storage_client_at::StorageClientAt; use crate::{ backend::BlockRef, client::{OfflineClientT, OnlineClientT}, config::{Config, HashFor}, - error::Error, + error::StorageError, }; use derive_where::derive_where; use std::{future::Future, marker::PhantomData}; @@ -39,24 +39,9 @@ where /// if the address is valid (or if it's not possible to check since the address has no validation hash). /// Return an error if the address was not valid or something went wrong trying to validate it (ie /// the pallet or storage entry in question do not exist at all). 
- pub fn validate(&self, address: &Addr) -> Result<(), Error> { + pub fn validate(&self, address: &Addr) -> Result<(), StorageError> { subxt_core::storage::validate(address, &self.client.metadata()).map_err(Into::into) } - - /// Convert some storage address into the raw bytes that would be submitted to the node in order - /// to retrieve the entries at the root of the associated address. - pub fn address_root_bytes(&self, address: &Addr) -> Vec { - subxt_core::storage::get_address_root_bytes(address) - } - - /// Convert some storage address into the raw bytes that would be submitted to the node in order - /// to retrieve an entry. This fails if [`Address::append_entry_bytes`] does; in the built-in - /// implementation this would be if the pallet and storage entry being asked for is not available on the - /// node you're communicating with, or if the metadata is missing some type information (which should not - /// happen). - pub fn address_bytes(&self, address: &Addr) -> Result, Error> { - subxt_core::storage::get_address_bytes(address, &self.client.metadata()).map_err(Into::into) - } } impl StorageClient @@ -65,22 +50,27 @@ where Client: OnlineClientT, { /// Obtain storage at some block hash. - pub fn at(&self, block_ref: impl Into>>) -> Storage { - Storage::new(self.client.clone(), block_ref.into()) + pub fn at(&self, block_ref: impl Into>>) -> StorageClientAt { + StorageClientAt::new(self.client.clone(), block_ref.into()) } /// Obtain storage at the latest finalized block. pub fn at_latest( &self, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, StorageError>> + Send + 'static + { // Clone and pass the client in like this so that we can explicitly // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); async move { // get the ref for the latest finalized block and use that. 
- let block_ref = client.backend().latest_finalized_block_ref().await?; + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(StorageError::CannotGetLatestFinalizedBlock)?; - Ok(Storage::new(client, block_ref)) + Ok(StorageClientAt::new(client, block_ref)) } } } diff --git a/subxt/src/storage/storage_client_at.rs b/subxt/src/storage/storage_client_at.rs new file mode 100644 index 00000000000..4f5e608983e --- /dev/null +++ b/subxt/src/storage/storage_client_at.rs @@ -0,0 +1,220 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + backend::{BackendExt, BlockRef}, + client::{OfflineClientT, OnlineClientT}, + config::{Config, HashFor}, + error::StorageError, +}; +use derive_where::derive_where; +use futures::StreamExt; +use std::marker::PhantomData; +use subxt_core::Metadata; +use subxt_core::storage::{PrefixOf, address::Address}; +use subxt_core::utils::{Maybe, Yes}; + +pub use subxt_core::storage::{StorageKeyValue, StorageValue}; + +/// Query the runtime storage. +#[derive_where(Clone; Client)] +pub struct StorageClientAt { + client: Client, + metadata: Metadata, + block_ref: BlockRef>, + _marker: PhantomData, +} + +impl StorageClientAt +where + T: Config, + Client: OfflineClientT, +{ + /// Create a new [`StorageClientAt`]. + pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { + // Retrieve and store metadata here so that we can borrow it in + // subsequent structs, and thus also borrow storage info and + // things that borrow from metadata. + let metadata = client.metadata(); + + Self { + client, + metadata, + block_ref, + _marker: PhantomData, + } + } +} + +impl StorageClientAt +where + T: Config, + Client: OfflineClientT, +{ + /// This returns a [`StorageEntryClient`], which allows working with the storage entry at the provided address. 
+ pub fn entry( + &self, + address: Addr, + ) -> Result, StorageError> { + let inner = subxt_core::storage::entry(address, &self.metadata)?; + Ok(StorageEntryClient { + inner, + client: self.client.clone(), + block_ref: self.block_ref.clone(), + _marker: core::marker::PhantomData, + }) + } +} + +/// This represents a single storage entry (be it a plain value or map) +/// and the operations that can be performed on it. +pub struct StorageEntryClient<'atblock, T: Config, Client, Addr, IsPlain> { + inner: subxt_core::storage::StorageEntry<'atblock, Addr, IsPlain>, + client: Client, + block_ref: BlockRef>, + _marker: PhantomData, +} + +impl<'atblock, T, Client, Addr, IsPlain> StorageEntryClient<'atblock, T, Client, Addr, IsPlain> +where + T: Config, + Addr: Address, +{ + /// Name of the pallet containing this storage entry. + pub fn pallet_name(&self) -> &str { + self.inner.pallet_name() + } + + /// Name of the storage entry. + pub fn entry_name(&self) -> &str { + self.inner.entry_name() + } + + /// Is the storage entry a plain value? + pub fn is_plain(&self) -> bool { + self.inner.is_plain() + } + + /// Is the storage entry a map? + pub fn is_map(&self) -> bool { + self.inner.is_map() + } + + /// Return the default value for this storage entry, if there is one. Returns `None` if there + /// is no default value. + pub fn default_value(&self) -> Option> { + self.inner.default_value() + } +} + +// Plain values get a fetch method with no extra arguments. 
+impl<'atblock, T, Client, Addr> StorageEntryClient<'atblock, T, Client, Addr, Yes> +where + T: Config, + Addr: Address, + Client: OnlineClientT, +{ + pub async fn fetch(&self) -> Result, StorageError> { + let value = self.try_fetch().await?.map_or_else( + || self.inner.default_value().ok_or(StorageError::NoValueFound), + Ok, + )?; + + Ok(value) + } + + pub async fn try_fetch( + &self, + ) -> Result>, StorageError> { + let value = self + .client + .backend() + .storage_fetch_value(self.key_prefix().to_vec(), self.block_ref.hash()) + .await + .map_err(StorageError::CannotFetchValue)? + .map(|bytes| self.inner.value(bytes)); + + Ok(value) + } + + /// The keys for plain storage values are always 32 byte hashes. + pub fn key_prefix(&self) -> [u8; 32] { + self.inner.key_prefix() + } +} + +// When HasDefaultValue = Yes, we expect there to exist a valid default value and will use that +// if we fetch an entry and get nothing back. +impl<'atblock, T, Client, Addr> StorageEntryClient<'atblock, T, Client, Addr, Maybe> +where + T: Config, + Addr: Address, + Client: OnlineClientT, +{ + pub async fn fetch( + &self, + keys: Addr::KeyParts, + ) -> Result, StorageError> { + let value = self + .try_fetch(keys) + .await? + .or_else(|| self.default_value()) + .unwrap(); + + Ok(value) + } + + pub async fn try_fetch( + &self, + keys: Addr::KeyParts, + ) -> Result>, StorageError> { + let key = self.inner.fetch_key(keys)?; + + let value = self + .client + .backend() + .storage_fetch_value(key, self.block_ref.hash()) + .await + .map_err(StorageError::CannotFetchValue)? 
+ .map(|bytes| self.inner.value(bytes)) + .or_else(|| self.default_value()); + + Ok(value) + } + + pub async fn iter>( + &self, + keys: Keys, + ) -> Result< + impl futures::Stream, StorageError>> + + use<'atblock, Addr, Client, T, Keys>, + StorageError, + > { + let key_bytes = self.inner.iter_key(keys)?; + let block_hash = self.block_ref.hash(); + let inner = self.inner.clone(); + + let stream = self + .client + .backend() + .storage_fetch_descendant_values(key_bytes, block_hash) + .await + .map_err(StorageError::CannotIterateValues)? + .map(move |kv| { + let kv = match kv { + Ok(kv) => kv, + Err(e) => return Err(StorageError::StreamFailure(e)), + }; + Ok(inner.key_value(kv.key, kv.value)) + }); + + Ok(Box::pin(stream)) + } + + /// The first 32 bytes of the storage entry key, which points to the entry but not necessarily + /// a single storage value (unless the entry is a plain value). + pub fn key_prefix(&self) -> [u8; 32] { + self.inner.key_prefix() + } +} diff --git a/subxt/src/storage/storage_type.rs b/subxt/src/storage/storage_type.rs deleted file mode 100644 index b82539dc839..00000000000 --- a/subxt/src/storage/storage_type.rs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::{ - backend::{BackendExt, BlockRef}, - client::OnlineClientT, - config::{Config, HashFor}, - error::{Error, MetadataError, StorageAddressError}, - metadata::DecodeWithMetadata, -}; -use codec::Decode; -use derive_where::derive_where; -use futures::StreamExt; -use std::{future::Future, marker::PhantomData}; -use subxt_core::storage::address::{Address, StorageHashers, StorageKey}; -use subxt_core::utils::Yes; - -/// This is returned from a couple of storage functions. -pub use crate::backend::StreamOfResults; - -/// Query the runtime storage. 
-#[derive_where(Clone; Client)] -pub struct Storage { - client: Client, - block_ref: BlockRef>, - _marker: PhantomData, -} - -impl Storage { - /// Create a new [`Storage`] - pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { - Self { - client, - block_ref, - _marker: PhantomData, - } - } -} - -impl Storage -where - T: Config, - Client: OnlineClientT, -{ - /// Fetch the raw encoded value at the key given. - pub fn fetch_raw( - &self, - key: impl Into>, - ) -> impl Future>, Error>> + 'static { - let client = self.client.clone(); - let key = key.into(); - // Keep this alive until the call is complete: - let block_ref = self.block_ref.clone(); - // Manual future so lifetime not tied to api.storage(). - async move { - let data = client - .backend() - .storage_fetch_value(key, block_ref.hash()) - .await?; - Ok(data) - } - } - - /// Stream all of the raw keys underneath the key given - pub fn fetch_raw_keys( - &self, - key: impl Into>, - ) -> impl Future>, Error>> + 'static { - let client = self.client.clone(); - let block_hash = self.block_ref.hash(); - let key = key.into(); - // Manual future so lifetime not tied to api.storage(). - async move { - let keys = client - .backend() - .storage_fetch_descendant_keys(key, block_hash) - .await?; - Ok(keys) - } - } - - /// Fetch a decoded value from storage at a given address. - /// - /// # Example - /// - /// ```rust,no_run,standalone_crate - /// use subxt::{ PolkadotConfig, OnlineClient }; - /// - /// #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] - /// pub mod polkadot {} - /// - /// # #[tokio::main] - /// # async fn main() { - /// let api = OnlineClient::::new().await.unwrap(); - /// - /// // Address to a storage entry we'd like to access. - /// let address = polkadot::storage().xcm_pallet().queries(12345); - /// - /// // Fetch just the keys, returning up to 10 keys. 
- /// let value = api - /// .storage() - /// .at_latest() - /// .await - /// .unwrap() - /// .fetch(&address) - /// .await - /// .unwrap(); - /// - /// println!("Value: {:?}", value); - /// # } - /// ``` - pub fn fetch<'address, Addr>( - &self, - address: &'address Addr, - ) -> impl Future, Error>> + use<'address, Addr, Client, T> - where - Addr: Address + 'address, - { - let client = self.clone(); - async move { - let metadata = client.client.metadata(); - - // Metadata validation checks whether the static address given - // is likely to actually correspond to a real storage entry or not. - // if not, it means static codegen doesn't line up with runtime - // metadata. - subxt_core::storage::validate(address, &metadata)?; - - // Look up the return type ID to enable DecodeWithMetadata: - let lookup_bytes = subxt_core::storage::get_address_bytes(address, &metadata)?; - if let Some(data) = client.fetch_raw(lookup_bytes).await? { - let val = subxt_core::storage::decode_value(&mut &*data, address, &metadata)?; - Ok(Some(val)) - } else { - Ok(None) - } - } - } - - /// Fetch a StorageKey that has a default value with an optional block hash. - pub fn fetch_or_default<'address, Addr>( - &self, - address: &'address Addr, - ) -> impl Future> + use<'address, Addr, Client, T> - where - Addr: Address + 'address, - { - let client = self.clone(); - async move { - // Metadata validation happens via .fetch(): - if let Some(data) = client.fetch(address).await? { - Ok(data) - } else { - let metadata = client.client.metadata(); - let val = subxt_core::storage::default_value(address, &metadata)?; - Ok(val) - } - } - } - - /// Returns an iterator of key value pairs. 
- /// - /// ```rust,no_run,standalone_crate - /// use subxt::{ PolkadotConfig, OnlineClient }; - /// - /// #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] - /// pub mod polkadot {} - /// - /// # #[tokio::main] - /// # async fn main() { - /// let api = OnlineClient::::new().await.unwrap(); - /// - /// // Address to the root of a storage entry that we'd like to iterate over. - /// let address = polkadot::storage().xcm_pallet().version_notifiers_iter(); - /// - /// // Iterate over keys and values at that address. - /// let mut iter = api - /// .storage() - /// .at_latest() - /// .await - /// .unwrap() - /// .iter(address) - /// .await - /// .unwrap(); - /// - /// while let Some(Ok(kv)) = iter.next().await { - /// println!("Key bytes: 0x{}", hex::encode(&kv.key_bytes)); - /// println!("Value: {}", kv.value); - /// } - /// # } - /// ``` - pub fn iter( - &self, - address: Addr, - ) -> impl Future>, Error>> + 'static - where - Addr: Address + 'static, - Addr::Keys: 'static + Sized, - { - let client = self.client.clone(); - let block_ref = self.block_ref.clone(); - async move { - let metadata = client.metadata(); - let (_pallet, entry) = subxt_core::storage::lookup_storage_entry_details( - address.pallet_name(), - address.entry_name(), - &metadata, - )?; - - // Metadata validation checks whether the static address given - // is likely to actually correspond to a real storage entry or not. - // if not, it means static codegen doesn't line up with runtime - // metadata. - subxt_core::storage::validate(&address, &metadata)?; - - // Look up the return type for flexible decoding. Do this once here to avoid - // potentially doing it every iteration if we used `decode_storage_with_metadata` - // in the iterator. 
- let entry = entry.entry_type(); - - let return_type_id = entry.value_ty(); - let hashers = StorageHashers::new(entry, metadata.types())?; - - // The address bytes of this entry: - let address_bytes = subxt_core::storage::get_address_bytes(&address, &metadata)?; - let s = client - .backend() - .storage_fetch_descendant_values(address_bytes, block_ref.hash()) - .await? - .map(move |kv| { - let kv = match kv { - Ok(kv) => kv, - Err(e) => return Err(e), - }; - let value = Addr::Target::decode_with_metadata( - &mut &*kv.value, - return_type_id, - &metadata, - )?; - - let key_bytes = kv.key; - let cursor = &mut &key_bytes[..]; - strip_storage_address_root_bytes(cursor)?; - - let keys = ::decode_storage_key( - cursor, - &mut hashers.iter(), - metadata.types(), - )?; - - Ok(StorageKeyValuePair:: { - keys, - key_bytes, - value, - }) - }); - - let s = StreamOfResults::new(Box::pin(s)); - Ok(s) - } - } - - /// The storage version of a pallet. - /// The storage version refers to the `frame_support::traits::Metadata::StorageVersion` type. - pub async fn storage_version(&self, pallet_name: impl AsRef) -> Result { - // check that the pallet exists in the metadata: - self.client - .metadata() - .pallet_by_name(pallet_name.as_ref()) - .ok_or_else(|| MetadataError::PalletNameNotFound(pallet_name.as_ref().into()))?; - - // construct the storage key. This is done similarly in `frame_support::traits::metadata::StorageVersion::storage_key()`. 
- pub const STORAGE_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__STORAGE_VERSION__:"; - let mut key_bytes: Vec = vec![]; - key_bytes.extend(&sp_crypto_hashing::twox_128( - pallet_name.as_ref().as_bytes(), - )); - key_bytes.extend(&sp_crypto_hashing::twox_128( - STORAGE_VERSION_STORAGE_KEY_POSTFIX, - )); - - // fetch the raw bytes and decode them into the StorageVersion struct: - let storage_version_bytes = self.fetch_raw(key_bytes).await?.ok_or_else(|| { - format!( - "Unexpected: entry for storage version in pallet \"{}\" not found", - pallet_name.as_ref() - ) - })?; - u16::decode(&mut &storage_version_bytes[..]).map_err(Into::into) - } - - /// Fetch the runtime WASM code. - pub async fn runtime_wasm_code(&self) -> Result, Error> { - // note: this should match the `CODE` constant in `sp_core::storage::well_known_keys` - const CODE: &str = ":code"; - self.fetch_raw(CODE.as_bytes()).await?.ok_or_else(|| { - format!("Unexpected: entry for well known key \"{CODE}\" not found").into() - }) - } -} - -/// Strips the first 32 bytes (16 for the pallet hash, 16 for the entry hash) off some storage address bytes. -fn strip_storage_address_root_bytes(address_bytes: &mut &[u8]) -> Result<(), StorageAddressError> { - if address_bytes.len() >= 32 { - *address_bytes = &address_bytes[32..]; - Ok(()) - } else { - Err(StorageAddressError::UnexpectedAddressBytes) - } -} - -/// A pair of keys and values together with all the bytes that make up the storage address. -/// `keys` is `None` if non-concat hashers are used. In this case the keys could not be extracted back from the key_bytes. -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] -pub struct StorageKeyValuePair { - /// The bytes that make up the address of the storage entry. - pub key_bytes: Vec, - /// The keys that can be used to construct the address of this storage entry. - pub keys: T::Keys, - /// The value of the storage entry. 
- pub value: T::Target, -} diff --git a/subxt/src/tx/tx_client.rs b/subxt/src/tx/tx_client.rs index 6c26c357416..030846a9986 100644 --- a/subxt/src/tx/tx_client.rs +++ b/subxt/src/tx/tx_client.rs @@ -6,13 +6,13 @@ use crate::{ backend::{BackendExt, BlockRef, TransactionStatus}, client::{OfflineClientT, OnlineClientT}, config::{Config, ExtrinsicParams, HashFor, Header}, - error::{BlockError, Error}, + error::{ExtrinsicError, TransactionStatusError}, tx::{Payload, Signer as SignerT, TxProgress}, utils::PhantomDataSendSync, }; use codec::{Compact, Decode, Encode}; use derive_where::derive_where; -use futures::future::try_join; +use futures::future::{TryFutureExt, try_join}; use subxt_core::tx::TransactionVersion; /// A client for working with transactions. @@ -37,7 +37,7 @@ impl> TxClient { /// if the call is valid (or if it's not possible to check since the call has no validation hash). /// Return an error if the call was not valid or something went wrong trying to validate it (ie /// the pallet or call in question do not exist at all). - pub fn validate(&self, call: &Call) -> Result<(), Error> + pub fn validate(&self, call: &Call) -> Result<(), ExtrinsicError> where Call: Payload, { @@ -45,7 +45,7 @@ impl> TxClient { } /// Return the SCALE encoded bytes representing the call data of the transaction. - pub fn call_data(&self, call: &Call) -> Result, Error> + pub fn call_data(&self, call: &Call) -> Result, ExtrinsicError> where Call: Payload, { @@ -55,7 +55,10 @@ impl> TxClient { /// Creates an unsigned transaction without submitting it. Depending on the metadata, we might end /// up constructing either a v4 or v5 transaction. See [`Self::create_v4_unsigned`] or /// [`Self::create_v5_bare`] if you'd like to explicitly create an unsigned transaction of a certain version. 
- pub fn create_unsigned(&self, call: &Call) -> Result, Error> + pub fn create_unsigned( + &self, + call: &Call, + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -78,7 +81,7 @@ impl> TxClient { pub fn create_v4_unsigned( &self, call: &Call, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -95,7 +98,10 @@ impl> TxClient { /// /// Prefer [`Self::create_unsigned()`] if you don't know which version to create; this will pick the /// most suitable one for the given chain. - pub fn create_v5_bare(&self, call: &Call) -> Result, Error> + pub fn create_v5_bare( + &self, + call: &Call, + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -117,7 +123,7 @@ impl> TxClient { &self, call: &Call, params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -148,7 +154,7 @@ impl> TxClient { &self, call: &Call, params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -175,7 +181,7 @@ impl> TxClient { &self, call: &Call, params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -198,9 +204,21 @@ where C: OnlineClientT, { /// Get the account nonce for a given account ID. - pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { - let block_ref = self.client.backend().latest_finalized_block_ref().await?; - crate::blocks::get_account_nonce(&self.client, account_id, block_ref.hash()).await + pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { + let block_ref = self + .client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; + + crate::blocks::get_account_nonce(&self.client, account_id, block_ref.hash()) + .await + .map_err(|e| ExtrinsicError::AccountNonceError { + block_hash: block_ref.hash().into(), + account_id: account_id.encode().into(), + reason: e, + }) } /// Creates a partial transaction, without submitting it. 
This can then be signed and submitted. @@ -209,7 +227,7 @@ where call: &Call, account_id: &T::AccountId, mut params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -226,7 +244,7 @@ where call: &Call, account_id: &T::AccountId, mut params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -243,7 +261,7 @@ where call: &Call, account_id: &T::AccountId, mut params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, { @@ -257,7 +275,7 @@ where call: &Call, signer: &Signer, params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, Signer: SignerT, @@ -278,7 +296,7 @@ where &mut self, call: &Call, signer: &Signer, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, Signer: SignerT, @@ -297,7 +315,7 @@ where call: &Call, signer: &Signer, params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, Signer: SignerT, @@ -322,7 +340,7 @@ where &mut self, call: &Call, signer: &Signer, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, Signer: SignerT, @@ -345,7 +363,7 @@ where call: &Call, signer: &Signer, params: >::Params, - ) -> Result, Error> + ) -> Result, ExtrinsicError> where Call: Payload, Signer: SignerT, @@ -485,7 +503,7 @@ where /// /// Returns a [`TxProgress`], which can be used to track the status of the transaction /// and obtain details about it, once it has made it into a block. - pub async fn submit_and_watch(&self) -> Result, Error> { + pub async fn submit_and_watch(&self) -> Result, ExtrinsicError> { // Get a hash of the transaction (we'll need this later). 
let ext_hash = self.hash(); @@ -494,7 +512,8 @@ where .client .backend() .submit_transaction(self.encoded()) - .await?; + .await + .map_err(ExtrinsicError::ErrorSubmittingTransaction)?; Ok(TxProgress::new(sub, self.client.clone(), ext_hash)) } @@ -504,13 +523,14 @@ where /// It's usually better to call `submit_and_watch` to get an idea of the progress of the /// submission and whether it's eventually successful or not. This call does not guarantee /// success, and is just sending the transaction to the chain. - pub async fn submit(&self) -> Result, Error> { + pub async fn submit(&self) -> Result, ExtrinsicError> { let ext_hash = self.hash(); let mut sub = self .client .backend() .submit_transaction(self.encoded()) - .await?; + .await + .map_err(ExtrinsicError::ErrorSubmittingTransaction)?; // If we get a bad status or error back straight away then error, else return the hash. match sub.next().await { @@ -520,20 +540,22 @@ where | TransactionStatus::InBestBlock { .. } | TransactionStatus::NoLongerInBestBlock | TransactionStatus::InFinalizedBlock { .. 
} => Ok(ext_hash), - TransactionStatus::Error { message } => { - Err(Error::Other(format!("Transaction error: {message}"))) - } + TransactionStatus::Error { message } => Err( + ExtrinsicError::TransactionStatusError(TransactionStatusError::Error(message)), + ), TransactionStatus::Invalid { message } => { - Err(Error::Other(format!("Transaction invalid: {message}"))) + Err(ExtrinsicError::TransactionStatusError( + TransactionStatusError::Invalid(message), + )) } TransactionStatus::Dropped { message } => { - Err(Error::Other(format!("Transaction dropped: {message}"))) + Err(ExtrinsicError::TransactionStatusError( + TransactionStatusError::Dropped(message), + )) } }, - Some(Err(e)) => Err(e), - None => Err(Error::Other( - "Transaction broadcast was unsuccessful; stream terminated early".into(), - )), + Some(Err(e)) => Err(ExtrinsicError::TransactionStatusStreamError(e)), + None => Err(ExtrinsicError::UnexpectedEndOfTransactionStatusStream), } } @@ -541,8 +563,13 @@ where /// valid can be added to a block, but may still end up in an error state. /// /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the transaction. - pub async fn validate(&self) -> Result { - let latest_block_ref = self.client.backend().latest_finalized_block_ref().await?; + pub async fn validate(&self) -> Result { + let latest_block_ref = self + .client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; self.validate_at(latest_block_ref).await } @@ -553,7 +580,7 @@ where pub async fn validate_at( &self, at: impl Into>>, - ) -> Result { + ) -> Result { let block_hash = at.into().hash(); // Approach taken from https://github.com/paritytech/json-rpc-interface-spec/issues/55. 
@@ -570,17 +597,23 @@ where Some(¶ms), block_hash, ) - .await?; + .await + .map_err(ExtrinsicError::CannotGetValidationInfo)?; ValidationResult::try_from_bytes(res) } /// This returns an estimate for what the transaction is expected to cost to execute, less any tips. /// The actual amount paid can vary from block to block based on node traffic and other factors. - pub async fn partial_fee_estimate(&self) -> Result { + pub async fn partial_fee_estimate(&self) -> Result { let mut params = self.encoded().to_vec(); (self.encoded().len() as u32).encode_to(&mut params); - let latest_block_ref = self.client.backend().latest_finalized_block_ref().await?; + let latest_block_ref = self + .client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; // destructuring RuntimeDispatchInfo, see type information // data layout: {weight_ref_time: Compact, weight_proof_size: Compact, class: u8, partial_fee: u128} @@ -592,7 +625,9 @@ where Some(¶ms), latest_block_ref.hash(), ) - .await?; + .await + .map_err(ExtrinsicError::CannotGetFeeInfo)?; + Ok(partial_fee) } } @@ -602,19 +637,33 @@ async fn inject_account_nonce_and_block>( client: &Client, account_id: &T::AccountId, params: &mut >::Params, -) -> Result<(), Error> { +) -> Result<(), ExtrinsicError> { use subxt_core::config::transaction_extensions::Params; - let block_ref = client.backend().latest_finalized_block_ref().await?; + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; let (block_header, account_nonce) = try_join( - client.backend().block_header(block_ref.hash()), - crate::blocks::get_account_nonce(client, account_id, block_ref.hash()), + client + .backend() + .block_header(block_ref.hash()) + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock), + crate::blocks::get_account_nonce(client, account_id, block_ref.hash()).map_err(|e| { + ExtrinsicError::AccountNonceError { + 
block_hash: block_ref.hash().into(), + account_id: account_id.encode().into(), + reason: e, + } + }), ) .await?; - let block_header = - block_header.ok_or_else(|| Error::Block(BlockError::not_found(block_ref.hash())))?; + let block_header = block_header.ok_or_else(|| ExtrinsicError::CannotFindBlockHeader { + block_hash: block_ref.hash().into(), + })?; params.inject_account_nonce(account_nonce); params.inject_block(block_header.number().into(), block_ref.hash()); @@ -624,26 +673,29 @@ async fn inject_account_nonce_and_block>( impl ValidationResult { #[allow(clippy::get_first)] - fn try_from_bytes(bytes: Vec) -> Result { + fn try_from_bytes(bytes: Vec) -> Result { // TaggedTransactionQueue_validate_transaction returns this: // https://github.com/paritytech/substrate/blob/0cdf7029017b70b7c83c21a4dc0aa1020e7914f6/primitives/runtime/src/transaction_validity.rs#L210 // We copy some of the inner types and put the three states (valid, invalid, unknown) into one enum, // because from our perspective, the call was successful regardless. if bytes.get(0) == Some(&0) { // ok: valid. 
Decode but, for now we discard most of the information - let res = TransactionValid::decode(&mut &bytes[1..])?; + let res = TransactionValid::decode(&mut &bytes[1..]) + .map_err(ExtrinsicError::CannotDecodeValidationResult)?; Ok(ValidationResult::Valid(res)) } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&0) { // error: invalid - let res = TransactionInvalid::decode(&mut &bytes[2..])?; + let res = TransactionInvalid::decode(&mut &bytes[2..]) + .map_err(ExtrinsicError::CannotDecodeValidationResult)?; Ok(ValidationResult::Invalid(res)) } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&1) { // error: unknown - let res = TransactionUnknown::decode(&mut &bytes[2..])?; + let res = TransactionUnknown::decode(&mut &bytes[2..]) + .map_err(ExtrinsicError::CannotDecodeValidationResult)?; Ok(ValidationResult::Unknown(res)) } else { // unable to decode the bytes; they aren't what we expect. - Err(crate::Error::Unknown(bytes)) + Err(ExtrinsicError::UnexpectedValidationResultBytes(bytes)) } } } diff --git a/subxt/src/tx/tx_progress.rs b/subxt/src/tx/tx_progress.rs index 0940e613de1..83126dcd3e2 100644 --- a/subxt/src/tx/tx_progress.rs +++ b/subxt/src/tx/tx_progress.rs @@ -10,7 +10,10 @@ use crate::{ backend::{BlockRef, StreamOfResults, TransactionStatus as BackendTxStatus}, client::OnlineClientT, config::{Config, HashFor}, - error::{DispatchError, Error, RpcError, TransactionError}, + error::{ + DispatchError, TransactionEventsError, TransactionFinalizedSuccessError, + TransactionProgressError, TransactionStatusError, + }, events::EventsClient, utils::strip_compact_prefix, }; @@ -67,7 +70,7 @@ where /// Return the next transaction status when it's emitted. This just delegates to the /// [`futures::Stream`] implementation for [`TxProgress`], but allows you to /// avoid importing that trait if you don't otherwise need it. 
- pub async fn next(&mut self) -> Option, Error>> { + pub async fn next(&mut self) -> Option, TransactionProgressError>> { StreamExt::next(self).await } @@ -81,24 +84,26 @@ where /// probability that the transaction will not make it into a block but there is no guarantee /// that this is true. In those cases the stream is closed however, so you currently have no way to find /// out if they finally made it into a block or not. - pub async fn wait_for_finalized(mut self) -> Result, Error> { + pub async fn wait_for_finalized(mut self) -> Result, TransactionProgressError> { while let Some(status) = self.next().await { match status? { // Finalized! Return. TxStatus::InFinalizedBlock(s) => return Ok(s), // Error scenarios; return the error. - TxStatus::Error { message } => return Err(TransactionError::Error(message).into()), + TxStatus::Error { message } => { + return Err(TransactionStatusError::Error(message).into()); + } TxStatus::Invalid { message } => { - return Err(TransactionError::Invalid(message).into()); + return Err(TransactionStatusError::Invalid(message).into()); } TxStatus::Dropped { message } => { - return Err(TransactionError::Dropped(message).into()); + return Err(TransactionStatusError::Dropped(message).into()); } // Ignore and wait for next status event: _ => continue, } } - Err(RpcError::SubscriptionDropped.into()) + Err(TransactionProgressError::UnexpectedEndOfTransactionStatusStream) } /// Wait for the transaction to be finalized, and for the transaction events to indicate @@ -114,14 +119,14 @@ where /// out if they finally made it into a block or not. 
pub async fn wait_for_finalized_success( self, - ) -> Result, Error> { + ) -> Result, TransactionFinalizedSuccessError> { let evs = self.wait_for_finalized().await?.wait_for_success().await?; Ok(evs) } } impl Stream for TxProgress { - type Item = Result, Error>; + type Item = Result, TransactionProgressError>; fn poll_next( mut self: std::pin::Pin<&mut Self>, @@ -132,37 +137,41 @@ impl Stream for TxProgress { None => return Poll::Ready(None), }; - sub.poll_next_unpin(cx).map_ok(|status| { - match status { - BackendTxStatus::Validated => TxStatus::Validated, - BackendTxStatus::Broadcasted => TxStatus::Broadcasted, - BackendTxStatus::NoLongerInBestBlock => TxStatus::NoLongerInBestBlock, - BackendTxStatus::InBestBlock { hash } => { - TxStatus::InBestBlock(TxInBlock::new(hash, self.ext_hash, self.client.clone())) - } - // These stream events mean that nothing further will be sent: - BackendTxStatus::InFinalizedBlock { hash } => { - self.sub = None; - TxStatus::InFinalizedBlock(TxInBlock::new( + sub.poll_next_unpin(cx) + .map_err(TransactionProgressError::CannotGetNextProgressUpdate) + .map_ok(|status| { + match status { + BackendTxStatus::Validated => TxStatus::Validated, + BackendTxStatus::Broadcasted => TxStatus::Broadcasted, + BackendTxStatus::NoLongerInBestBlock => TxStatus::NoLongerInBestBlock, + BackendTxStatus::InBestBlock { hash } => TxStatus::InBestBlock(TxInBlock::new( hash, self.ext_hash, self.client.clone(), - )) - } - BackendTxStatus::Error { message } => { - self.sub = None; - TxStatus::Error { message } - } - BackendTxStatus::Invalid { message } => { - self.sub = None; - TxStatus::Invalid { message } + )), + // These stream events mean that nothing further will be sent: + BackendTxStatus::InFinalizedBlock { hash } => { + self.sub = None; + TxStatus::InFinalizedBlock(TxInBlock::new( + hash, + self.ext_hash, + self.client.clone(), + )) + } + BackendTxStatus::Error { message } => { + self.sub = None; + TxStatus::Error { message } + } + 
BackendTxStatus::Invalid { message } => { + self.sub = None; + TxStatus::Invalid { message } + } + BackendTxStatus::Dropped { message } => { + self.sub = None; + TxStatus::Dropped { message } + } } - BackendTxStatus::Dropped { message } => { - self.sub = None; - TxStatus::Dropped { message } - } - } - }) + }) } } @@ -258,15 +267,27 @@ impl> TxInBlock { /// /// **Note:** This has to download block details from the node and decode events /// from them. - pub async fn wait_for_success(&self) -> Result, Error> { + pub async fn wait_for_success( + &self, + ) -> Result, TransactionEventsError> { let events = self.fetch_events().await?; // Try to find any errors; return the first one we encounter. - for ev in events.iter() { - let ev = ev?; + for (ev_idx, ev) in events.iter().enumerate() { + let ev = ev.map_err(|e| TransactionEventsError::CannotDecodeEventInBlock { + event_index: ev_idx, + block_hash: self.block_hash().into(), + error: e, + })?; + if ev.pallet_name() == "System" && ev.variant_name() == "ExtrinsicFailed" { let dispatch_error = - DispatchError::decode_from(ev.field_bytes(), self.client.metadata())?; + DispatchError::decode_from(ev.field_bytes(), self.client.metadata()).map_err( + |e| TransactionEventsError::CannotDecodeDispatchError { + error: e, + bytes: ev.field_bytes().to_vec(), + }, + )?; return Err(dispatch_error.into()); } } @@ -280,15 +301,23 @@ impl> TxInBlock { /// /// **Note:** This has to download block details from the node and decode events /// from them. - pub async fn fetch_events(&self) -> Result, Error> { + pub async fn fetch_events( + &self, + ) -> Result, TransactionEventsError> { let hasher = self.client.hasher(); let block_body = self .client .backend() .block_body(self.block_ref.hash()) - .await? - .ok_or(Error::Transaction(TransactionError::BlockNotFound))?; + .await + .map_err(|e| TransactionEventsError::CannotFetchBlockBody { + block_hash: self.block_hash().into(), + error: e, + })? 
+ .ok_or_else(|| TransactionEventsError::BlockNotFound { + block_hash: self.block_hash().into(), + })?; let extrinsic_idx = block_body .iter() @@ -302,11 +331,21 @@ impl> TxInBlock { }) // If we successfully obtain the block hash we think contains our // extrinsic, the extrinsic should be in there somewhere.. - .ok_or(Error::Transaction(TransactionError::BlockNotFound))?; + .ok_or_else(|| TransactionEventsError::CannotFindTransactionInBlock { + block_hash: self.block_hash().into(), + transaction_hash: self.ext_hash.into(), + })?; let events = EventsClient::new(self.client.clone()) .at(self.block_ref.clone()) - .await?; + .await + .map_err( + |e| TransactionEventsError::CannotFetchEventsForTransaction { + block_hash: self.block_hash().into(), + transaction_hash: self.ext_hash.into(), + error: e, + }, + )?; Ok(crate::blocks::ExtrinsicEvents::new( self.ext_hash, @@ -318,10 +357,11 @@ impl> TxInBlock { #[cfg(test)] mod test { + use super::*; use subxt_core::client::RuntimeVersion; use crate::{ - Error, SubstrateConfig, + SubstrateConfig, backend::{StreamOfResults, TransactionStatus}, client::{OfflineClientT, OnlineClientT}, config::{Config, HashFor}, @@ -375,7 +415,7 @@ mod test { let finalized_result = tx_progress.wait_for_finalized().await; assert!(matches!( finalized_result, - Err(Error::Transaction(crate::error::TransactionError::Error(e))) if e == "err" + Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Error(e))) if e == "err" )); } @@ -390,7 +430,7 @@ mod test { let finalized_result = tx_progress.wait_for_finalized().await; assert!(matches!( finalized_result, - Err(Error::Transaction(crate::error::TransactionError::Invalid(e))) if e == "err" + Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Invalid(e))) if e == "err" )); } @@ -405,7 +445,7 @@ mod test { let finalized_result = tx_progress.wait_for_finalized().await; assert!(matches!( finalized_result, - 
Err(Error::Transaction(crate::error::TransactionError::Dropped(e))) if e == "err" + Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Dropped(e))) if e == "err" )); } diff --git a/subxt/src/view_functions/mod.rs b/subxt/src/view_functions/mod.rs index f544bf7818a..df095bfb093 100644 --- a/subxt/src/view_functions/mod.rs +++ b/subxt/src/view_functions/mod.rs @@ -7,8 +7,6 @@ mod view_function_types; mod view_functions_client; -pub use subxt_core::view_functions::payload::{ - DefaultPayload, DynamicPayload, Payload, StaticPayload, dynamic, -}; +pub use subxt_core::view_functions::payload::{DynamicPayload, Payload, StaticPayload, dynamic}; pub use view_function_types::ViewFunctionsApi; pub use view_functions_client::ViewFunctionsClient; diff --git a/subxt/src/view_functions/view_function_types.rs b/subxt/src/view_functions/view_function_types.rs index 62371e2487e..aad1baa32e0 100644 --- a/subxt/src/view_functions/view_function_types.rs +++ b/subxt/src/view_functions/view_function_types.rs @@ -7,7 +7,7 @@ use crate::{ backend::BlockRef, client::OnlineClientT, config::{Config, HashFor}, - error::Error, + error::ViewFunctionError, }; use derive_where::derive_where; use std::{future::Future, marker::PhantomData}; @@ -40,7 +40,7 @@ where /// if the payload is valid (or if it's not possible to check since the payload has no validation hash). 
/// Return an error if the payload was not valid or something went wrong trying to validate it (ie /// the View Function in question do not exist at all) - pub fn validate(&self, payload: &Call) -> Result<(), Error> { + pub fn validate(&self, payload: &Call) -> Result<(), ViewFunctionError> { subxt_core::view_functions::validate(payload, &self.client.metadata()).map_err(Into::into) } @@ -48,7 +48,8 @@ where pub fn call( &self, payload: Call, - ) -> impl Future> + use { + ) -> impl Future> + use + { let client = self.client.clone(); let block_hash = self.block_ref.hash(); // Ensure that the returned future doesn't have a lifetime tied to api.view_functions(), @@ -68,7 +69,8 @@ where let bytes = client .backend() .call(call_name, Some(call_args.as_slice()), block_hash) - .await?; + .await + .map_err(ViewFunctionError::CannotCallApi)?; // Decode the response. let value = diff --git a/subxt/src/view_functions/view_functions_client.rs b/subxt/src/view_functions/view_functions_client.rs index 5ce39179028..cdd0efe5c47 100644 --- a/subxt/src/view_functions/view_functions_client.rs +++ b/subxt/src/view_functions/view_functions_client.rs @@ -8,7 +8,7 @@ use crate::{ backend::BlockRef, client::OnlineClientT, config::{Config, HashFor}, - error::Error, + error::ViewFunctionError, }; use derive_where::derive_where; use std::{future::Future, marker::PhantomData}; @@ -43,13 +43,18 @@ where /// Obtain an interface to call View Functions at the latest finalized block. pub fn at_latest( &self, - ) -> impl Future, Error>> + Send + 'static { + ) -> impl Future, ViewFunctionError>> + Send + 'static + { // Clone and pass the client in like this so that we can explicitly // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); async move { // get the ref for the latest finalized block and use that. 
- let block_ref = client.backend().latest_finalized_block_ref().await?; + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ViewFunctionError::CannotGetLatestFinalizedBlock)?; Ok(ViewFunctionsApi::new(client, block_ref)) }