From 80996ed8f93be1a3c61724c8c96feb614bf4f0df Mon Sep 17 00:00:00 2001
From: Dimitris Apostolou
Date: Sun, 22 Jan 2023 01:00:55 +0200
Subject: [PATCH] Fix typos (#7254)

---
 .../TransactionResult.module.css | 2 +-
 apps/explorer/src/utils/objectUtils.ts | 2 +-
 apps/explorer/src/utils/timeUtils.ts | 2 +-
 .../src/background/keyring/VaultStorage.ts | 2 +-
 .../TransferCoinForm/StepTwo.tsx | 2 +-
 apps/wallet/src/ui/styles/utils/index.scss | 2 +-
 crates/rccheck/src/ed25519_certgen.rs | 2 +-
 crates/rccheck/src/lib.rs | 10 +++++-----
 .../size_limits/move_object_size_limit.move | 2 +-
 .../sui/move_call_incorrect_function.move | 2 +-
 crates/sui-adapter/src/adapter.rs | 4 ++--
 .../src/embedded_reconfig_observer.rs | 4 ++--
 .../src/fullnode_reconfig_observer.rs | 2 +-
 .../fullnode_execute_transaction_test.rs | 4 ++--
 crates/sui-config/src/p2p.rs | 2 +-
 .../sui-core/src/authority/authority_store.rs | 2 +-
 crates/sui-core/src/authority_aggregator.rs | 18 ++++++++---------
 .../checkpoints/checkpoint_executor/tests.rs | 2 +-
 crates/sui-core/src/consensus_adapter.rs | 2 +-
 crates/sui-core/src/quorum_driver/mod.rs | 6 +++---
 .../src/unit_tests/execution_driver_tests.rs | 2 +-
 crates/sui-cost-tables/src/bytecode_tables.rs | 20 +++++++++----------
 crates/sui-cost-tables/src/units_types.rs | 2 +-
 crates/sui-framework/docs/bulletproofs.md | 2 +-
 crates/sui-framework/docs/hex.md | 4 ++--
 crates/sui-framework/docs/object.md | 4 ++--
 crates/sui-framework/docs/publisher.md | 2 +-
 crates/sui-framework/docs/validator_set.md | 2 +-
 .../sources/crypto/bulletproofs.move | 2 +-
 .../sources/governance/validator_set.move | 2 +-
 crates/sui-framework/sources/hex.move | 4 ++--
 crates/sui-framework/sources/object.move | 4 ++--
 crates/sui-framework/sources/publisher.move | 2 +-
 .../sources/test/test_scenario.move | 4 ++--
 .../2022-12-02-202249_packages/up.sql | 2 +-
 .../src/handlers/handler_orchestrator.rs | 2 +-
 .../src/handlers/transaction_handler.rs | 6 +++---
 .../src/models/transaction_logs.rs | 2 +-
 crates/sui-json/src/tests.rs | 4 ++--
 crates/sui-network/src/discovery/builder.rs | 4 ++--
 crates/sui-network/src/discovery/tests.rs | 2 +-
 crates/sui-protocol-constants/src/lib.rs | 2 +-
 crates/sui-sdk/src/apis.rs | 2 +-
 crates/sui-simulator/README.md | 10 +++++-----
 .../src/write_path_pending_tx_log.rs | 2 +-
 crates/sui-types/src/coin.rs | 4 ++--
 crates/sui-types/src/messages.rs | 4 ++--
 crates/sui-verifier/src/id_leak_verifier.rs | 2 +-
 crates/sui/src/genesis_ceremony.rs | 2 +-
 crates/telemetry-subscribers/src/lib.rs | 2 +-
 crates/typed-store-derive/src/lib.rs | 14 ++++++-------
 crates/typed-store/src/metrics.rs | 4 ++--
 crates/typed-store/src/rocks/mod.rs | 2 +-
 crates/typed-store/src/rocks/tests.rs | 2 +-
 deny.toml | 4 ++--
 .../dashboards/monitor_services.json | 2 +-
 narwhal/benchmark/README.md | 4 ++--
 narwhal/config/src/duration_format.rs | 8 ++++----
 narwhal/consensus/src/dag.rs | 6 +++---
 narwhal/consensus/src/tusk.rs | 2 +-
 narwhal/dag/src/node_dag.rs | 8 ++++----
 narwhal/deny.toml | 4 ++--
 narwhal/executor/src/subscriber.rs | 2 +-
 narwhal/network/src/admin.rs | 2 +-
 narwhal/storage/src/certificate_store.rs | 2 +-
 narwhal/worker/src/quorum_waiter.rs | 2 +-
 scripts/lldb_frame_sizes.py | 2 +-
 sdk/bcs/src/index.ts | 2 +-
 sdk/typescript/src/types/framework.ts | 2 +-
 .../examples/capy/sources/capy_market.move | 2 +-
 .../examples/defi/sources/pool.move | 2 +-
 .../examples/defi/sources/subscription.move | 2 +-
 .../examples/frenemies/sources/frenemies.move | 2 +-
 .../frenemies/tests/frenemies_tests.move | 4 ++--
 .../fungible_tokens/sources/private_coin.move | 4 ++--
 .../sources/regulated_coin.move | 2 +-
 .../sources/drand_based_scratch_card.move | 4 ++--
 .../examples/games/sources/sea_hero.move | 2 +-
 .../examples/nfts/sources/chat.move | 2 +-
 79 files changed, 141 insertions(+), 141 deletions(-)

diff --git a/apps/explorer/src/pages/transaction-result/TransactionResult.module.css b/apps/explorer/src/pages/transaction-result/TransactionResult.module.css
index e022ea97a49e6..5f532fb2a88f4 100644
--- a/apps/explorer/src/pages/transaction-result/TransactionResult.module.css
+++ b/apps/explorer/src/pages/transaction-result/TransactionResult.module.css
@@ -32,7 +32,7 @@
 .txeventsright {
     @apply col-span-5 overflow-y-scroll md:col-span-4;

-    max-height: 100vh; /* enables scrolling list independtly of page */
+    max-height: 100vh; /* enables scrolling list independently of page */
 }

 .txcomponent {
diff --git a/apps/explorer/src/utils/objectUtils.ts b/apps/explorer/src/utils/objectUtils.ts
index c8842f6c91999..18124c2b02110 100644
--- a/apps/explorer/src/utils/objectUtils.ts
+++ b/apps/explorer/src/utils/objectUtils.ts
@@ -22,7 +22,7 @@ export function parseImageURL(data: any): string {
     if (findIPFSvalue(url)) return url;

-    // String respresenting true http/https URLs are valid:
+    // String representing true http/https URLs are valid:
     try {
         new URL(url);
         return url;
diff --git a/apps/explorer/src/utils/timeUtils.ts b/apps/explorer/src/utils/timeUtils.ts
index 9ba63225dc5fc..142eee0b8f0f9 100644
--- a/apps/explorer/src/utils/timeUtils.ts
+++ b/apps/explorer/src/utils/timeUtils.ts
@@ -92,7 +92,7 @@ export function useTimeAgo(
     return formattedTime;
 }

-// TODO - this need a bit of modification to account for multiple display formate types
+// TODO - this need a bit of modification to account for multiple display format types
 export const timeAgo = (
     epochMilliSecs: number | null | undefined,
     timeNow?: number | null,
diff --git a/apps/wallet/src/background/keyring/VaultStorage.ts b/apps/wallet/src/background/keyring/VaultStorage.ts
index d1f67fbc943a1..61f926cea4ad8 100644
--- a/apps/wallet/src/background/keyring/VaultStorage.ts
+++ b/apps/wallet/src/background/keyring/VaultStorage.ts
@@ -131,7 +131,7 @@ class VaultStorageClass {
      * Import a new keypair to the vault and saves it to storage. If keypair already exists it ignores it.
      * NOTE: make sure you verify the password before calling this method
      * @param keypair The keypair to import
-     * @param password The password to be used to store the vault. Make sure to verify that it's the correct password (of the current vault) and then call this function. It does't verify the password see {@link VaultStorage.verifyPassword}.
+     * @param password The password to be used to store the vault. Make sure to verify that it's the correct password (of the current vault) and then call this function. It doesn't verify the password see {@link VaultStorage.verifyPassword}.
      * @returns True if the key was imported, false otherwise
      */
     public async importKeypair(keypair: ExportedKeypair, password: string) {
diff --git a/apps/wallet/src/ui/app/pages/home/transfer-coin/TransferCoinForm/StepTwo.tsx b/apps/wallet/src/ui/app/pages/home/transfer-coin/TransferCoinForm/StepTwo.tsx
index 3d57fc12f304c..fb7414d168035 100644
--- a/apps/wallet/src/ui/app/pages/home/transfer-coin/TransferCoinForm/StepTwo.tsx
+++ b/apps/wallet/src/ui/app/pages/home/transfer-coin/TransferCoinForm/StepTwo.tsx
@@ -65,7 +65,7 @@ function StepTwo({
-                        Enter or search the address of the recepient below to start
+                        Enter or search the address of the recipient below to start
                         sending coins.
diff --git a/apps/wallet/src/ui/styles/utils/index.scss b/apps/wallet/src/ui/styles/utils/index.scss
index ef2e72ea7dd97..8601447434885 100644
--- a/apps/wallet/src/ui/styles/utils/index.scss
+++ b/apps/wallet/src/ui/styles/utils/index.scss
@@ -235,7 +235,7 @@ $main-extra-space: sizing.$main-bottom-space;
         font-weight: 600;
         letter-spacing: 0;
     } @else if $type == 'mono-type' {
-        // TODO chanage to mono font
+        // TODO change to mono font
         font-style: normal;
         font-weight: 500;
         font-size: 14px;
diff --git a/crates/rccheck/src/ed25519_certgen.rs b/crates/rccheck/src/ed25519_certgen.rs
index b5cd61b92275b..59c3b250971e7 100644
--- a/crates/rccheck/src/ed25519_certgen.rs
+++ b/crates/rccheck/src/ed25519_certgen.rs
@@ -133,7 +133,7 @@ impl Certifiable for Ed25519 {
                 // ed25519 OID
                 oid: ed25519::pkcs8::ALGORITHM_OID,
                 // some environments require a type ASN.1 NULL, use the commented alternative if so
-                // this instead matches our rcgen-produced certificates for compatibiltiy
+                // this instead matches our rcgen-produced certificates for compatibility
                 // use pkcs8::spki::der::asn1;
                 parameters: None, // Some(asn1::Any::from(asn1::Null)),
             },
diff --git a/crates/rccheck/src/lib.rs b/crates/rccheck/src/lib.rs
index 8ec6ba4e24e5d..8a090c041e00f 100644
--- a/crates/rccheck/src/lib.rs
+++ b/crates/rccheck/src/lib.rs
@@ -40,7 +40,7 @@ pub(crate) mod test_utils;

 pub mod ed25519_certgen;

-// Re-export our version of rustls to stave off compatiblity issues
+// Re-export our version of rustls to stave off compatibility issues
 pub use rustls;

 type SignatureAlgorithms = &'static [&'static webpki::SignatureAlgorithm];
@@ -292,7 +292,7 @@ impl ClientCertVerifier for Psk {
     }

     fn client_auth_root_subjects(&self) -> Option {
-        // We can't guarantee subjects before having seen the cert. This should not be None for compatiblity reasons
+        // We can't guarantee subjects before having seen the cert. This should not be None for compatibility reasons
         Some(rustls::DistinguishedNames::new())
     }

@@ -368,14 +368,14 @@ impl ServerCertVerifier for Psk {
             .map_err(pki_error)
             .map(|_| cert)?;

-        // log additional certificate transaparency info (which is pointless in our self-signed context) and return
+        // log additional certificate transparency info (which is pointless in our self-signed context) and return
         let mut peekable = scts.peekable();
         if peekable.peek().is_none() {
-            tracing::trace!("Met unvalidated certificate transparency data");
+            tracing::trace!("Met invalidated certificate transparency data");
         }

         if !ocsp_response.is_empty() {
-            tracing::trace!("Unvalidated OCSP response: {:?}", ocsp_response.to_vec());
+            tracing::trace!("Invalidated OCSP response: {:?}", ocsp_response.to_vec());
         }

         cert.verify_is_valid_for_dns_name(dns_nameref)
diff --git a/crates/sui-adapter-transactional-tests/tests/size_limits/move_object_size_limit.move b/crates/sui-adapter-transactional-tests/tests/size_limits/move_object_size_limit.move
index 271fc6eaf23b2..e7399469b26cb 100644
--- a/crates/sui-adapter-transactional-tests/tests/size_limits/move_object_size_limit.move
+++ b/crates/sui-adapter-transactional-tests/tests/size_limits/move_object_size_limit.move
@@ -85,7 +85,7 @@ module Test::M1 {
 // create at size limit should succeed
 //# run Test::M1::transfer_object_with_size --args 256000

-// addding 1 byte to an object at the size limit should fail
+// adding 1 byte to an object at the size limit should fail
 //# run Test::M1::add_byte --args object(108)

 // create at size limit, wrap, increase to over size limit while wrapped, then unwrap. should fail
diff --git a/crates/sui-adapter-transactional-tests/tests/sui/move_call_incorrect_function.move b/crates/sui-adapter-transactional-tests/tests/sui/move_call_incorrect_function.move
index 058469d3c80a1..9bd1e226d21a8 100644
--- a/crates/sui-adapter-transactional-tests/tests/sui/move_call_incorrect_function.move
+++ b/crates/sui-adapter-transactional-tests/tests/sui/move_call_incorrect_function.move
@@ -12,7 +12,7 @@ module Test::M {
 }

-// Instead of calling on the Test package, we are calling a non-existant package
+// Instead of calling on the Test package, we are calling a non-existent package
 //# run 0x242::M::create

 // Calling a non-existent function.
diff --git a/crates/sui-adapter/src/adapter.rs b/crates/sui-adapter/src/adapter.rs
index 3d78d87b192f8..bfc42aa8042e0 100644
--- a/crates/sui-adapter/src/adapter.rs
+++ b/crates/sui-adapter/src/adapter.rs
@@ -422,7 +422,7 @@ fn init_modules<
         let mut args = vec![];
         // an init function can have one or two arguments, with the last one always being of type
         // &mut TxContext and the additional (first) one representing a characteristic type (see
-        // char_type verfier pass for additional explanation)
+        // char_type verifier pass for additional explanation)
         if parameters.len() == 2 {
             // characteristic type is a struct with a single bool filed which in bcs is encoded as
             // 0x01
@@ -536,7 +536,7 @@ pub fn generate_package_id(
 /// Update `state_view` with the effects of successfully executing a transaction:
 /// - Look for each input in `by_value_objects` to determine whether the object was transferred, frozen, or deleted
 /// - Update objects passed via a mutable reference in `mutable_refs` to their new values
-/// - Process creation of new objects and user-emittd events in `events`
+/// - Process creation of new objects and user-emitted events in `events`
 #[allow(clippy::too_many_arguments)]
 fn process_successful_execution(
     state_view: &mut S,
diff --git a/crates/sui-benchmark/src/embedded_reconfig_observer.rs b/crates/sui-benchmark/src/embedded_reconfig_observer.rs
index c011d0f9636d3..c5534fe9a8c89 100644
--- a/crates/sui-benchmark/src/embedded_reconfig_observer.rs
+++ b/crates/sui-benchmark/src/embedded_reconfig_observer.rs
@@ -13,7 +13,7 @@ use tracing::{error, info, trace};
 /// A ReconfigObserver that polls validators periodically
 /// to get new epoch information.
 /// Caveat:
-/// 1. it does not guarantee to insert every commitee into
+/// 1. it does not guarantee to insert every committee into
 ///    committee store. This is fine in scenarios such as
 ///    stress, but may not be suitable in some other cases.
 /// 2. because of 1, if it misses intermediate committee(s)
 ///    fail to get quorum on the latest committee info from
 ///    demissioned validators and then stop working.
 /// Background: this is a temporary solution for stress before
-/// we see fullnode reconfiguration stablilizes.
+/// we see fullnode reconfiguration stabilizes.
 #[derive(Clone, Default)]
 pub struct EmbeddedReconfigObserver {}
diff --git a/crates/sui-benchmark/src/fullnode_reconfig_observer.rs b/crates/sui-benchmark/src/fullnode_reconfig_observer.rs
index e52c8f971fe26..acf794697fbb6 100644
--- a/crates/sui-benchmark/src/fullnode_reconfig_observer.rs
+++ b/crates/sui-benchmark/src/fullnode_reconfig_observer.rs
@@ -17,7 +17,7 @@ use tracing::{debug, error, trace};

 /// A ReconfigObserver that polls FullNode periodically
 /// to get new epoch information.
-/// Caveat: it does not guarantee to insert every commitee
+/// Caveat: it does not guarantee to insert every committee
 /// into committee store. This is fine in scenarios such
 /// as stress, but may not be suitable in some other cases.
 #[derive(Clone)]
diff --git a/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs b/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs
index cb66967a7042b..0df937375144f 100644
--- a/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs
+++ b/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs
@@ -64,7 +64,7 @@ impl TestCaseImpl for FullNodeExecuteTransactionTest {
         let effects = response.effects.unwrap();
         if !matches!(effects.status, SuiExecutionStatus::Success { .. }) {
             panic!(
-                "Failed to execute transfer tranasction {:?}: {:?}",
+                "Failed to execute transfer transaction {:?}: {:?}",
                 txn_digest, effects.status
             )
         }
@@ -89,7 +89,7 @@ impl TestCaseImpl for FullNodeExecuteTransactionTest {
         let effects = response.effects.unwrap();
         if !matches!(effects.status, SuiExecutionStatus::Success { .. }) {
             panic!(
-                "Failed to execute transfer tranasction {:?}: {:?}",
+                "Failed to execute transfer transaction {:?}: {:?}",
                 txn_digest, effects.status
             )
         }
diff --git a/crates/sui-config/src/p2p.rs b/crates/sui-config/src/p2p.rs
index c76f1bff8339b..a7b9592991e92 100644
--- a/crates/sui-config/src/p2p.rs
+++ b/crates/sui-config/src/p2p.rs
@@ -145,7 +145,7 @@ pub struct DiscoveryConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub interval_period_ms: Option,

-    /// Target number of conncurrent connections to establish.
+    /// Target number of concurrent connections to establish.
     ///
     /// If unspecified, this will default to `4`.
     #[serde(skip_serializing_if = "Option::is_none")]
diff --git a/crates/sui-core/src/authority/authority_store.rs b/crates/sui-core/src/authority/authority_store.rs
index 45271669e7362..9f466cc896573 100644
--- a/crates/sui-core/src/authority/authority_store.rs
+++ b/crates/sui-core/src/authority/authority_store.rs
@@ -1256,7 +1256,7 @@ impl ModuleResolver for AuthorityStore {
     // TODO: duplicated code with ModuleResolver for InMemoryStorage in memory_storage.rs.
     fn get_module(&self, module_id: &ModuleId) -> Result>, Self::Error> {
         // TODO: We should cache the deserialized modules to avoid
-        // fetching from the store / re-deserializing them everytime.
+        // fetching from the store / re-deserializing them every time.
         // https://github.com/MystenLabs/sui/issues/809
         Ok(self
             .get_package(&ObjectID::from(*module_id.address()))?
diff --git a/crates/sui-core/src/authority_aggregator.rs b/crates/sui-core/src/authority_aggregator.rs
index 6d8832914edfa..7b5b57dba9e3f 100644
--- a/crates/sui-core/src/authority_aggregator.rs
+++ b/crates/sui-core/src/authority_aggregator.rs
@@ -588,7 +588,7 @@ impl TransactionCertifier for NetworkTransactionCertifier {
             AuthAggMetrics::new(&registry),
         )?;

-        net.authorty_ask_for_cert_with_retry_and_timeout(transaction, self_store, timeout)
+        net.authority_ask_for_cert_with_retry_and_timeout(transaction, self_store, timeout)
             .await
     }
 }
@@ -615,7 +615,7 @@ impl TransactionCertifier for LocalTransactionCertifier {
             &Registry::new(),
         );

-        net.authorty_ask_for_cert_with_retry_and_timeout(transaction, self_store, timeout)
+        net.authority_ask_for_cert_with_retry_and_timeout(transaction, self_store, timeout)
             .await
     }
 }
@@ -679,7 +679,7 @@ where
     pub(crate) async fn quorum_map_then_reduce_with_timeout_and_prefs<'a, S, V, FMap, FReduce>(
         &'a self,
-        authority_prefences: Option<&BTreeSet>,
+        authority_preferences: Option<&BTreeSet>,
         initial_state: S,
         map_each_authority: FMap,
         reduce_result: FReduce,
@@ -694,7 +694,7 @@ where
             Result,
         ) -> AsyncResult<'a, ReduceOutput, SuiError>,
     {
-        let authorities_shuffled = self.committee.shuffle_by_stake(authority_prefences, None);
+        let authorities_shuffled = self.committee.shuffle_by_stake(authority_preferences, None);

         // First, execute in parallel for each authority FMap.
         let mut responses: futures::stream::FuturesUnordered<_> = authorities_shuffled
@@ -1068,7 +1068,7 @@ where
             .collect::>();
         // Sort by votes. The last item is the one with the most votes, we will examine it.
         // We don't order by epoch to prevent it from being stuck when some byzantine validators
-        // give wrong results. At the end of day, we need quorum to make acertain.
+        // give wrong results. At the end of day, we need quorum to be certain.
         committee_and_votes.sort_by(|lhs, rhs| Ord::cmp(&lhs.1, &rhs.1));
         let (committee, votes) = committee_and_votes
             .pop()
@@ -1085,7 +1085,7 @@ where
     /// Return all the information in the network regarding the latest state of a specific object.
     /// For each authority queried, we obtain the latest object state along with the certificate that
-    /// lead up to that state. The results from each authority are aggreated for the return.
+    /// lead up to that state. The results from each authority are aggregated for the return.
     /// The first part of the return value is a map from each unique (ObjectRef, TransactionDigest)
     /// pair to the content of the object as well as a list of authorities that responded this
     /// pair.
@@ -1503,7 +1503,7 @@ where
         signed_effects: SignedTransactionEffects,
     ) -> Result<(), EffectsCertError> {
         // If we get a certificate in the same epoch, then we use it.
-        // A certificate in a past epoch does not guaranteee finality
+        // A certificate in a past epoch does not guarantee finality
         // and validators may reject to process it.
         if certificate.epoch() == self.committee.epoch {
             debug!(?tx_digest, name=?name.concise(), weight, "Received prev certificate from validator handle_transaction");
@@ -1847,7 +1847,7 @@ where
             .await
     }

-    pub async fn authorty_ask_for_cert_with_retry_and_timeout(
+    pub async fn authority_ask_for_cert_with_retry_and_timeout(
         &self,
         transaction: &VerifiedTransaction,
         self_store: &Arc,
@@ -1889,7 +1889,7 @@ where
 }

 /// Given an AuthorityAggregator on genesis (epoch 0), catch up to the latest epoch and fill in
-/// all past epoches' committee information.
+/// all past epochs' committee information.
 /// Note: this function assumes >= 2/3 validators on genesis are still serving the network.
 pub async fn reconfig_from_genesis(
     mut aggregator: AuthorityAggregator,
diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs
index 15bc9fbcbb72b..4a65199902125 100644
--- a/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs
+++ b/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs
@@ -320,7 +320,7 @@ async fn init_executor_test(

 /// Creates and simulates syncing of a new checkpoint by StateSync, i.e. new
 /// checkpoint is persisted, along with its contents, highest synced checkpoint
-/// watermark is udpated, and message is broadcasted notifying of the newly synced
+/// watermark is updated, and message is broadcasted notifying of the newly synced
 /// checkpoint. Returns created checkpoints
 fn sync_new_checkpoints(
     checkpoint_store: &CheckpointStore,
diff --git a/crates/sui-core/src/consensus_adapter.rs b/crates/sui-core/src/consensus_adapter.rs
index 34cf1a4e9f667..c8daac28ae1e2 100644
--- a/crates/sui-core/src/consensus_adapter.rs
+++ b/crates/sui-core/src/consensus_adapter.rs
@@ -402,7 +402,7 @@ pub fn position_submit_certificate(
     ourselves: &AuthorityName,
     tx_digest: &TransactionDigest,
 ) -> usize {
-    // the 32 is as requirement of the deault StdRng::from_seed choice
+    // the 32 is as requirement of the default StdRng::from_seed choice
     let digest_bytes = tx_digest.into_bytes();

     // permute the validators deterministically, based on the digest
diff --git a/crates/sui-core/src/quorum_driver/mod.rs b/crates/sui-core/src/quorum_driver/mod.rs
index ada1b3eb3ed1b..9d06ab33f1d44 100644
--- a/crates/sui-core/src/quorum_driver/mod.rs
+++ b/crates/sui-core/src/quorum_driver/mod.rs
@@ -231,7 +231,7 @@ where
         Ok(ticket)
     }

-    // Used when the it is called in a compoent holding the notifier, and a ticket is
+    // Used when the it is called in a component holding the notifier, and a ticket is
     // already obtained prior to calling this function, for instance, TransactionOrchestrator
     pub async fn submit_transaction_no_ticket(
         &self,
@@ -366,7 +366,7 @@ where
     pub async fn update_validators(&self, new_validators: Arc>) {
         info!(
-            "Quorum Drvier updating AuthorityAggregator with committee {}",
+            "Quorum Driver updating AuthorityAggregator with committee {}",
             new_validators.committee
         );
         self.validators.store(new_validators);
@@ -584,7 +584,7 @@ where
         }
     }

-    // Used when the it is called in a compoent holding the notifier, and a ticket is
+    // Used when the it is called in a component holding the notifier, and a ticket is
     // already obtained prior to calling this function, for instance, TransactionOrchestrator
     pub async fn submit_transaction_no_ticket(
         &self,
diff --git a/crates/sui-core/src/unit_tests/execution_driver_tests.rs b/crates/sui-core/src/unit_tests/execution_driver_tests.rs
index 417077176e963..2f88458d8d882 100644
--- a/crates/sui-core/src/unit_tests/execution_driver_tests.rs
+++ b/crates/sui-core/src/unit_tests/execution_driver_tests.rs
@@ -507,7 +507,7 @@ async fn test_per_object_overload() {
     // Stop execution on the last authority, to simulate having a backlog.
     authorities[3].shutdown_execution_for_test();
-    // Make sure execution driver has exitted.
+    // Make sure execution driver has exited.
     sleep(Duration::from_secs(1)).await;

     // Sign and try execute 1000 txns on the first three authorities. And enqueue them on the last authority.
diff --git a/crates/sui-cost-tables/src/bytecode_tables.rs b/crates/sui-cost-tables/src/bytecode_tables.rs
index f45e2a11d3209..f0a991e593164 100644
--- a/crates/sui-cost-tables/src/bytecode_tables.rs
+++ b/crates/sui-cost-tables/src/bytecode_tables.rs
@@ -212,7 +212,7 @@ impl<'b> GasMeter for GasStatus<'b> {
         args: impl ExactSizeIterator,
         _num_locals: NumArgs,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         self.charge_instr_with_size(Opcodes::CALL, (args.len() as u64 + 1).into())
     }
@@ -224,7 +224,7 @@ impl<'b> GasMeter for GasStatus<'b> {
         args: impl ExactSizeIterator,
         _num_locals: NumArgs,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         self.charge_instr_with_size(
             Opcodes::CALL_GENERIC,
             ((ty_args.len() + args.len() + 1) as u64).into(),
         )
@@ -292,7 +292,7 @@ impl<'b> GasMeter for GasStatus<'b> {
         val: impl ValueView,
         _old_val: impl ValueView,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         self.charge_instr_with_size(Opcodes::WRITE_REF, val.legacy_abstract_memory_size())
     }
@@ -411,7 +411,7 @@ impl<'b> GasMeter for GasStatus<'b> {
         expect_num_elements: NumArgs,
         _elems: impl ExactSizeIterator,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         self.charge_instr_with_size(
             Opcodes::VEC_PUSH_BACK,
             u64::from(expect_num_elements).into(),
         )
@@ -426,7 +426,7 @@
         &mut self,
         _loaded: Option<(NumBytes, impl ValueView)>,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         Ok(())
     }
@@ -435,12 +435,12 @@
         amount: InternalGas,
         _ret_vals: Option>,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         self.deduct_gas(amount)
     }

     fn charge_pop(&mut self, _popped_val: impl ValueView) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         Ok(())
     }
@@ -448,7 +448,7 @@
         &mut self,
         _val: impl ValueView,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         Ok(())
     }
@@ -457,7 +457,7 @@
         _ty_args: impl ExactSizeIterator,
         _args: impl ExactSizeIterator,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         Ok(())
     }
@@ -465,7 +465,7 @@
         &mut self,
         _locals: impl Iterator,
     ) -> PartialVMResult<()> {
-        // TODO (Gas Maintainance)
+        // TODO (Gas Maintenance)
         Ok(())
     }
 }
diff --git a/crates/sui-cost-tables/src/units_types.rs b/crates/sui-cost-tables/src/units_types.rs
index 9ee9a8ed43edd..eea96c0435053 100644
--- a/crates/sui-cost-tables/src/units_types.rs
+++ b/crates/sui-cost-tables/src/units_types.rs
@@ -94,7 +94,7 @@ impl LinearEquation {

         if y < self.min {
             Err(anyhow!(
-                "Value {} is below minumum allowed {}",
+                "Value {} is below minimum allowed {}",
                 u64::from(y),
                 u64::from(self.min)
             ))
diff --git a/crates/sui-framework/docs/bulletproofs.md b/crates/sui-framework/docs/bulletproofs.md
index 6cfd105ffbb33..55df4365ac5d0 100644
--- a/crates/sui-framework/docs/bulletproofs.md
+++ b/crates/sui-framework/docs/bulletproofs.md
@@ -55,7 +55,7 @@ Only bit_length = 64, 32, 16, 8 will work.

 @param proof: The bulletproof
 @param commitment: The commitment which we are trying to verify the range proof for
-@param bit_length: The bit length that we prove the committed value is whithin. Note that bit_length must be either 64, 32, 16, or 8.
+@param bit_length: The bit length that we prove the committed value is within. Note that bit_length must be either 64, 32, 16, or 8.

 If the range proof is valid, execution succeeds, else panics.
diff --git a/crates/sui-framework/docs/hex.md b/crates/sui-framework/docs/hex.md
index d84061b3f55ef..f449b18f63ed2 100644
--- a/crates/sui-framework/docs/hex.md
+++ b/crates/sui-framework/docs/hex.md
@@ -92,8 +92,8 @@ Decode hex into bytes
 Takes a hex string (no 0x prefix) (e.g. b"0f3a")
 Returns vector of bytes that represents the hex string (e.g. x"0f3a")
 Hex string can be case insensitive (e.g. b"0F3A" and b"0f3a" both return x"0f3a")
-Aborts if the hex string does not have an even number of characters (as each hex charater is 2 characters long)
-Aborts if the hex string contains non-valid hex characters (valid charaters are 0 - 9, a - f, A - F)
+Aborts if the hex string does not have an even number of characters (as each hex character is 2 characters long)
+Aborts if the hex string contains non-valid hex characters (valid characters are 0 - 9, a - f, A - F)
public fun decode(hex: vector<u8>): vector<u8>
diff --git a/crates/sui-framework/docs/object.md b/crates/sui-framework/docs/object.md
index 7542dbd125c75..9c680287a6aa9 100644
--- a/crates/sui-framework/docs/object.md
+++ b/crates/sui-framework/docs/object.md
@@ -76,7 +76,7 @@ as you want for a given obj, and each UID as its first field.
-These are globaly unique in the sense that no two values of type UID are ever equal, in
+These are globally unique in the sense that no two values of type UID are ever equal, in
 other words for any two values id1: UID and id2: UID, id1 != id2.
 This is a privileged type that can only be derived from a TxContext.
 UID doesn't have the drop ability, so deleting a UID requires a call to delete.
@@ -507,7 +507,7 @@ Get the inner bytes for the underlying I
 Get the UID for obj.
 Safe because Sui has an extra bytecode verifier pass that forces every struct with
 the key ability to have a distinguished UID field.
-Cannot be made public as the access to UID for a given object must be privledged, and
+Cannot be made public as the access to UID for a given object must be privileged, and
 restrictable in the object's module.
 
 
diff --git a/crates/sui-framework/docs/publisher.md b/crates/sui-framework/docs/publisher.md
index f3f753bd580db..ed9088a0d5c6d 100644
--- a/crates/sui-framework/docs/publisher.md
+++ b/crates/sui-framework/docs/publisher.md
@@ -207,7 +207,7 @@ Check whether type belongs to the same package as the publisher object.
 
 ## Function `is_module`
 
-Check whether a type belogs to the same module as the publisher object.
+Check whether a type belongs to the same module as the publisher object.
 
 
 
public fun is_module<T>(self: &publisher::Publisher): bool
diff --git a/crates/sui-framework/docs/validator_set.md b/crates/sui-framework/docs/validator_set.md
index ac13de139853f..e886b1edf8d7d 100644
--- a/crates/sui-framework/docs/validator_set.md
+++ b/crates/sui-framework/docs/validator_set.md
@@ -126,7 +126,7 @@
 
 
 The metadata of the validator set for the next epoch. This is kept up-to-dated.
- Everytime a change request is received, this set is updated.
+ Every time a change request is received, this set is updated.
 TODO: This is currently not used. We may use it latter for enforcing min/max stake.
diff --git a/crates/sui-framework/sources/crypto/bulletproofs.move b/crates/sui-framework/sources/crypto/bulletproofs.move
index 5eaa2df37d184..35bb039bc8902 100644
--- a/crates/sui-framework/sources/crypto/bulletproofs.move
+++ b/crates/sui-framework/sources/crypto/bulletproofs.move
@@ -9,7 +9,7 @@ module sui::bulletproofs {

     /// @param proof: The bulletproof
     /// @param commitment: The commitment which we are trying to verify the range proof for
-    /// @param bit_length: The bit length that we prove the committed value is whithin. Note that bit_length must be either 64, 32, 16, or 8.
+    /// @param bit_length: The bit length that we prove the committed value is within. Note that bit_length must be either 64, 32, 16, or 8.
     ///
     /// If the range proof is valid, execution succeeds, else panics.
     public fun verify_full_range_proof(proof: &vector, commitment: &RistrettoPoint, bit_length: u64): bool {
diff --git a/crates/sui-framework/sources/governance/validator_set.move b/crates/sui-framework/sources/governance/validator_set.move
index c618430265585..3aa4ccf0742ec 100644
--- a/crates/sui-framework/sources/governance/validator_set.move
+++ b/crates/sui-framework/sources/governance/validator_set.move
@@ -44,7 +44,7 @@ module sui::validator_set {
         pending_removals: vector,

         /// The metadata of the validator set for the next epoch. This is kept up-to-dated.
-        /// Everytime a change request is received, this set is updated.
+        /// Every time a change request is received, this set is updated.
         /// TODO: This is currently not used. We may use it latter for enforcing min/max stake.
         next_epoch_validators: vector,
diff --git a/crates/sui-framework/sources/hex.move b/crates/sui-framework/sources/hex.move
index 1485165e23cf9..5b42fa3753e82 100644
--- a/crates/sui-framework/sources/hex.move
+++ b/crates/sui-framework/sources/hex.move
@@ -30,8 +30,8 @@ module sui::hex {
     /// Takes a hex string (no 0x prefix) (e.g. b"0f3a")
     /// Returns vector of `bytes` that represents the hex string (e.g. x"0f3a")
     /// Hex string can be case insensitive (e.g. b"0F3A" and b"0f3a" both return x"0f3a")
-    /// Aborts if the hex string does not have an even number of characters (as each hex charater is 2 characters long)
-    /// Aborts if the hex string contains non-valid hex characters (valid charaters are 0 - 9, a - f, A - F)
+    /// Aborts if the hex string does not have an even number of characters (as each hex character is 2 characters long)
+    /// Aborts if the hex string contains non-valid hex characters (valid characters are 0 - 9, a - f, A - F)
     public fun decode(hex: vector): vector {
         let (i, r, l) = (0, vector[], vector::length(&hex));
         assert!(l % 2 == 0, EInvalidHexLength);
diff --git a/crates/sui-framework/sources/object.move b/crates/sui-framework/sources/object.move
index 771618f2c7862..526999063491f 100644
--- a/crates/sui-framework/sources/object.move
+++ b/crates/sui-framework/sources/object.move
@@ -34,7 +34,7 @@ module sui::object {

     /// Globally unique IDs that define an object's ID in storage. Any Sui Object, that is a struct
     /// with the `key` ability, must have `id: UID` as its first field.
-    /// These are globaly unique in the sense that no two values of type `UID` are ever equal, in
+    /// These are globally unique in the sense that no two values of type `UID` are ever equal, in
     /// other words for any two values `id1: UID` and `id2: UID`, `id1` != `id2`.
     /// This is a privileged type that can only be derived from a `TxContext`.
     /// `UID` doesn't have the `drop` ability, so deleting a `UID` requires a call to `delete`.
@@ -137,7 +137,7 @@ module sui::object {
     /// Get the `UID` for `obj`.
     /// Safe because Sui has an extra bytecode verifier pass that forces every struct with
     /// the `key` ability to have a distinguished `UID` field.
-    /// Cannot be made public as the access to `UID` for a given object must be privledged, and
+    /// Cannot be made public as the access to `UID` for a given object must be privileged, and
     /// restrictable in the object's module.
     native fun borrow_uid(obj: &T): &UID;
diff --git a/crates/sui-framework/sources/publisher.move b/crates/sui-framework/sources/publisher.move
index 0c577f934a517..382c36ae6bd75 100644
--- a/crates/sui-framework/sources/publisher.move
+++ b/crates/sui-framework/sources/publisher.move
@@ -60,7 +60,7 @@ module sui::publisher {
         (type_name::get_address(&type) == self.package)
     }

-    /// Check whether a type belogs to the same module as the publisher object.
+    /// Check whether a type belongs to the same module as the publisher object.
     public fun is_module(self: &Publisher): bool {
         let type = type_name::get();
diff --git a/crates/sui-framework/sources/test/test_scenario.move b/crates/sui-framework/sources/test/test_scenario.move
index 0c5b7114303b9..c960bc4d06583 100644
--- a/crates/sui-framework/sources/test/test_scenario.move
+++ b/crates/sui-framework/sources/test/test_scenario.move
@@ -47,7 +47,7 @@ module sui::test_scenario {
     /// };
     /// // end the first transaction and begin a new one where addr2 is the sender
     /// // Starting a new transaction moves any objects transferred into their respective
-    /// // inventiories. In other words, if you call `take_from_sender` before `next_tx`, `addr2`
+    /// // inventories. In other words, if you call `take_from_sender` before `next_tx`, `addr2`
     /// // will not yet have `some_object`
     /// test_scenario::next_tx(scenario, addr2);
     /// {
@@ -80,7 +80,7 @@ module sui::test_scenario {
         shared: vector,
         /// The objects frozen this transaction
         frozen: vector,
-        /// The number of user events emmitted this transaction
+        /// The number of user events emitted this transaction
         num_user_events: u64,
     }
diff --git a/crates/sui-indexer/migrations/2022-12-02-202249_packages/up.sql b/crates/sui-indexer/migrations/2022-12-02-202249_packages/up.sql
index 79066a20c76ae..f21ef590f43ee 100644
--- a/crates/sui-indexer/migrations/2022-12-02-202249_packages/up.sql
+++ b/crates/sui-indexer/migrations/2022-12-02-202249_packages/up.sql
@@ -2,7 +2,7 @@ CREATE TABLE packages (
     package_id TEXT PRIMARY KEY,
     author TEXT NOT NULL,
     -- means the column cannot be null,
-    -- the element in the array can stil be null
+    -- the element in the array can still be null
     module_names TEXT[] NOT NULL,
     package_content TEXT NOT NULL
 );
diff --git a/crates/sui-indexer/src/handlers/handler_orchestrator.rs b/crates/sui-indexer/src/handlers/handler_orchestrator.rs
index e94421eed3854..3c943d920f1c6 100644
--- a/crates/sui-indexer/src/handlers/handler_orchestrator.rs
+++ b/crates/sui-indexer/src/handlers/handler_orchestrator.rs
@@ -205,6 +205,6 @@ impl HandlerOrchestrator {
             publish_event_handle,
         ])
         .await
-        .expect("Handler orchestrator shoult not run into errors.");
+        .expect("Handler orchestrator should not run into errors.");
     }
 }
diff --git a/crates/sui-indexer/src/handlers/transaction_handler.rs b/crates/sui-indexer/src/handlers/transaction_handler.rs
index dad2801644da8..4aecdf728ff1f 100644
--- a/crates/sui-indexer/src/handlers/transaction_handler.rs
+++ b/crates/sui-indexer/src/handlers/transaction_handler.rs
@@ -14,7 +14,7 @@ use tracing::info;

 use sui_indexer::errors::IndexerError;
 use sui_indexer::metrics::IndexerTransactionHandlerMetrics;
-use sui_indexer::models::transaction_logs::{commit_transction_log, read_transaction_log};
+use sui_indexer::models::transaction_logs::{commit_transaction_log, read_transaction_log};
 use sui_indexer::models::transactions::commit_transactions;
 use sui_indexer::utils::log_errors_to_pg;
 use sui_indexer::{get_pg_pool_connection, PgConnectionPool};
@@ -92,11 +92,11 @@ impl TransactionHandler {
         // Transaction page's next cursor can be None when latest transaction page is
         // reached, if we use the None cursor to read transactions, it will read from genesis,
         // thus here we do not commit / use the None cursor.
-        // This will cause duplidate run of the current batch, but will not cause duplidate rows
+        // This will cause duplicate run of the current batch, but will not cause duplicate rows
         // b/c of the uniqueness restriction of the table.
         if let Some(next_cursor_val) = page.next_cursor {
             // canonical txn digest is Base58 encoded
-            commit_transction_log(&mut pg_pool_conn, Some(next_cursor_val.base58_encode()))?;
+            commit_transaction_log(&mut pg_pool_conn, Some(next_cursor_val.base58_encode()))?;
             self.transaction_handler_metrics
                 .total_transactions_processed
                 .inc_by(txn_count as u64);
diff --git a/crates/sui-indexer/src/models/transaction_logs.rs b/crates/sui-indexer/src/models/transaction_logs.rs
index 0dd724de9e99b..eeca5ca92660a 100644
--- a/crates/sui-indexer/src/models/transaction_logs.rs
+++ b/crates/sui-indexer/src/models/transaction_logs.rs
@@ -33,7 +33,7 @@ pub fn read_transaction_log(
     })
 }

-pub fn commit_transction_log(
+pub fn commit_transaction_log(
     pg_pool_conn: &mut PgPoolConnection,
     txn_digest: Option,
 ) -> Result {
diff --git a/crates/sui-json/src/tests.rs b/crates/sui-json/src/tests.rs
index 9ec6688175011..aa984f29a474f 100644
--- a/crates/sui-json/src/tests.rs
+++ b/crates/sui-json/src/tests.rs
@@ -152,12 +152,12 @@ fn test_basic_args_linter_pure_args_bad() {
             Value::from(bad_hex_val),
             MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U8)),
         ),
-        // u8 vector from heterogenous array
+        // u8 vector from heterogeneous array
         (
             json!([1, 2, 3, true, 5, 6, 7]),
             MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U8)),
         ),
-        // U64 deep nest, bad because heterogenous array
+        // U64 deep nest, bad because heterogeneous array
         (
             json!([[[9, 53, 434], [0], [300]], [], [300, 4, 5, 6, 7]]),
             MoveTypeLayout::Vector(Box::new(MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U64)))),
diff --git a/crates/sui-network/src/discovery/builder.rs b/crates/sui-network/src/discovery/builder.rs
index c5d136f303005..b708e728f365c 100644
--- a/crates/sui-network/src/discovery/builder.rs
+++ b/crates/sui-network/src/discovery/builder.rs
@@ -34,7 +34,7 @@ impl Builder {
     pub(super) fn build_internal(self) -> (UnstartedDiscovery, Server) {
         let Builder { config } = self;
         let config = config.unwrap();
-        let (sender, reciever) = oneshot::channel();
+        let (sender, receiver) = oneshot::channel();

         let handle = Handle {
             _shutdown_handle: Arc::new(sender),
@@ -56,7 +56,7 @@ impl Builder {
             UnstartedDiscovery {
                 handle,
                 config,
-                shutdown_handle: reciever,
+                shutdown_handle: receiver,
                 state,
             },
             server,
diff --git a/crates/sui-network/src/discovery/tests.rs b/crates/sui-network/src/discovery/tests.rs
index c6599ee87a9d3..029034c436943 100644
--- a/crates/sui-network/src/discovery/tests.rs
+++ b/crates/sui-network/src/discovery/tests.rs
@@ -77,7 +77,7 @@ async fn get_known_peers() -> Result<()> {
     assert_eq!(response.own_info, our_info);
     assert!(response.known_peers.is_empty());

-    // Normal resonse with some known peers
+    // Normal response with some known peers
     let other_peer = NodeInfo {
         peer_id: PeerId([13; 32]),
         addresses: Vec::new(),
diff --git a/crates/sui-protocol-constants/src/lib.rs b/crates/sui-protocol-constants/src/lib.rs
index f92868ba8feef..ee994abcac6a9 100644
--- a/crates/sui-protocol-constants/src/lib.rs
+++ b/crates/sui-protocol-constants/src/lib.rs
@@ -86,7 +86,7 @@ pub const OBJ_ACCESS_COST_DELETE_PER_BYTE: u64 = 40;
 /// Meant to approximate the cost of checking locks for each object
 // TODO: I'm not sure that this cost makes sense. Checking locks is "free"
 // in the sense that an invalid tx that can never be committed/pay gas can
-// force validators to check an abitrary number of locks. If those checks are
+// force validators to check an arbitrary number of locks. If those checks are
 // "free" for invalid transactions, why charge for them in valid transactions
 // TODO: if we keep this, I think we probably want it to be a fixed cost rather
 // than a per-byte cost. checking an object lock should not require loading an
diff --git a/crates/sui-sdk/src/apis.rs b/crates/sui-sdk/src/apis.rs
index 7cb0a8d43fd62..81d0d655d2502 100644
--- a/crates/sui-sdk/src/apis.rs
+++ b/crates/sui-sdk/src/apis.rs
@@ -470,7 +470,7 @@ impl QuorumDriver {
     /// defaults to `ExecuteTransactionRequestType::WaitForLocalExecution`.
     /// When `ExecuteTransactionRequestType::WaitForLocalExecution` is used,
     /// but returned `confirmed_local_execution` is false, the client polls
-    /// the fullnode untils the fullnode recognizes this transaction, or
+    /// the fullnode until the fullnode recognizes this transaction, or
     /// until times out (see WAIT_FOR_TX_TIMEOUT_SEC). If it times out, an
     /// error is returned from this call.
     pub async fn execute_transaction(
diff --git a/crates/sui-simulator/README.md b/crates/sui-simulator/README.md
index 8b453c0536075..74e75ece52639 100644
--- a/crates/sui-simulator/README.md
+++ b/crates/sui-simulator/README.md
@@ -42,7 +42,7 @@ It has the following main components:
     - `mach_absolute_time()`, `clock_gettime()`: Intercepted to provide deterministic high-resolution timing behavior.
     - TODO: `gettimeofday()`: We would like to intercept this to provide deterministic wall-clock operations (e.g. on dates, etc). However, intercepting this currently breaks RocksDB.

-   This interception behavior is in effect only in threads that have explicity enabled it, which generally includes the main test thread only. In other threads, the interceptors delegate the call to the system library implementation via `dlsym()`. See implementation [here](https://github.com/MystenLabs/mysten-sim/blob/main/msim/src/sim/intercept.rs#L34-L48).
+   This interception behavior is in effect only in threads that have explicitly enabled it, which generally includes the main test thread only. In other threads, the interceptors delegate the call to the system library implementation via `dlsym()`. See implementation [here](https://github.com/MystenLabs/mysten-sim/blob/main/msim/src/sim/intercept.rs#L34-L48).

 1. Procedural macros that replace `#[tokio::test]` and run test code inside a testing environment. These are `#[sui_test]` and `#[sim_test]` and are documented below. The test harness created by these macros initializes the simulator runtime with a starting seed, generates the simulator configuration, and runs the test inside a newly created thread.
    The test must be run in its own thread in order to provide each test case with fresh thread local storage.
@@ -57,13 +57,13 @@ You can then run tests by doing:

     $ cargo simtest

-The simtest command calls `cargo nextest`, so you can add any valid `nextest` option to the commandline.
+The simtest command calls `cargo nextest`, so you can add any valid `nextest` option to the command line.

 `cargo simtest` also reads the following environment variables:

 - `MSIM_TEST_SEED` - the random seed for the global PRNG. Must be a positive decimal integer that fits into a `u64`. The default value is `1`.

-- `MSIM_TEST_NUM` - the number of times to repeat each test. Each reptition of a test is done with a different random seed, starting from the value of `MSIM_TEST_SEED` for the first repitition. The next seed is computed using the following function:
+- `MSIM_TEST_NUM` - the number of times to repeat each test. Each repetition of a test is done with a different random seed, starting from the value of `MSIM_TEST_SEED` for the first repetition. The next seed is computed using the following function:

     fn next_seed(seed: u64) -> u64 {
         use rand::Rng;
@@ -140,7 +140,7 @@ Or in the case of async code:

 `SuiNodeHandle` runs the provided callbacks/futures inside the context of the appropriate simulator node, so that network requests, spawned tasks, etc continue running in the correct context.

-Note that it is trival to exfiltrate state from the remote node, e.g.:
+Note that it is trivial to exfiltrate state from the remote node, e.g.:

     let node_state = handle.with(|node| {
         node.state()
@@ -159,7 +159,7 @@ Also, the world will not end if you break this rule. You just might see confusin

 - `config = "config_expr"` - This argument accepts a string which will be evaluated as an expression that returns the configuration for the test. Generally, you should make this a function call, and then define the function to return the config. The function must return a type that can implements `Into` - the most common choice is `SimConfig`, but `Vec` and `Vec<(usize /* repeat count */, SimConfig)>` are also supported by default. See https://github.com/MystenLabs/mysten-sim/blob/main/msim/src/sim/config.rs for the `TestConfig` implementation.

-- `check_determinism` - If set, the framework will run the test twice, and verify that it executes identically each time. (It does this by keeping a log which contains an entry for every call to the PRNG. Each entry contains a hash of the value yeilded by the PRNG at that point + the current time.). Tests with `check_determinism` are usually for testing the framework itself, so you probably won't need to use this.
+- `check_determinism` - If set, the framework will run the test twice, and verify that it executes identically each time. (It does this by keeping a log which contains an entry for every call to the PRNG. Each entry contains a hash of the value yielded by the PRNG at that point + the current time.). Tests with `check_determinism` are usually for testing the framework itself, so you probably won't need to use this.

 ### Configuring the network:
diff --git a/crates/sui-storage/src/write_path_pending_tx_log.rs b/crates/sui-storage/src/write_path_pending_tx_log.rs
index 3600359620fb6..e51cea3d1ea80 100644
--- a/crates/sui-storage/src/write_path_pending_tx_log.rs
+++ b/crates/sui-storage/src/write_path_pending_tx_log.rs
@@ -43,7 +43,7 @@ impl WritePathPendingTransactionLog {
     // Returns whether the table currently has this transaction in record.
     // If not, write the transaction and return true; otherwise return false.
-    // Because the record will be cleanded up when the transaction finishes,
+    // Because the record will be cleaned up when the transaction finishes,
     // even when it returns true, the callsite of this function should check
     // the transaction status before doing anything, to avoid duplicates.
     pub async fn write_pending_transaction_maybe(
diff --git a/crates/sui-types/src/coin.rs b/crates/sui-types/src/coin.rs
index 72d1adb9df6c4..5e9ef92e5b402 100644
--- a/crates/sui-types/src/coin.rs
+++ b/crates/sui-types/src/coin.rs
@@ -127,7 +127,7 @@ impl Coin {
     }

     // Shift balance of coins_to_merge to this coin.
-    // Related coin objects need to be updated in temporary_store to presist the changes,
+    // Related coin objects need to be updated in temporary_store to persist the changes,
     // including deleting the coin objects that have been merged.
     pub fn merge_coins(&mut self, coins_to_merge: &mut [Coin]) {
         let total_coins = coins_to_merge.iter().fold(0, |acc, c| acc + c.value());
@@ -139,7 +139,7 @@ impl Coin {
     }

     // Split amount out of this coin to a new coin.
-    // Related coin objects need to be updated in temporary_store to presist the changes,
+    // Related coin objects need to be updated in temporary_store to persist the changes,
     // including creating the coin object related to the newly created coin.
     pub fn split_coin(&mut self, amount: u64, new_coin_id: UID) -> Result {
         self.balance.withdraw(amount)?;
diff --git a/crates/sui-types/src/messages.rs b/crates/sui-types/src/messages.rs
index a2fbd45fff31e..8a1d8dd181e1c 100644
--- a/crates/sui-types/src/messages.rs
+++ b/crates/sui-types/src/messages.rs
@@ -147,7 +147,7 @@ pub struct PayAllSui {
 /// 2. accumulate all residual SUI from input coins left and deposit all SUI to the first
 /// input coin, then use the first input coin as the gas coin object.
 /// 3. the balance of the first input coin after tx is sum(input_coins) - sum(amounts) - actual_gas_cost
-/// 4. all other input coints other than the first one are deleted.
+/// 4. all other input coins other than the first one are deleted.
 #[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
 pub struct PaySui {
     /// The coins to be used for payment.
@@ -175,7 +175,7 @@ pub struct Pay {
 pub struct ChangeEpoch {
     /// The next (to become) epoch ID.
     pub epoch: EpochId,
-    /// The total amount of gas charged for staroge during the epoch.
+    /// The total amount of gas charged for storage during the epoch.
     pub storage_charge: u64,
     /// The total amount of gas charged for computation during the epoch.
     pub computation_charge: u64,
diff --git a/crates/sui-verifier/src/id_leak_verifier.rs b/crates/sui-verifier/src/id_leak_verifier.rs
index 2c929172e1c84..de2c300b29174 100644
--- a/crates/sui-verifier/src/id_leak_verifier.rs
+++ b/crates/sui-verifier/src/id_leak_verifier.rs
@@ -240,7 +240,7 @@ fn execute_inner(
     bytecode: &Bytecode,
     _: CodeOffset,
 ) -> PartialVMResult<()> {
-    // TODO: Better dianostics with location
+    // TODO: Better diagnostics with location
     match bytecode {
         Bytecode::Pop => {
             verifier.stack.pop().unwrap();
diff --git a/crates/sui/src/genesis_ceremony.rs b/crates/sui/src/genesis_ceremony.rs
index b4cd905e7b071..07476896dd9ec 100644
--- a/crates/sui/src/genesis_ceremony.rs
+++ b/crates/sui/src/genesis_ceremony.rs
@@ -173,7 +173,7 @@ pub fn run(cmd: Ceremony) -> Result<()> {

             let mut builder = Builder::load(&dir)?;

-            // Don't sign unless the unsinged checkpoint has already been created
+            // Don't sign unless the unsigned checkpoint has already been created
             if builder.unsigned_genesis_checkpoint().is_none() {
                 return Err(anyhow::anyhow!(
                     "Unable to verify and sign genesis checkpoint; it hasn't been built yet"
diff --git a/crates/telemetry-subscribers/src/lib.rs b/crates/telemetry-subscribers/src/lib.rs
index d288de62ebec1..5d51f14b75b43 100644
--- a/crates/telemetry-subscribers/src/lib.rs
+++ b/crates/telemetry-subscribers/src/lib.rs
@@ -373,7 +373,7 @@ impl TelemetryConfig {
         let (nb_output, worker_guard) = get_output(config.log_file.clone());
         if config.json_log_output {
             // See https://www.lpalmieri.com/posts/2020-09-27-zero-to-production-4-are-we-observable-yet/#5-7-tracing-bunyan-formatter
-            // Also Bunyan layer addes JSON logging for tracing spans with duration information
+            // Also Bunyan layer adds JSON logging for tracing spans with duration information
             let json_layer = JsonStorageLayer
                 .and_then(
                     BunyanFormattingLayer::new(config.service_name, nb_output)
diff --git a/crates/typed-store-derive/src/lib.rs b/crates/typed-store-derive/src/lib.rs
index 75c397a5eccfe..eee4de6b89e93 100644
--- a/crates/typed-store-derive/src/lib.rs
+++ b/crates/typed-store-derive/src/lib.rs
@@ -145,7 +145,7 @@ fn extract_generics_names(generics: &Generics) -> Vec {
         .iter()
         .map(|g| match g {
             syn::GenericParam::Type(t) => t.ident.clone(),
-            _ => panic!("Unspoorted generic type"),
+            _ => panic!("Unsupported generic type"),
         })
         .collect()
 }
@@ -154,13 +154,13 @@ fn extract_generics_names(generics: &Generics) -> Vec {
 /// It operates on a struct where all the members are of Store or DBMap
 /// `TypedStoreDebug` traits are then derived
 /// The main features are:
-/// 1. Flexible confguration of each table (colum family) via defaults and overrides
+/// 1. Flexible configuration of each table (column family) via defaults and overrides
 /// 2. Auto-generated `open` routine
 /// 3. Auto-generated `read_only_mode` handle
 /// 4. Auto-generated memory stats method
 /// 5. Other convenience features
 ///
-/// 1. Flexible confguration:
+/// 1. Flexible configuration:
 /// a. Static options specified at struct definition
 /// The definer of the struct can specify the default options for each table using annotations
 /// We can also supply column family options on the default ones
@@ -190,7 +190,7 @@ fn extract_generics_names(generics: &Generics) -> Vec {
 /// table1: DBMap,
 /// #[default_options_override_fn = "custom_fn_name2"]
 /// table2: DBMap,
-/// // Nothing specifed so `typed_store::rocks::default_db_options` is used
+/// // Nothing specified so `typed_store::rocks::default_db_options` is used
 /// table3: DBMap,
 /// #[default_options_override_fn = "custom_fn_name1"]
 /// table4: DBMap,
@@ -248,7 +248,7 @@ fn extract_generics_names(generics: &Generics) -> Vec {
 /// table1: DBMap,
 /// #[default_options_override_fn = "custom_fn_name2"]
 /// table2: DBMap,
-/// // Nothing specifed so `typed_store::rocks::default_db_options` is used
+/// // Nothing specified so `typed_store::rocks::default_db_options` is used
 /// table3: DBMap,
 /// #[default_options_override_fn = "custom_fn_name1"]
 /// table4: DBMap,
@@ -421,7 +421,7 @@ pub fn derive_dbmap_utils_general(input: TokenStream) -> TokenStream {
             )*
         ]
     };
-    // Safe to call unwrap because we will have atleast one field_name entry in the struct
+    // Safe to call unwrap because we will have at least one field_name entry in the struct
    let rwopt_cfs: std::collections::HashMap = opt_cfs.iter().map(|q| (q.0.as_str().to_string(), q.1.rw_options.clone())).collect();
    let opt_cfs: Vec<_> = opt_cfs.iter().map(|q| (q.0.as_str(), &q.1.options)).collect();
    let db = match (as_secondary_with_path, is_transaction) {
@@ -794,7 +794,7 @@ pub fn derive_sallydb_general(input: TokenStream) -> TokenStream {
             )*
         ]
     };
-    // Safe to call unwrap because we will have atleast one field_name entry in the struct
+    // Safe to call unwrap because we will have at least one field_name entry in the struct
    let rwopt_cfs: std::collections::HashMap = opt_cfs.iter().map(|q| (q.0.as_str().to_string(), q.1.rw_options.clone())).collect();
    let opt_cfs: Vec<_> = opt_cfs.iter().map(|q| (q.0.as_str(), &q.1.options)).collect();
    let db = match access_type {
diff --git a/crates/typed-store/src/metrics.rs b/crates/typed-store/src/metrics.rs
index e7f56e10973ce..74e22f9fcdadd 100644
--- a/crates/typed-store/src/metrics.rs
+++ b/crates/typed-store/src/metrics.rs
@@ -178,7 +178,7 @@ impl ColumnFamilyMetrics {
                 If this number is 1, it means some part of the column family requires
                 compaction in order to maintain shape of LSM tree, but the compaction
                 is pending because the desired compaction job is either waiting for
-                other dependnent compactions to be finished or waiting for an available
+                other dependent compactions to be finished or waiting for an available
                 compaction thread.",
                 &["cf_name"],
                 registry,
@@ -200,7 +200,7 @@ impl ColumnFamilyMetrics {
                 .unwrap(),
             rocksdb_estimate_oldest_key_time: register_int_gauge_vec_with_registry!(
                 "rocksdb_estimate_oldest_key_time",
-                "Estimation of the oldest key timestamp in the DB. Only vailable
+                "Estimation of the oldest key timestamp in the DB. Only available
                 for FIFO compaction with compaction_options_fifo.allow_compaction = false.",
                 &["cf_name"],
                 registry,
diff --git a/crates/typed-store/src/rocks/mod.rs b/crates/typed-store/src/rocks/mod.rs
index 32b418a96b156..8727302f96620 100644
--- a/crates/typed-store/src/rocks/mod.rs
+++ b/crates/typed-store/src/rocks/mod.rs
@@ -576,7 +576,7 @@ impl DBMap {
     /// Opens a database from a path, with specific options and an optional column family.
     ///
     /// This database is used to perform operations on single column family, and parametrizes
-    /// all operations in `DBBatch` when writting across column families.
+    /// all operations in `DBBatch` when writing across column families.
     #[instrument(level="debug", skip_all, fields(path = ?path.as_ref(), cf = ?opt_cf), err)]
     pub fn open>(
         path: P,
diff --git a/crates/typed-store/src/rocks/tests.rs b/crates/typed-store/src/rocks/tests.rs
index f311f88d403d0..5171f50e6a3d1 100644
--- a/crates/typed-store/src/rocks/tests.rs
+++ b/crates/typed-store/src/rocks/tests.rs
@@ -484,7 +484,7 @@ async fn test_delete_range() {
     }

     // range operator is not inclusive of to
-    assert!(db.contains_key(&100).expect("Failed to query legel key"));
+    assert!(db.contains_key(&100).expect("Failed to query legal key"));
 }

 #[tokio::test]
diff --git a/deny.toml b/deny.toml
index a60e054fbfcaf..34f4a78b395c5 100644
--- a/deny.toml
+++ b/deny.toml
@@ -70,7 +70,7 @@ ignore = [
 [licenses]
 # The lint level for crates which do not have a detectable license
 unlicensed = "deny"
-# List of explictly allowed licenses
+# List of explicitly allowed licenses
 # See https://spdx.org/licenses/ for list of possible licenses
 # [possible values: any SPDX 3.11 short identifier (+ optional exception)].
 allow = [
@@ -86,7 +86,7 @@ allow = [
     "Unicode-DFS-2016",
     #"Apache-2.0 WITH LLVM-exception",
 ]
-# List of explictly disallowed licenses
+# List of explicitly disallowed licenses
 # See https://spdx.org/licenses/ for list of possible licenses
 # [possible values: any SPDX 3.11 short identifier (+ optional exception)].
 deny = [
diff --git a/narwhal/Docker/templates/grafana/provisioning/dashboards/monitor_services.json b/narwhal/Docker/templates/grafana/provisioning/dashboards/monitor_services.json
index 3d955a4079344..981870389b8d9 100644
--- a/narwhal/Docker/templates/grafana/provisioning/dashboards/monitor_services.json
+++ b/narwhal/Docker/templates/grafana/provisioning/dashboards/monitor_services.json
@@ -3235,7 +3235,7 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Rule group evaulation problems",
+      "title": "Rule group evaluation problems",
       "tooltip": {
         "msResolution": false,
         "shared": true,
diff --git a/narwhal/benchmark/README.md b/narwhal/benchmark/README.md
index 782e18cb69138..24e9a65b9741f 100644
--- a/narwhal/benchmark/README.md
+++ b/narwhal/benchmark/README.md
@@ -133,14 +133,14 @@ The 'Consensus TPS' and 'Consensus latency' report the average throughput and la
 Memory profiling for benchmarks are possible via `jemalloc` on Linux. It can be enabled in the following way:

-- Intall `jemalloc`, e.g. `sudo apt install libjemalloc-dev`
+- Install `jemalloc`, e.g. `sudo apt install libjemalloc-dev`
 - Enable `jemalloc` by setting `export MALLOC_CONF=prof:true,prof_prefix:jeprof.out,lg_prof_interval:33` before launching the benchmark script.

 Memory profiles with names `jeprof.out*` will be written to the currently directory.

 To visualize the profile,

-- Intall `graphviz`, e.g. `sudo apt install graphviz`
+- Install `graphviz`, e.g. `sudo apt install graphviz`
 - `sudo jeprof --svg > prof.svg`

 ## AWS Benchmarks
diff --git a/narwhal/config/src/duration_format.rs b/narwhal/config/src/duration_format.rs
index e330fc68f69e0..fc574fba72990 100644
--- a/narwhal/config/src/duration_format.rs
+++ b/narwhal/config/src/duration_format.rs
@@ -3,10 +3,10 @@
 //! Allow us to deserialize Duration values in a more human friendly format
 //! (e.x in json files). The deserialization supports to time units:
-//! * miliseconds
+//! * milliseconds
 //! * seconds
 //!
-//! To identify miliseconds then a string of the following format should be
+//! To identify milliseconds then a string of the following format should be
 //! provided: ms , for example "20ms", or "2_000ms".
 //!
 //! To identify seconds, then the following format should be used:
@@ -35,7 +35,7 @@ where
     }

     Err(serde::de::Error::custom(format!(
-        "Wrong format detected: {s}. It should be number in miliseconds, e.x 10ms"
+        "Wrong format detected: {s}. It should be number in milliseconds, e.x 10ms"
     )))
 }

@@ -65,7 +65,7 @@ mod tests {
     }

     #[test]
-    fn parse_miliseconds_and_seconds() {
+    fn parse_milliseconds_and_seconds() {
         // GIVEN
         let input = r#"{
             "property_1": "1_000ms",
diff --git a/narwhal/consensus/src/dag.rs b/narwhal/consensus/src/dag.rs
index acc0403d57b57..27ed204126699 100644
--- a/narwhal/consensus/src/dag.rs
+++ b/narwhal/consensus/src/dag.rs
@@ -46,7 +46,7 @@ struct InnerDag {
     /// The Virtual DAG data structure, which lets us track certificates in a memory-conscious way
     dag: NodeDag,

-    /// Secondary index: An authority-aware map of the DAG's veertex Certificates
+    /// Secondary index: An authority-aware map of the DAG's vertex Certificates
     vertices: RwLock>,

     /// Metrics handler
@@ -471,7 +471,7 @@ impl Dag {
     /// Removes certificates from the Dag, reclaiming memory in the process.
     ///
-    /// Note: If some digests are unkown to the Dag, this will return an error, but will nonetheless delete
+    /// Note: If some digests are unknown to the Dag, this will return an error, but will nonetheless delete
     /// the certificates for known digests which are removable.
     ///
     pub async fn remove>(
@@ -494,7 +494,7 @@ impl Dag {
             .expect("Failed to receive reply to Remove command from store")
     }

     /// Returns the certificate for the digest by waiting until it is
-    /// avaialable in the dag
+    /// available in the dag
     pub async fn notify_read(
         &self,
         digest: CertificateDigest,
diff --git a/narwhal/consensus/src/tusk.rs b/narwhal/consensus/src/tusk.rs
index fbf884f2b9156..128807617626e 100644
--- a/narwhal/consensus/src/tusk.rs
+++ b/narwhal/consensus/src/tusk.rs
@@ -150,7 +150,7 @@ impl Tusk {
     ) -> Option<&'a (CertificateDigest, Certificate)> {
         // TODO: We should elect the leader of round r-2 using the common coin revealed at round r.
         // At this stage, we are guaranteed to have 2f+1 certificates from round r (which is enough to
-        // compute the coin). We currently just use a stake-weighted choise seeded by the round.
+        // compute the coin). We currently just use a stake-weighted choice seeded by the round.
         //
         // Note: this function is often called with even rounds only. While we do not aim at random selection
         // yet (see issue #10), repeated calls to this function should still pick from the whole roster of leaders.
diff --git a/narwhal/dag/src/node_dag.rs b/narwhal/dag/src/node_dag.rs index 8f01a907af0cb..3b17625b6b614 100644 --- a/narwhal/dag/src/node_dag.rs +++ b/narwhal/dag/src/node_dag.rs @@ -438,7 +438,7 @@ mod tests { let mut digests = Vec::new(); for node in dag.iter() { digests.push(node.digest()); - // the elements are generated in order & with no missing parents => no suprises + // the elements are generated in order & with no missing parents => no surprises assert!(node_dag.try_insert(node.clone()).is_ok()); } let mut heads = HashSet::new(); @@ -465,7 +465,7 @@ mod tests { let mut digests = Vec::new(); for node in dag.iter() { digests.push(node.digest()); - // the elements are generated in order & with no missing parents => no suprises + // the elements are generated in order & with no missing parents => no surprises assert!(node_dag.try_insert(node.clone()).is_ok()); } let mut heads = HashSet::new(); @@ -495,7 +495,7 @@ mod tests { if node.compressible(){ compressibles.push(node.digest()); } - // the elements are generated in order & with no missing parents => no suprises + // the elements are generated in order & with no missing parents => no surprises assert!(node_dag.try_insert(node.clone()).is_ok()); } } @@ -532,7 +532,7 @@ mod tests { { for node in dag.iter() { digests.push(node.digest()); - // the elements are generated in order & with no missing parents => no suprises + // the elements are generated in order & with no missing parents => no surprises assert!(node_dag.try_insert(node.clone()).is_ok()); } } diff --git a/narwhal/deny.toml b/narwhal/deny.toml index fe3cf87de6ac2..34430b1a62d39 100644 --- a/narwhal/deny.toml +++ b/narwhal/deny.toml @@ -66,7 +66,7 @@ ignore = [ [licenses] # The lint level for crates which do not have a detectable license unlicensed = "deny" -# List of explictly allowed licenses +# List of explicitly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. allow = [ @@ -80,7 +80,7 @@ allow = [ "Unicode-DFS-2016", #"Apache-2.0 WITH LLVM-exception", ] -# List of explictly disallowed licenses +# List of explicitly disallowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. deny = [ diff --git a/narwhal/executor/src/subscriber.rs b/narwhal/executor/src/subscriber.rs index 1ca1fd5ae9849..2ddb9be58902a 100644 --- a/narwhal/executor/src/subscriber.rs +++ b/narwhal/executor/src/subscriber.rs @@ -163,7 +163,7 @@ impl Subscriber { // It's important to have the futures in ordered fashion as we want // to guarantee that will deliver to the executor the certificates // in the same order we received from rx_sequence. So it doesn't - // mater if we somehow managed to fetch the batches from a later + // matter if we somehow managed to fetch the batches from a later // certificate. Unless the earlier certificate's payload has been // fetched, no later certificate will be delivered. 
let mut waiting = FuturesOrdered::new(); diff --git a/narwhal/network/src/admin.rs b/narwhal/network/src/admin.rs index e77c0f3b682f0..83f9f6e535456 100644 --- a/narwhal/network/src/admin.rs +++ b/narwhal/network/src/admin.rs @@ -50,7 +50,7 @@ pub fn start_admin_server( handles.push(spawn_logged_monitored_task!( async move { - // retry a few times before quiting + // retry a few times before quitting let mut total_retries = 10; loop { diff --git a/narwhal/storage/src/certificate_store.rs b/narwhal/storage/src/certificate_store.rs index 97fab8603a98d..f966e7a16a63f 100644 --- a/narwhal/storage/src/certificate_store.rs +++ b/narwhal/storage/src/certificate_store.rs @@ -721,7 +721,7 @@ mod test { // and populate the rest with a write_all store.write_all(certs).unwrap(); - // now wait on handle an assert result for a signle certificate + // now wait on handle an assert result for a single certificate let received_certificate = handle_1 .await .expect("error") diff --git a/narwhal/worker/src/quorum_waiter.rs b/narwhal/worker/src/quorum_waiter.rs index bedd6c109e3b9..b9ff1edcd1232 100644 --- a/narwhal/worker/src/quorum_waiter.rs +++ b/narwhal/worker/src/quorum_waiter.rs @@ -113,7 +113,7 @@ impl QuorumWaiter { // Wait for the first 2f nodes to send back an Ack. Then we consider the batch // delivered and we send its digest to the primary (that will include it into - // the dag). This should reduce the amount of synching. + // the dag). This should reduce the amount of syncing. let threshold = self.committee.quorum_threshold(); let mut total_stake = self.committee.stake(&self.name); diff --git a/scripts/lldb_frame_sizes.py b/scripts/lldb_frame_sizes.py index 802de8f261b8d..2f20181a99f31 100644 --- a/scripts/lldb_frame_sizes.py +++ b/scripts/lldb_frame_sizes.py @@ -7,7 +7,7 @@ # # LLDB Utility to print the current backtrace with estimated stack # frame sizes (useful for figuring out which frames are contributing -# most to a stack overlow). +# most to a stack overflow). # # == Usage == # diff --git a/sdk/bcs/src/index.ts b/sdk/bcs/src/index.ts index 19edc5e6df17a..cca1a24870454 100644 --- a/sdk/bcs/src/index.ts +++ b/sdk/bcs/src/index.ts @@ -1027,7 +1027,7 @@ export function decodeStr(data: string, encoding: string): Uint8Array { /** * Register the base set of primitive and common types. - * Is called in the `BCS` contructor automatically but can + * Is called in the `BCS` constructor automatically but can * be ignored if the `withPrimitives` argument is not set. 
*/ export function registerPrimitives(bcs: BCS): void { diff --git a/sdk/typescript/src/types/framework.ts b/sdk/typescript/src/types/framework.ts index 8d5e016f2b517..f58e16d0a36d8 100644 --- a/sdk/typescript/src/types/framework.ts +++ b/sdk/typescript/src/types/framework.ts @@ -113,7 +113,7 @@ export class Coin { * * @param amount coin balance * @param exclude object ids of the coins to exclude - * @return an arbitray coin with balance greater than or equal to `amount + * @return an arbitrary coin with balance greater than or equal to `amount */ static selectCoinWithBalanceGreaterThanOrEqual( coins: ObjectDataFull[], diff --git a/sui_programmability/examples/capy/sources/capy_market.move b/sui_programmability/examples/capy/sources/capy_market.move index e419363db56ea..d354a9e2d1531 100644 --- a/sui_programmability/examples/capy/sources/capy_market.move +++ b/sui_programmability/examples/capy/sources/capy_market.move @@ -179,7 +179,7 @@ module capy::capy_market { } /// Withdraw profits from the marketplace as a single Coin (accumulated as a DOF). - /// Uses sender of transaction to determine storage and controll access. + /// Uses sender of transaction to determine storage and control access. entry fun take_profits( market: &mut CapyMarket, ctx: &TxContext diff --git a/sui_programmability/examples/defi/sources/pool.move b/sui_programmability/examples/defi/sources/pool.move index 5d2b0aca9bc4c..401228802f96e 100644 --- a/sui_programmability/examples/defi/sources/pool.move +++ b/sui_programmability/examples/defi/sources/pool.move @@ -568,7 +568,7 @@ module defi::pool_tests { // Try small values assert!(pool::get_input_price(10, 1000, 1000, 0) == 9, 0); - // Even with 0 comission there's this small loss of 1 + // Even with 0 commission there's this small loss of 1 assert!(pool::get_input_price(10000, max_val, max_val, 0) == 9999, 0); assert!(pool::get_input_price(1000, max_val, max_val, 0) == 999, 0); assert!(pool::get_input_price(100, max_val, max_val, 0) == 99, 0); diff --git a/sui_programmability/examples/defi/sources/subscription.move b/sui_programmability/examples/defi/sources/subscription.move index d91d264b4d856..1f514b63b5a80 100644 --- a/sui_programmability/examples/defi/sources/subscription.move +++ b/sui_programmability/examples/defi/sources/subscription.move @@ -104,7 +104,7 @@ module defi::dev_pass { /// Rough outline of an AMM. /// For simplicity pool implementation details are omitted but marked as comments to -/// show correllation with the `defi/pool.move` example. +/// show correlation with the `defi/pool.move` example. 
module defi::some_amm { use defi::dev_pass::{Self, Subscription, SingleUse}; use sui::tx_context::{Self, TxContext}; diff --git a/sui_programmability/examples/frenemies/sources/frenemies.move b/sui_programmability/examples/frenemies/sources/frenemies.move index d84a2aeacdbc3..be75f9caded29 100644 --- a/sui_programmability/examples/frenemies/sources/frenemies.move +++ b/sui_programmability/examples/frenemies/sources/frenemies.move @@ -96,7 +96,7 @@ module frenemies::frenemies { scorecard.assignment = assignment::get(state, ctx); } - /// Return the name associatd with this scorecard + /// Return the name associated with this scorecard public fun name(self: &Scorecard): &Name { &self.name } diff --git a/sui_programmability/examples/frenemies/tests/frenemies_tests.move b/sui_programmability/examples/frenemies/tests/frenemies_tests.move index 5c4b7d9645b8e..a2462e69ef1b0 100644 --- a/sui_programmability/examples/frenemies/tests/frenemies_tests.move +++ b/sui_programmability/examples/frenemies/tests/frenemies_tests.move @@ -77,7 +77,7 @@ module frenemies::frenemies_tests { #[expected_failure(abort_code = frenemies::frenemies::EScoreNotYetAvailable)] #[test] fun score_in_start_epoch() { - // attemping to get a score during the start epoch should fail + // attempting to get a score during the start epoch should fail let validators = vector[@0x1]; let scenario_val = init_test(copy validators, validators); let scenario = &mut scenario_val; @@ -99,7 +99,7 @@ module frenemies::frenemies_tests { #[expected_failure(abort_code = frenemies::frenemies::EScoreNotYetAvailable)] #[test] fun double_update() { - // attemping to update a scorecard twice should fail + // attempting to update a scorecard twice should fail let validators = vector[@0x1]; let scenario_val = init_test(copy validators, validators); let scenario = &mut scenario_val; diff --git a/sui_programmability/examples/fungible_tokens/sources/private_coin.move b/sui_programmability/examples/fungible_tokens/sources/private_coin.move index db11848cda1d7..83ab7519d711c 100644 --- a/sui_programmability/examples/fungible_tokens/sources/private_coin.move +++ b/sui_programmability/examples/fungible_tokens/sources/private_coin.move @@ -189,7 +189,7 @@ module fungible_tokens::private_coin { transfer::transfer(mint(c, amount, ctx), recipient) } - /// Split coin from `self`, the splitted coin will be a private coin worth the value committed by `new_commitment`, + /// Split coin from `self`, the split coin will be a private coin worth the value committed by `new_commitment`, /// the remaining balance is left in `self`. Note that performing split on a public coin turns it into a private coin. public entry fun split_and_transfer( c: &mut PrivateCoin, new_commitment: vector, proof: vector, recipient: address, ctx: &mut TxContext @@ -198,7 +198,7 @@ module fungible_tokens::private_coin { transfer::transfer(take(&mut c.balance, ristretto_point, proof, ctx), recipient) } - /// Split coin from `self`, the splitted coin will be a public coin worth `value`. + /// Split coin from `self`, the split coin will be a public coin worth `value`. /// the remaining balance is left in `self`. `self` should retain it's privacy option after this call. 
public entry fun split_public_and_transfer(self: &mut PrivateCoin, value: u64, proof: vector, recipient: address, ctx: &mut TxContext) { transfer::transfer( diff --git a/sui_programmability/examples/fungible_tokens/sources/regulated_coin.move b/sui_programmability/examples/fungible_tokens/sources/regulated_coin.move index 64c39c617d2eb..41f9a43766987 100644 --- a/sui_programmability/examples/fungible_tokens/sources/regulated_coin.move +++ b/sui_programmability/examples/fungible_tokens/sources/regulated_coin.move @@ -163,7 +163,7 @@ module abc::abc { // === Admin actions: creating balances, minting coins and banning addresses === /// Create an empty `RCoin` instance for account `for`. AbcTreasuryCap is passed for - /// authentification purposes - only admin can create new accounts. + /// authentication purposes - only admin can create new accounts. public entry fun create(_: &AbcTreasuryCap, for: address, ctx: &mut TxContext) { transfer::transfer(zero(for, ctx), for) } diff --git a/sui_programmability/examples/games/sources/drand_based_scratch_card.move b/sui_programmability/examples/games/sources/drand_based_scratch_card.move index 23c4b2778e593..2eed1d3978bfa 100644 --- a/sui_programmability/examples/games/sources/drand_based_scratch_card.move +++ b/sui_programmability/examples/games/sources/drand_based_scratch_card.move @@ -8,7 +8,7 @@ /// round. This creates two objects: /// - Game - an immutable object that includes all parameters to be used when buying tickets. /// - Reward - a shared object that holds the reward. It can be withdrawn by any winner ("first come, first served"). -/// If not withdrawn within a few epoches, can be returned to the game creator. +/// If not withdrawn within a few epochs, can be returned to the game creator. /// /// A user who wishes to play game G should: /// - Check if G.epoch is the current epoch, and that G.base_drand_round + 24h is in the future. @@ -132,7 +132,7 @@ module games::drand_based_scratch_card { // The randomness for the current ticket is derived by HMAC(drand randomness, ticket id). // A solution like checking if (drand randomness % reward_factor) == (ticket id % reward_factor) is not secure // as the adversary can control the values of ticket id. (For this particular game this attack is not - // devestating, but for similar games it might be.) + // devastating, but for similar games it might be.) 
let random_key = drand_lib::derive_randomness(drand_sig); let randomness = hmac_sha3_256(&random_key, &object::id_to_bytes(&object::id(&ticket))); let is_winner = (drand_lib::safe_selection(game.reward_factor, digest::sha3_256_digest_to_bytes(&randomness)) == 0); diff --git a/sui_programmability/examples/games/sources/sea_hero.move b/sui_programmability/examples/games/sources/sea_hero.move index 5b00fe6f12d60..e828f71b0e099 100644 --- a/sui_programmability/examples/games/sources/sea_hero.move +++ b/sui_programmability/examples/games/sources/sea_hero.move @@ -89,7 +89,7 @@ module games::sea_hero { // --- Object and coin creation --- - /// Game admin can reate a monster wrapping a coin worth `reward` and send + /// Game admin can create a monster wrapping a coin worth `reward` and send /// it to `recipient` public entry fun create_monster( admin: &mut SeaHeroAdmin, diff --git a/sui_programmability/examples/nfts/sources/chat.move b/sui_programmability/examples/nfts/sources/chat.move index 46bf28b1d7da9..100f6fc032977 100644 --- a/sui_programmability/examples/nfts/sources/chat.move +++ b/sui_programmability/examples/nfts/sources/chat.move @@ -23,7 +23,7 @@ module nfts::chat { // Post's text. text: String, // Set if referencing an another object (i.e., due to a Like, Retweet, Reply etc). - // We allow referencing any object type, not ony Chat NFTs. + // We allow referencing any object type, not only Chat NFTs. ref_id: Option
, // app-specific metadata. We do not enforce a metadata format and delegate this to app layer. metadata: vector,