From 126ca26aaa955bbb002db90308223916a998179f Mon Sep 17 00:00:00 2001 From: kevaundray Date: Fri, 3 Feb 2023 19:31:55 +0000 Subject: [PATCH] chore: Add spellchecker (#682) --- .github/workflows/rust.yml | 11 + CONTRIBUTING.md | 2 +- Cargo.toml | 2 +- README.md | 4 +- crates/fm/src/lib.rs | 4 +- crates/nargo/src/cli/compile_cmd.rs | 4 +- crates/nargo/src/cli/gates_cmd.rs | 4 +- crates/nargo/src/cli/mod.rs | 6 +- crates/nargo/src/cli/prove_cmd.rs | 4 +- crates/nargo/src/cli/verify_cmd.rs | 8 +- crates/nargo/tests/prove_and_verify.rs | 26 +- .../tests/test_data/range_fail/src/main.nr | 2 +- crates/noirc_abi/src/input_parser/mod.rs | 4 +- crates/noirc_abi/src/input_parser/toml.rs | 2 +- crates/noirc_driver/src/lib.rs | 12 +- crates/noirc_driver/src/main.rs | 8 +- crates/noirc_evaluator/src/lib.rs | 42 ++-- crates/noirc_evaluator/src/ssa/acir_gen.rs | 92 +++---- crates/noirc_evaluator/src/ssa/anchor.rs | 4 +- crates/noirc_evaluator/src/ssa/block.rs | 18 +- crates/noirc_evaluator/src/ssa/code_gen.rs | 42 ++-- crates/noirc_evaluator/src/ssa/conditional.rs | 42 ++-- crates/noirc_evaluator/src/ssa/context.rs | 110 ++++----- crates/noirc_evaluator/src/ssa/flatten.rs | 32 +-- crates/noirc_evaluator/src/ssa/function.rs | 53 ++-- crates/noirc_evaluator/src/ssa/inline.rs | 24 +- crates/noirc_evaluator/src/ssa/integer.rs | 88 ++++--- crates/noirc_evaluator/src/ssa/mem.rs | 4 +- crates/noirc_evaluator/src/ssa/mod.rs | 2 +- crates/noirc_evaluator/src/ssa/node.rs | 58 ++--- .../src/ssa/{optim.rs => optimizations.rs} | 30 ++- crates/noirc_evaluator/src/ssa/ssa_form.rs | 2 +- crates/noirc_frontend/src/ast/expression.rs | 2 +- crates/noirc_frontend/src/ast/function.rs | 2 +- crates/noirc_frontend/src/ast/mod.rs | 12 +- crates/noirc_frontend/src/ast/statement.rs | 2 +- crates/noirc_frontend/src/graph/mod.rs | 2 +- .../src/hir/def_collector/dc_crate.rs | 2 +- .../src/hir/resolution/resolver.rs | 12 +- crates/noirc_frontend/src/hir/scope/mod.rs | 2 +- .../noirc_frontend/src/hir/type_check/expr.rs | 66 ++--- .../noirc_frontend/src/hir/type_check/mod.rs | 2 +- .../noirc_frontend/src/hir/type_check/stmt.rs | 14 +- crates/noirc_frontend/src/hir_def/types.rs | 233 +++++++++--------- crates/noirc_frontend/src/lexer/lexer.rs | 10 +- crates/noirc_frontend/src/lexer/token.rs | 16 +- crates/noirc_frontend/src/lib.rs | 2 +- crates/noirc_frontend/src/main.rs | 2 +- .../ast.rs | 10 +- .../mod.rs | 56 ++--- .../printer.rs | 0 crates/noirc_frontend/src/node_interner.rs | 4 +- crates/noirc_frontend/src/parser/parser.rs | 64 ++--- crates/readme.md | 2 +- crates/wasm/src/lib.rs | 2 +- cspell.json | 74 ++++++ noir_stdlib/src/array.nr | 2 +- 57 files changed, 723 insertions(+), 618 deletions(-) rename crates/noirc_evaluator/src/ssa/{optim.rs => optimizations.rs} (94%) rename crates/noirc_frontend/src/{monomorphisation => monomorphization}/ast.rs (95%) rename crates/noirc_frontend/src/{monomorphisation => monomorphization}/mod.rs (95%) rename crates/noirc_frontend/src/{monomorphisation => monomorphization}/printer.rs (100%) create mode 100644 cspell.json diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 05448211414..63665b399c9 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -95,3 +95,14 @@ jobs: with: command: fmt args: --all -- --check + + spellcheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: streetsidesoftware/cspell-action@v2 + with: + files: | + **/*.{md,rs} + incremental_files_only : true # Run this action on files which have changed in PR + 
strict: false # Do not fail, if a spelling mistake is found (This can be annoying for contributors) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 891c3559942..eaa89357fd9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -27,7 +27,7 @@ Before you create a pull request, create an issue that thoroughly describes the Noir is still very new and there are many cryptographic primitives that we have yet to build that will be useful for the community. If you have other ideas, please reach out on the [Aztec Discord](https://discord.gg/aztec) to discuss. You can find the current list of requested primitives in the [issues section](https://github.com/noir-lang/noir/labels/noir-stdlib) marked with the label `noir-stdlib`. -## Funding Opportunties +## Funding Opportunities Aztec is offering grants to people and teams that want to use, test or build Noir. You can find more information about the grants program [here](https://aztec.network/grants). diff --git a/Cargo.toml b/Cargo.toml index 81a6361b7b4..6abd49313a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ default-members = ["crates/nargo"] [workspace.package] version = "0.1.0" -authors = ["Kevaundray Wedderburn "] +authors = ["The Noir Team "] edition = "2021" rust-version = "1.64" diff --git a/README.md b/README.md index acad40d4c97..c03b9de897e 100644 --- a/README.md +++ b/README.md @@ -44,8 +44,8 @@ The current focus is to gather as much feedback as possible while in the alpha p Concretely the following items are on the road map: -- General code sanitisation and documentation (ongoing effort) -- Prover and Verifier Key logic. (Prover and Verifier preprocess per compile) +- General code sanitization and documentation (ongoing effort) +- Prover and Verifier Key logic. (Prover and Verifier pre-process per compile) - Fallback mechanism for backend unsupported opcodes - Visibility modifiers - Signed integers diff --git a/crates/fm/src/lib.rs b/crates/fm/src/lib.rs index b2fb1e3168a..ebc3d7021a8 100644 --- a/crates/fm/src/lib.rs +++ b/crates/fm/src/lib.rs @@ -46,7 +46,7 @@ impl FileManager { let source = file_reader::read_file_to_string(path_to_file).ok()?; let file_id = self.file_map.add_file(path_to_file.to_path_buf().into(), source); - let path_to_file = virtualise_path(path_to_file, file_type); + let path_to_file = virtualize_path(path_to_file, file_type); self.register_path(file_id, path_to_file); Some(file_id) @@ -99,7 +99,7 @@ impl Default for FileManager { /// Returns /// For Normal filetypes, given "src/mod.nr" this method returns "src/mod" /// For Root filetypes, given "src/mod.nr" this method returns "src" -fn virtualise_path(path: &Path, file_type: FileType) -> VirtualPath { +fn virtualize_path(path: &Path, file_type: FileType) -> VirtualPath { let mut path = path.to_path_buf(); let path = match file_type { FileType::Root => { diff --git a/crates/nargo/src/cli/compile_cmd.rs b/crates/nargo/src/cli/compile_cmd.rs index 37eaf1bdf81..5e130a59822 100644 --- a/crates/nargo/src/cli/compile_cmd.rs +++ b/crates/nargo/src/cli/compile_cmd.rs @@ -21,13 +21,13 @@ pub(crate) fn run(args: ArgMatches) -> Result<(), CliError> { let witness = args.is_present("witness"); let allow_warnings = args.is_present("allow-warnings"); - let curr_dir = std::env::current_dir().unwrap(); + let current_dir = std::env::current_dir().unwrap(); let mut circuit_path = PathBuf::new(); circuit_path.push(TARGET_DIR); let result = generate_circuit_and_witness_to_disk( circuit_name, - curr_dir, + current_dir, circuit_path, witness, allow_warnings, diff 
--git a/crates/nargo/src/cli/gates_cmd.rs b/crates/nargo/src/cli/gates_cmd.rs index 6adb818f2f8..04f0ecab759 100644 --- a/crates/nargo/src/cli/gates_cmd.rs +++ b/crates/nargo/src/cli/gates_cmd.rs @@ -13,8 +13,8 @@ pub(crate) fn run(args: ArgMatches) -> Result<(), CliError> { } pub fn count_gates(show_ssa: bool, allow_warnings: bool) -> Result<(), CliError> { - let curr_dir = std::env::current_dir().unwrap(); - count_gates_with_path(curr_dir, show_ssa, allow_warnings) + let current_dir = std::env::current_dir().unwrap(); + count_gates_with_path(current_dir, show_ssa, allow_warnings) } pub fn count_gates_with_path>( diff --git a/crates/nargo/src/cli/mod.rs b/crates/nargo/src/cli/mod.rs index 336dffecc09..02bc8fc7d0e 100644 --- a/crates/nargo/src/cli/mod.rs +++ b/crates/nargo/src/cli/mod.rs @@ -43,7 +43,7 @@ pub fn start_cli() { let matches = App::new("nargo") .about("Noir's package manager") .version(VERSION_STRING) - .author("Kevaundray Wedderburn ") + .author("The Noir Team ") .subcommand( App::new("check") .about("Checks the constraint system for errors") @@ -89,7 +89,7 @@ pub fn start_cli() { ) .subcommand( App::new("gates") - .about("Counts the occurences of different gates in circuit") + .about("Counts the occurrences of different gates in circuit") .arg(show_ssa) .arg(allow_warnings), ) @@ -170,7 +170,7 @@ fn write_inputs_to_file>( dir_path }; - let serialized_output = format.serialise(w_map)?; + let serialized_output = format.serialize(w_map)?; write_to_file(serialized_output.as_bytes(), &file_path); Ok(()) diff --git a/crates/nargo/src/cli/prove_cmd.rs b/crates/nargo/src/cli/prove_cmd.rs index a1fda13b3db..f734c15f08c 100644 --- a/crates/nargo/src/cli/prove_cmd.rs +++ b/crates/nargo/src/cli/prove_cmd.rs @@ -31,12 +31,12 @@ pub(crate) fn run(args: ArgMatches) -> Result<(), CliError> { const WITNESS_OFFSET: u32 = 1; fn prove(proof_name: Option<&str>, show_ssa: bool, allow_warnings: bool) -> Result<(), CliError> { - let curr_dir = std::env::current_dir().unwrap(); + let current_dir = std::env::current_dir().unwrap(); let mut proof_dir = PathBuf::new(); proof_dir.push(PROOFS_DIR); - prove_with_path(proof_name, curr_dir, proof_dir, show_ssa, allow_warnings)?; + prove_with_path(proof_name, current_dir, proof_dir, show_ssa, allow_warnings)?; Ok(()) } diff --git a/crates/nargo/src/cli/verify_cmd.rs b/crates/nargo/src/cli/verify_cmd.rs index 8ec38531102..da6f043b724 100644 --- a/crates/nargo/src/cli/verify_cmd.rs +++ b/crates/nargo/src/cli/verify_cmd.rs @@ -29,12 +29,12 @@ pub(crate) fn run(args: ArgMatches) -> Result<(), CliError> { } fn verify(proof_name: &str, allow_warnings: bool) -> Result { - let curr_dir = std::env::current_dir().unwrap(); + let current_dir = std::env::current_dir().unwrap(); let mut proof_path = PathBuf::new(); //or cur_dir? 
proof_path.push(PROOFS_DIR);
proof_path.push(Path::new(proof_name));
proof_path.set_extension(PROOF_EXT);
- verify_with_path(&curr_dir, &proof_path, false, allow_warnings)
+ verify_with_path(&current_dir, &proof_path, false, allow_warnings)
}
pub fn verify_with_path>(
@@ -50,9 +50,9 @@ pub fn verify_with_path>(
let public_abi = compiled_program.abi.clone().unwrap().public_abi();
let num_pub_params = public_abi.num_parameters();
if num_pub_params != 0 {
- let curr_dir = program_dir;
+ let current_dir = program_dir;
public_inputs =
- read_inputs_from_file(curr_dir, VERIFIER_INPUT_FILE, Format::Toml, public_abi)?;
+ read_inputs_from_file(current_dir, VERIFIER_INPUT_FILE, Format::Toml, public_abi)?;
}
let valid_proof = verify_proof(compiled_program, public_inputs, load_proof(proof_path)?)?;
diff --git a/crates/nargo/tests/prove_and_verify.rs b/crates/nargo/tests/prove_and_verify.rs
index e0d43139107..e62ca2a5186 100644
--- a/crates/nargo/tests/prove_and_verify.rs
+++ b/crates/nargo/tests/prove_and_verify.rs
@@ -28,22 +28,22 @@ mod tests {
#[test]
fn noir_integration() {
- let mut cdir = std::env::current_dir().unwrap();
- cdir.push(TEST_DIR);
- cdir.push(TEST_DATA_DIR);
+ let mut current_dir = std::env::current_dir().unwrap();
+ current_dir.push(TEST_DIR);
+ current_dir.push(TEST_DATA_DIR);
//load config.tml file from test_data directory
- cdir.push(CONFIG_FILE);
- let config_path = std::fs::read_to_string(cdir).unwrap();
- let conf_data: BTreeMap> = load_conf(&config_path);
- let mut cdir = std::env::current_dir().unwrap();
- cdir.push(TEST_DIR);
- cdir.push(TEST_DATA_DIR);
+ current_dir.push(CONFIG_FILE);
+ let config_path = std::fs::read_to_string(current_dir).unwrap();
+ let config_data: BTreeMap> = load_conf(&config_path);
+ let mut current_dir = std::env::current_dir().unwrap();
+ current_dir.push(TEST_DIR);
+ current_dir.push(TEST_DATA_DIR);
- for c in fs::read_dir(cdir.as_path()).unwrap().flatten() {
+ for c in fs::read_dir(current_dir.as_path()).unwrap().flatten() {
if let Ok(test_name) = c.file_name().into_string() {
println!("Running test {test_name:?}");
- if c.path().is_dir() && !conf_data["exclude"].contains(&test_name) {
+ if c.path().is_dir() && !config_data["exclude"].contains(&test_name) {
let verified = std::panic::catch_unwind(|| {
nargo::cli::prove_and_verify("pp", &c.path(), false)
});
@@ -51,11 +51,11 @@ mod tests {
let r = match verified {
Ok(result) => result,
Err(_) => {
- panic!("\n\n\nPanic occured while running test {:?} (ignore the following panic)", c.file_name());
+ panic!("\n\n\nPanic occurred while running test {:?} (ignore the following panic)", c.file_name());
}
};
- if conf_data["fail"].contains(&test_name) {
+ if config_data["fail"].contains(&test_name) {
assert!(!r, "{:?} should not succeed", c.file_name());
} else {
assert!(r, "verification fail for {:?}", c.file_name());
diff --git a/crates/nargo/tests/test_data/range_fail/src/main.nr b/crates/nargo/tests/test_data/range_fail/src/main.nr
index 51fb073e27c..4535c5d9e5f 100644
--- a/crates/nargo/tests/test_data/range_fail/src/main.nr
+++ b/crates/nargo/tests/test_data/range_fail/src/main.nr
@@ -1,6 +1,6 @@
// Multiple integers constraints.
// -// There is currently no range optimiser currently in ACIR :( +// There is currently no range optimizer currently in ACIR :( // fn main(x: u8, y: Field) { let _z = x + (y as u8); diff --git a/crates/noirc_abi/src/input_parser/mod.rs b/crates/noirc_abi/src/input_parser/mod.rs index afaab0a2e15..680aef950e8 100644 --- a/crates/noirc_abi/src/input_parser/mod.rs +++ b/crates/noirc_abi/src/input_parser/mod.rs @@ -97,12 +97,12 @@ impl Format { } } - pub fn serialise( + pub fn serialize( &self, w_map: &BTreeMap, ) -> Result { match self { - Format::Toml => toml::serialise_to_toml(w_map), + Format::Toml => toml::serialize_to_toml(w_map), } } } diff --git a/crates/noirc_abi/src/input_parser/toml.rs b/crates/noirc_abi/src/input_parser/toml.rs index 069658ce7be..4ffff7c76e7 100644 --- a/crates/noirc_abi/src/input_parser/toml.rs +++ b/crates/noirc_abi/src/input_parser/toml.rs @@ -24,7 +24,7 @@ pub(crate) fn parse_toml( }) } -pub(crate) fn serialise_to_toml( +pub(crate) fn serialize_to_toml( w_map: &BTreeMap, ) -> Result { // Toml requires that values be emitted before tables. Thus, we must reorder our map in case a TomlTypes::Table comes before any other values in the toml map diff --git a/crates/noirc_driver/src/lib.rs b/crates/noirc_driver/src/lib.rs index 0f97ab351ce..1a822e9bcb7 100644 --- a/crates/noirc_driver/src/lib.rs +++ b/crates/noirc_driver/src/lib.rs @@ -8,7 +8,7 @@ use noirc_evaluator::create_circuit; use noirc_frontend::graph::{CrateId, CrateName, CrateType, LOCAL_CRATE}; use noirc_frontend::hir::def_map::CrateDefMap; use noirc_frontend::hir::Context; -use noirc_frontend::monomorphisation::monomorphise; +use noirc_frontend::monomorphization::monomorphize; use serde::{Deserialize, Serialize}; use std::path::{Path, PathBuf}; @@ -127,12 +127,12 @@ impl Driver { } // NOTE: Maybe build could be skipped given that now it is a pass through method. 
- /// Statically analyses the local crate + /// Statically analyzes the local crate pub fn check(&mut self, allow_warnings: bool) { - self.analyse_crate(allow_warnings) + self.analyze_crate(allow_warnings) } - fn analyse_crate(&mut self, allow_warnings: bool) { + fn analyze_crate(&mut self, allow_warnings: bool) { let mut errs = vec![]; CrateDefMap::collect_defs(LOCAL_CRATE, &mut self.context, &mut errs); let mut error_count = 0; @@ -187,7 +187,7 @@ impl Driver { let func_meta = self.context.def_interner.function_meta(&main_function); let abi = func_meta.into_abi(&self.context.def_interner); - let program = monomorphise(main_function, self.context.def_interner); + let program = monomorphize(main_function, self.context.def_interner); // Compile Program let circuit = match create_circuit( @@ -199,7 +199,7 @@ impl Driver { Ok(circuit) => circuit, Err(err) => { // The FileId here will be the file id of the file with the main file - // Errors will be shown at the callsite without a stacktrace + // Errors will be shown at the call site without a stacktrace let file_id = err.location.map(|loc| loc.file); let error_count = Reporter::with_diagnostics( file_id, diff --git a/crates/noirc_driver/src/main.rs b/crates/noirc_driver/src/main.rs index 7e7b4450129..5663d745898 100644 --- a/crates/noirc_driver/src/main.rs +++ b/crates/noirc_driver/src/main.rs @@ -11,12 +11,12 @@ fn main() { driver.create_local_crate(ROOT_DIR_MAIN, CrateType::Binary); // Add libraries into Driver - let ncrate_id1 = driver.create_non_local_crate(EXTERNAL_DIR2, CrateType::Library); - let ncrate_id2 = driver.create_non_local_crate(EXTERNAL_DIR, CrateType::Library); + let crate_id1 = driver.create_non_local_crate(EXTERNAL_DIR2, CrateType::Library); + let crate_id2 = driver.create_non_local_crate(EXTERNAL_DIR, CrateType::Library); // Add dependencies as package - driver.add_dep(LOCAL_CRATE, ncrate_id1, "coo4"); - driver.add_dep(LOCAL_CRATE, ncrate_id2, "coo3"); + driver.add_dep(LOCAL_CRATE, crate_id1, "coo4"); + driver.add_dep(LOCAL_CRATE, crate_id2, "coo3"); driver.into_compiled_program(acvm::Language::R1CS, false, false); } diff --git a/crates/noirc_evaluator/src/lib.rs b/crates/noirc_evaluator/src/lib.rs index 3e07c6ae7c1..13acd97869e 100644 --- a/crates/noirc_evaluator/src/lib.rs +++ b/crates/noirc_evaluator/src/lib.rs @@ -8,7 +8,7 @@ use acvm::Language; use errors::{RuntimeError, RuntimeErrorKind}; use iter_extended::btree_map; use noirc_abi::{AbiType, AbiVisibility}; -use noirc_frontend::monomorphisation::ast::*; +use noirc_frontend::monomorphization::ast::*; use std::collections::BTreeMap; use ssa::{code_gen::IRGenerator, node}; @@ -27,9 +27,9 @@ pub struct Evaluator { opcodes: Vec, } -/// Compiles the Program into ACIR and applies optimisations to the arithmetic gates +/// Compiles the Program into ACIR and applies optimizations to the arithmetic gates // XXX: We return the num_witnesses, but this is the max number of witnesses -// Some of these could have been removed due to optimisations. We need this number because the +// Some of these could have been removed due to optimizations. We need this number because the // Standard format requires the number of witnesses. The max number is also fine. 
// If we had a composer object, we would not need it pub fn create_circuit( @@ -45,7 +45,7 @@ pub fn create_circuit( let witness_index = evaluator.current_witness_index(); - let optimised_circuit = acvm::compiler::compile( + let optimized_circuit = acvm::compiler::compile( Circuit { current_witness_index: witness_index, opcodes: evaluator.opcodes, @@ -56,7 +56,7 @@ pub fn create_circuit( ) .map_err(|_| RuntimeErrorKind::Spanless(String::from("produced an acvm compile error")))?; - Ok(optimised_circuit) + Ok(optimized_circuit) } impl Evaluator { @@ -109,14 +109,14 @@ impl Evaluator { program: Program, enable_logging: bool, ) -> Result<(), RuntimeError> { - let mut igen = IRGenerator::new(program); - self.parse_abi_alt(&mut igen); + let mut ir_gen = IRGenerator::new(program); + self.parse_abi_alt(&mut ir_gen); // Now call the main function - igen.codegen_main()?; + ir_gen.codegen_main()?; //Generates ACIR representation: - igen.context.ir_to_acir(self, enable_logging)?; + ir_gen.context.ir_to_acir(self, enable_logging)?; Ok(()) } @@ -139,7 +139,7 @@ impl Evaluator { def: Definition, param_type: &AbiType, visibility: &AbiVisibility, - igen: &mut IRGenerator, + ir_gen: &mut IRGenerator, ) -> Result<(), RuntimeErrorKind> { match param_type { AbiType::Field => { @@ -147,7 +147,7 @@ impl Evaluator { if *visibility == AbiVisibility::Public { self.public_inputs.push(witness); } - igen.create_new_variable( + ir_gen.create_new_variable( name.to_owned(), Some(def), node::ObjectType::NativeField, @@ -159,7 +159,7 @@ impl Evaluator { if *visibility == AbiVisibility::Public { self.public_inputs.extend(witnesses.clone()); } - igen.abi_array(name, Some(def), typ.as_ref(), *length, witnesses); + ir_gen.abi_array(name, Some(def), typ.as_ref(), *length, witnesses); } AbiType::Integer { sign: _, width } => { let witness = self.add_witness_to_cs(); @@ -167,8 +167,8 @@ impl Evaluator { if *visibility == AbiVisibility::Public { self.public_inputs.push(witness); } - let obj_type = igen.get_object_type_from_abi(param_type); // Fetch signedness of the integer - igen.create_new_variable(name.to_owned(), Some(def), obj_type, Some(witness)); + let obj_type = ir_gen.get_object_type_from_abi(param_type); // Fetch signedness of the integer + ir_gen.create_new_variable(name.to_owned(), Some(def), obj_type, Some(witness)); } AbiType::Boolean => { let witness = self.add_witness_to_cs(); @@ -177,7 +177,7 @@ impl Evaluator { self.public_inputs.push(witness); } let obj_type = node::ObjectType::Boolean; - igen.create_new_variable(name.to_owned(), Some(def), obj_type, Some(witness)); + ir_gen.create_new_variable(name.to_owned(), Some(def), obj_type, Some(witness)); } AbiType::Struct { fields } => { let new_fields = btree_map(fields, |(inner_name, value)| { @@ -192,7 +192,7 @@ impl Evaluator { struct_witnesses.values().flatten().cloned().collect(); self.public_inputs.extend(witnesses); } - igen.abi_struct(name, Some(def), fields, struct_witnesses); + ir_gen.abi_struct(name, Some(def), fields, struct_witnesses); } AbiType::String { length } => { let typ = AbiType::Integer { sign: noirc_abi::Sign::Unsigned, width: 8 }; @@ -200,7 +200,7 @@ impl Evaluator { if *visibility == AbiVisibility::Public { self.public_inputs.extend(witnesses.clone()); } - igen.abi_array(name, Some(def), &typ, *length, witnesses); + ir_gen.abi_array(name, Some(def), &typ, *length, witnesses); } } Ok(()) @@ -273,14 +273,14 @@ impl Evaluator { /// Noted in the noirc_abi, it is possible to convert Toml -> NoirTypes /// However, this intermediate representation is 
useful as it allows us to have /// intermediate Types which the core type system does not know about like Strings. - fn parse_abi_alt(&mut self, igen: &mut IRGenerator) { + fn parse_abi_alt(&mut self, ir_gen: &mut IRGenerator) { // XXX: Currently, the syntax only supports public witnesses // u8 and arrays are assumed to be private // This is not a short-coming of the ABI, but of the grammar // The new grammar has been conceived, and will be implemented. - let main = igen.program.main(); + let main = ir_gen.program.main(); let main_params = std::mem::take(&mut main.parameters); - let abi_params = std::mem::take(&mut igen.program.abi.parameters); + let abi_params = std::mem::take(&mut ir_gen.program.abi.parameters); // Remove the return type from the parameters // Since this is not in the main functions parameters. @@ -296,7 +296,7 @@ impl Evaluator { for ((param_id, _, param_name, _), abi_param) in main_params.iter().zip(abi_params) { assert_eq!(param_name, &abi_param.name); let def = Definition::Local(*param_id); - self.param_to_var(param_name, def, &abi_param.typ, &abi_param.visibility, igen) + self.param_to_var(param_name, def, &abi_param.typ, &abi_param.visibility, ir_gen) .unwrap(); } diff --git a/crates/noirc_evaluator/src/ssa/acir_gen.rs b/crates/noirc_evaluator/src/ssa/acir_gen.rs index 13efd26c211..b9679979e7f 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen.rs @@ -25,7 +25,7 @@ use num_bigint::BigUint; #[derive(Default)] pub struct Acir { pub arith_cache: HashMap, - pub memory_map: HashMap, //maps memory adress to expression + pub memory_map: HashMap, //maps memory address to expression } #[derive(Default, Clone, Debug)] @@ -104,7 +104,7 @@ impl From for InternalVar { impl Acir { //This function stores the substitution with the arithmetic expression in the cache //When an instruction performs arithmetic operation, its output can be represented as an arithmetic expression of its arguments - //Substitute a nodeobj as an arithmetic expression + //Substitute a node object as an arithmetic expression fn substitute( &mut self, id: NodeId, @@ -115,12 +115,12 @@ impl Acir { return self.arith_cache[&id].clone(); } let var = match ctx.try_get_node(id) { - Some(node::NodeObj::Const(c)) => { + Some(node::NodeObject::Const(c)) => { let f_value = FieldElement::from_be_bytes_reduce(&c.value.to_bytes_be()); let expr = Expression::from_field(f_value); InternalVar::new(expr, None, id) } - Some(node::NodeObj::Obj(v)) => match v.get_type() { + Some(node::NodeObject::Obj(v)) => match v.get_type() { node::ObjectType::Pointer(_) => InternalVar::default(), _ => { let w = v.witness.unwrap_or_else(|| evaluator.add_witness_to_cs()); @@ -252,7 +252,7 @@ impl Acir { if self.memory_map.contains_key(&absolute_adr) { InternalVar::from(self.memory_map[&absolute_adr].expression.clone()) } else { - //if not found, then it must be a witness (else it is non-initialised memory) + //if not found, then it must be a witness (else it is non-initialized memory) let index = idx as usize; if mem_array.values.len() > index { mem_array.values[index].clone() @@ -522,7 +522,7 @@ impl Acir { } //Generates gates for the expression: \sum_i(zero_eq(A[i]-B[i])) - //N.b. We assumes the lenghts of a and b are the same but it is not checked inside the function. + //N.b. We assumes the lengths of a and b are the same but it is not checked inside the function. 
fn zero_eq_array_sum( &mut self, a: &MemArray, @@ -571,7 +571,7 @@ impl Acir { for a in args { let l_obj = cfg.try_get_node(*a).unwrap(); match l_obj { - node::NodeObj::Obj(v) => match l_obj.get_type() { + node::NodeObject::Obj(v) => match l_obj.get_type() { node::ObjectType::Pointer(a) => { let array = &cfg.mem[a]; let num_bits = array.element_type.bits(); @@ -908,8 +908,8 @@ fn evaluate_bitwise( let mut b_witness = rhs.generate_witness(evaluator); let result = evaluator.add_witness_to_cs(); - let bsize = if bit_size % 2 == 1 { bit_size + 1 } else { bit_size }; - assert!(bsize < FieldElement::max_num_bits() - 1); + let bit_size = if bit_size % 2 == 1 { bit_size + 1 } else { bit_size }; + assert!(bit_size < FieldElement::max_num_bits() - 1); let max = FieldElement::from((1_u128 << bit_size) - 1); let bit_gate = match opcode { BinaryOp::And => acvm::acir::BlackBoxFunc::AND, @@ -933,8 +933,8 @@ fn evaluate_bitwise( let gate = AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall { name: bit_gate, inputs: vec![ - FunctionInput { witness: a_witness, num_bits: bsize }, - FunctionInput { witness: b_witness, num_bits: bsize }, + FunctionInput { witness: a_witness, num_bits: bit_size }, + FunctionInput { witness: b_witness, num_bits: bit_size }, ], outputs: vec![result], }); @@ -1021,9 +1021,9 @@ pub fn evaluate_udiv( let mut d = mul_with_witness(evaluator, &rhs.expression, &Expression::from(&q_witness)); d = add(&d, FieldElement::one(), &Expression::from(&r_witness)); d = mul_with_witness(evaluator, &d, &predicate.expression); - let div_eucl = subtract(&pa, FieldElement::one(), &d); + let div_euclidean = subtract(&pa, FieldElement::one(), &d); - evaluator.opcodes.push(AcirOpcode::Arithmetic(div_eucl)); + evaluator.opcodes.push(AcirOpcode::Arithmetic(div_euclidean)); (q_witness, r_witness) } @@ -1118,7 +1118,7 @@ pub fn mul(a: &Expression, b: &Expression) -> Expression { let mut output = Expression::from_field(a.q_c * b.q_c); - //TODO to optimise... + //TODO to optimize... 
for lc in &a.linear_combinations { let single = single_mul(lc.1, b); output = add(&output, lc.0, &single); @@ -1128,24 +1128,26 @@ pub fn mul(a: &Expression, b: &Expression) -> Expression { let mut i1 = 0; //a let mut i2 = 0; //b while i1 < a.linear_combinations.len() && i2 < b.linear_combinations.len() { - let coef_a = b.q_c * a.linear_combinations[i1].0; - let coef_b = a.q_c * b.linear_combinations[i2].0; + let coeff_a = b.q_c * a.linear_combinations[i1].0; + let coeff_b = a.q_c * b.linear_combinations[i2].0; match a.linear_combinations[i1].1.cmp(&b.linear_combinations[i2].1) { Ordering::Greater => { - if coef_b != FieldElement::zero() { - output.linear_combinations.push((coef_b, b.linear_combinations[i2].1)); + if coeff_b != FieldElement::zero() { + output.linear_combinations.push((coeff_b, b.linear_combinations[i2].1)); } i2 += 1; } Ordering::Less => { - if coef_a != FieldElement::zero() { - output.linear_combinations.push((coef_a, a.linear_combinations[i1].1)); + if coeff_a != FieldElement::zero() { + output.linear_combinations.push((coeff_a, a.linear_combinations[i1].1)); } i1 += 1; } Ordering::Equal => { - if coef_a + coef_b != FieldElement::zero() { - output.linear_combinations.push((coef_a + coef_b, a.linear_combinations[i1].1)); + if coeff_a + coeff_b != FieldElement::zero() { + output + .linear_combinations + .push((coeff_a + coeff_b, a.linear_combinations[i1].1)); } i1 += 1; @@ -1154,13 +1156,13 @@ pub fn mul(a: &Expression, b: &Expression) -> Expression { } } while i1 < a.linear_combinations.len() { - let coef_a = b.q_c * a.linear_combinations[i1].0; - output.linear_combinations.push((coef_a, a.linear_combinations[i1].1)); + let coeff_a = b.q_c * a.linear_combinations[i1].0; + output.linear_combinations.push((coeff_a, a.linear_combinations[i1].1)); i1 += 1; } while i2 < b.linear_combinations.len() { - let coef_b = a.q_c * b.linear_combinations[i2].0; - output.linear_combinations.push((coef_b, b.linear_combinations[i2].1)); + let coeff_b = a.q_c * b.linear_combinations[i2].0; + output.linear_combinations.push((coeff_b, b.linear_combinations[i2].1)); i2 += 1; } @@ -1182,9 +1184,9 @@ pub fn add(a: &Expression, k: FieldElement, b: &Expression) -> Expression { while i1 < a.linear_combinations.len() && i2 < b.linear_combinations.len() { match a.linear_combinations[i1].1.cmp(&b.linear_combinations[i2].1) { Ordering::Greater => { - let coef = b.linear_combinations[i2].0 * k; - if coef != FieldElement::zero() { - output.linear_combinations.push((coef, b.linear_combinations[i2].1)); + let coeff = b.linear_combinations[i2].0 * k; + if coeff != FieldElement::zero() { + output.linear_combinations.push((coeff, b.linear_combinations[i2].1)); } i2 += 1; } @@ -1193,9 +1195,9 @@ pub fn add(a: &Expression, k: FieldElement, b: &Expression) -> Expression { i1 += 1; } Ordering::Equal => { - let coef = a.linear_combinations[i1].0 + b.linear_combinations[i2].0 * k; - if coef != FieldElement::zero() { - output.linear_combinations.push((coef, a.linear_combinations[i1].1)); + let coeff = a.linear_combinations[i1].0 + b.linear_combinations[i2].0 * k; + if coeff != FieldElement::zero() { + output.linear_combinations.push((coeff, a.linear_combinations[i1].1)); } i2 += 1; i1 += 1; @@ -1207,9 +1209,9 @@ pub fn add(a: &Expression, k: FieldElement, b: &Expression) -> Expression { i1 += 1; } while i2 < b.linear_combinations.len() { - let coef = b.linear_combinations[i2].0 * k; - if coef != FieldElement::zero() { - output.linear_combinations.push((coef, b.linear_combinations[i2].1)); + let coeff = 
b.linear_combinations[i2].0 * k; + if coeff != FieldElement::zero() { + output.linear_combinations.push((coeff, b.linear_combinations[i2].1)); } i2 += 1; } @@ -1222,9 +1224,9 @@ pub fn add(a: &Expression, k: FieldElement, b: &Expression) -> Expression { while i1 < a.mul_terms.len() && i2 < b.mul_terms.len() { match (a.mul_terms[i1].1, a.mul_terms[i1].2).cmp(&(b.mul_terms[i2].1, b.mul_terms[i2].2)) { Ordering::Greater => { - let coef = b.mul_terms[i2].0 * k; - if coef != FieldElement::zero() { - output.mul_terms.push((coef, b.mul_terms[i2].1, b.mul_terms[i2].2)); + let coeff = b.mul_terms[i2].0 * k; + if coeff != FieldElement::zero() { + output.mul_terms.push((coeff, b.mul_terms[i2].1, b.mul_terms[i2].2)); } i2 += 1; } @@ -1233,9 +1235,9 @@ pub fn add(a: &Expression, k: FieldElement, b: &Expression) -> Expression { i1 += 1; } Ordering::Equal => { - let coef = a.mul_terms[i1].0 + b.mul_terms[i2].0 * k; - if coef != FieldElement::zero() { - output.mul_terms.push((coef, a.mul_terms[i1].1, a.mul_terms[i1].2)); + let coeff = a.mul_terms[i1].0 + b.mul_terms[i2].0 * k; + if coeff != FieldElement::zero() { + output.mul_terms.push((coeff, a.mul_terms[i1].1, a.mul_terms[i1].2)); } i2 += 1; i1 += 1; @@ -1248,9 +1250,9 @@ pub fn add(a: &Expression, k: FieldElement, b: &Expression) -> Expression { } while i2 < b.mul_terms.len() { - let coef = b.mul_terms[i2].0 * k; - if coef != FieldElement::zero() { - output.mul_terms.push((coef, b.mul_terms[i2].1, b.mul_terms[i2].2)); + let coeff = b.mul_terms[i2].0 * k; + if coeff != FieldElement::zero() { + output.mul_terms.push((coeff, b.mul_terms[i2].1, b.mul_terms[i2].2)); } i2 += 1; } @@ -1286,7 +1288,7 @@ pub fn boolean_expr(expr: &Expression, evaluator: &mut Evaluator) -> Expression subtract(&mul_with_witness(evaluator, expr, expr), FieldElement::one(), expr) } -//contrain witness a to be num_bits-size integer, i.e between 0 and 2^num_bits-1 +//constrain witness a to be num_bits-size integer, i.e between 0 and 2^num_bits-1 pub fn range_constraint( witness: Witness, num_bits: u32, diff --git a/crates/noirc_evaluator/src/ssa/anchor.rs b/crates/noirc_evaluator/src/ssa/anchor.rs index c0ab28402ad..edd2419073b 100644 --- a/crates/noirc_evaluator/src/ssa/anchor.rs +++ b/crates/noirc_evaluator/src/ssa/anchor.rs @@ -67,7 +67,7 @@ impl Anchor { pub fn find_similar_cast( &self, - igen: &SsaContext, + ir_gen: &SsaContext, operator: &Operation, res_type: super::node::ObjectType, ) -> Option { @@ -77,7 +77,7 @@ impl Anchor { let by_type = &self.cast_map[id]; if by_type.contains_key(&res_type) { let tu = by_type[&res_type]; - if let Some(ins) = igen.try_get_instruction(tu) { + if let Some(ins) = ir_gen.try_get_instruction(tu) { if !ins.is_deleted() { return Some(tu); } diff --git a/crates/noirc_evaluator/src/ssa/block.rs b/crates/noirc_evaluator/src/ssa/block.rs index 834cafe1038..384ee386755 100644 --- a/crates/noirc_evaluator/src/ssa/block.rs +++ b/crates/noirc_evaluator/src/ssa/block.rs @@ -116,7 +116,7 @@ impl BasicBlock { for a in returned_arrays { result.insert(a.0); } - if let Some(f) = ctx.try_get_ssafunc(*func) { + if let Some(f) = ctx.try_get_ssa_func(*func) { for typ in &f.result_types { if let node::ObjectType::Pointer(a) = typ { result.insert(*a); @@ -226,7 +226,7 @@ pub fn link_with_target( if let Some(target_block) = ctx.try_get_block_mut(target) { target_block.right = right; target_block.left = left; - //TODO should also update the last instruction rhs to the first instruction of the current block -- TODOshoud we do it here?? 
+ //TODO should also update the last instruction rhs to the first instruction of the current block -- TODO should we do it here?? if let Some(right_uw) = right { ctx[right_uw].dominator = Some(target); } @@ -244,10 +244,10 @@ pub fn compute_dom(ctx: &mut SsaContext) { dominator_link.entry(dom).or_insert_with(Vec::new).push(block.id); } } - for (master, svec) in dominator_link { + for (master, slave_vec) in dominator_link { if let Some(dom_b) = ctx.try_get_block_mut(master) { dom_b.dominated.clear(); - for slave in svec { + for slave in slave_vec { dom_b.dominated.push(slave); } } @@ -263,9 +263,9 @@ pub fn compute_sub_dom(ctx: &mut SsaContext, blocks: &[BlockId]) { dominator_link.entry(dom).or_insert_with(Vec::new).push(block.id); } } - for (master, svec) in dominator_link { + for (master, slave_vec) in dominator_link { let dom_b = &mut ctx[master]; - for slave in svec { + for slave in slave_vec { dom_b.dominated.push(slave); } } @@ -336,7 +336,7 @@ fn find_join_helper( } // Find the LCA of x and y -// n.b. this is a naive implementation which assumes there is no cycle in the graph, so it should be used after loop flatenning +// n.b. this is a naive implementation which assumes there is no cycle in the graph, so it should be used after loop flattening pub fn lca(ctx: &SsaContext, x: BlockId, y: BlockId) -> BlockId { if x == y { return x; @@ -550,9 +550,9 @@ pub fn merge_path( } } - //we assign the concatened list of instructions to the start block, using a CSE pass + //we assign the concatenated list of instructions to the start block, using a CSE pass let mut modified = false; - super::optim::cse_block(ctx, start, &mut instructions, &mut modified)?; + super::optimizations::cse_block(ctx, start, &mut instructions, &mut modified)?; //Wires start to end if !end.is_dummy() { rewire_block_left(ctx, start, end); diff --git a/crates/noirc_evaluator/src/ssa/code_gen.rs b/crates/noirc_evaluator/src/ssa/code_gen.rs index 75a44c5e8f1..567547d8c80 100644 --- a/crates/noirc_evaluator/src/ssa/code_gen.rs +++ b/crates/noirc_evaluator/src/ssa/code_gen.rs @@ -12,7 +12,7 @@ use crate::errors; use crate::ssa::block::BlockType; use acvm::FieldElement; use iter_extended::vecmap; -use noirc_frontend::monomorphisation::ast::*; +use noirc_frontend::monomorphization::ast::*; use noirc_frontend::{BinaryOpKind, UnaryOp}; use num_bigint::BigUint; use num_traits::Zero; @@ -253,7 +253,7 @@ impl IRGenerator { } Definition::Builtin(opcode) | Definition::LowLevel(opcode) => { let opcode = builtin::Opcode::lookup(opcode).unwrap_or_else(|| { - unreachable!("Unknown builtin/lowlevel opcode '{}'", opcode) + unreachable!("Unknown builtin/low level opcode '{}'", opcode) }); let function_node_id = self.context.get_or_create_opcode_node_id(opcode); Ok(Value::Single(function_node_id)) @@ -267,15 +267,15 @@ impl IRGenerator { rhs: NodeId, op: UnaryOp, ) -> Result { - let rtype = self.context.get_object_type(rhs); + let rhs_type = self.context.get_object_type(rhs); match op { UnaryOp::Minus => { - let lhs = self.context.zero_with_type(rtype); + let lhs = self.context.zero_with_type(rhs_type); let operator = BinaryOp::Sub { max_rhs_value: BigUint::zero() }; let op = Operation::Binary(node::Binary { operator, lhs, rhs, predicate: None }); - self.context.new_instruction(op, rtype) + self.context.new_instruction(op, rhs_type) } - UnaryOp::Not => self.context.new_instruction(Operation::Not(rhs), rtype), + UnaryOp::Not => self.context.new_instruction(Operation::Not(rhs), rhs_type), } } @@ -285,11 +285,11 @@ impl IRGenerator { rhs: 
NodeId, op: BinaryOpKind, ) -> Result { - let ltype = self.context.get_object_type(lhs); + let lhs_type = self.context.get_object_type(lhs); // Get the opcode from the infix operator - let opcode = Operation::Binary(Binary::from_ast(op, ltype, lhs, rhs)); - let optype = self.context.get_result_type(&opcode, ltype); - self.context.new_instruction(opcode, optype) + let opcode = Operation::Binary(Binary::from_ast(op, lhs_type, lhs, rhs)); + let op_type = self.context.get_result_type(&opcode, lhs_type); + self.context.new_instruction(opcode, op_type) } fn codegen_indexed_value( @@ -426,9 +426,13 @@ impl IRGenerator { let definition = Definition::Local(id); match value { Value::Single(node_id) => { - let otype = self.context.get_object_type(node_id); - let value = - self.bind_variable(name.to_owned(), Some(definition.clone()), otype, node_id)?; + let object_type = self.context.get_object_type(node_id); + let value = self.bind_variable( + name.to_owned(), + Some(definition.clone()), + object_type, + node_id, + )?; self.variable_values.insert(definition, value); } value @ Value::Tuple(_) => { @@ -446,8 +450,8 @@ impl IRGenerator { fn bind_fresh_pattern(&mut self, basename: &str, value: Value) -> Result { match value { Value::Single(node_id) => { - let otype = self.context.get_object_type(node_id); - self.bind_variable(basename.to_owned(), None, otype, node_id) + let object_type = self.context.get_object_type(node_id); + self.bind_variable(basename.to_owned(), None, object_type, node_id) } Value::Tuple(field_values) => { let values = field_values @@ -608,9 +612,9 @@ impl IRGenerator { } Expression::Cast(cast_expr) => { let lhs = self.codegen_expression(&cast_expr.lhs)?.unwrap_id(); - let rtype = self.context.convert_type(&cast_expr.r#type); + let object_type = self.context.convert_type(&cast_expr.r#type); - Ok(Value::Single(self.context.new_instruction(Operation::Cast(lhs), rtype)?)) + Ok(Value::Single(self.context.new_instruction(Operation::Cast(lhs), object_type)?)) } Expression::Index(indexed_expr) => { // Evaluate the 'array' expression @@ -725,8 +729,8 @@ impl IRGenerator { let phi = self.context.generate_empty_phi(join_idx, iter_id); self.update_variable_id(iter_id, iter_id, phi); //is it still needed? 
- let notequal = Operation::binary(BinaryOp::Ne, phi, end_idx); - let cond = self.context.new_instruction(notequal, ObjectType::Boolean)?; + let not_equal = Operation::binary(BinaryOp::Ne, phi, end_idx); + let cond = self.context.new_instruction(not_equal, ObjectType::Boolean)?; let to_fix = self.context.new_instruction(Operation::Nop, ObjectType::NotAnObject)?; diff --git a/crates/noirc_evaluator/src/ssa/conditional.rs b/crates/noirc_evaluator/src/ssa/conditional.rs index d45ada720b8..08aa26996c8 100644 --- a/crates/noirc_evaluator/src/ssa/conditional.rs +++ b/crates/noirc_evaluator/src/ssa/conditional.rs @@ -5,7 +5,7 @@ use crate::{ errors::{self, RuntimeError}, ssa::{ node::{Mark, ObjectType}, - optim, + optimizations, }, }; @@ -123,7 +123,7 @@ impl DecisionTree { ) -> Instruction { let operation = Operation::binary(operator, lhs, rhs); let mut i = Instruction::new(operation, typ, Some(block_id)); - super::optim::simplify(ctx, &mut i).unwrap(); + super::optimizations::simplify(ctx, &mut i).unwrap(); i } @@ -166,14 +166,14 @@ impl DecisionTree { if let Some(value) = assumption.value { return value; } - let pvalue = self[assumption.parent].value.unwrap(); + let parent_value = self[assumption.parent].value.unwrap(); let condition = self[assumption.parent].condition; let ins = if self.is_true_branch(block.assumption) { DecisionTree::new_instruction_after_phi( ctx, block_id, BinaryOp::Mul, - pvalue, + parent_value, condition, ObjectType::Boolean, ) @@ -190,7 +190,7 @@ impl DecisionTree { ctx, block_id, BinaryOp::Mul, - pvalue, + parent_value, not_condition, ObjectType::Boolean, not_condition, @@ -235,7 +235,7 @@ impl DecisionTree { } // is it an IF block? if let Some(ins) = ctx.get_if_condition(current_block) { - //add a new assuption for the IF + //add a new assumption for the IF if assumption.parent == AssumptionId::dummy() { //Root assumption parent = block_assumption; @@ -338,7 +338,7 @@ impl DecisionTree { if_block_id: BlockId, exit_block_id: BlockId, ) -> Result<(), RuntimeError> { - //basic reduction as a first step (i.e no optimisation) + //basic reduction as a first step (i.e no optimization) let if_block = &ctx[if_block_id]; let mut to_remove = Vec::new(); let left = if_block.left.unwrap(); @@ -378,10 +378,10 @@ impl DecisionTree { } else { let left_ins = ctx[left].instructions.clone(); let right_ins = ctx[right].instructions.clone(); - merged_ins = self.synchronise(ctx, &left_ins, &right_ins, left); + merged_ins = self.synchronize(ctx, &left_ins, &right_ins, left); } let mut modified = false; - super::optim::cse_block(ctx, left, &mut merged_ins, &mut modified)?; + super::optimizations::cse_block(ctx, left, &mut merged_ins, &mut modified)?; //housekeeping... 
let if_block = &mut ctx[if_block_id]; @@ -497,7 +497,7 @@ impl DecisionTree { stack: &mut StackFrame, ins_id: NodeId, predicate: AssumptionId, - short_circtuit: bool, + short_circuit: bool, ) -> Result { let ass_cond; let ass_value; @@ -529,7 +529,7 @@ impl DecisionTree { } let ins = ins1.clone(); - if short_circtuit { + if short_circuit { stack.set_zero(ctx, ins.res_type); let ins2 = ctx.get_mut_instruction(ins_id); if ins2.res_type == ObjectType::NotAnObject { @@ -548,7 +548,7 @@ impl DecisionTree { val_true: block_args[0].0, val_false: block_args[1].0, }; - optim::simplify_id(ctx, ins_id).unwrap(); + optimizations::simplify_id(ctx, ins_id).unwrap(); } stack.push(ins_id); } @@ -567,9 +567,9 @@ impl DecisionTree { } stack.push(ins_id); } - Operation::Binary(binop) => { + Operation::Binary(binary_op) => { let mut cond = ass_value; - if let Some(pred) = binop.predicate { + if let Some(pred) = binary_op.predicate { assert_ne!(pred, NodeId::dummy()); if ass_value == NodeId::dummy() { cond = pred; @@ -585,18 +585,18 @@ impl DecisionTree { ObjectType::Boolean, Some(stack.block), )); - optim::simplify_id(ctx, cond).unwrap(); + optimizations::simplify_id(ctx, cond).unwrap(); stack.push(cond); } } stack.push(ins_id); - match binop.operator { + match binary_op.operator { BinaryOp::Udiv | BinaryOp::Sdiv | BinaryOp::Urem | BinaryOp::Srem | BinaryOp::Div => { - if ctx.is_zero(binop.rhs) { + if ctx.is_zero(binary_op.rhs) { DecisionTree::short_circuit( ctx, stack, @@ -608,9 +608,9 @@ impl DecisionTree { if ctx.under_assumption(cond) { let ins2 = ctx.get_mut_instruction(ins_id); ins2.operation = Operation::Binary(crate::node::Binary { - lhs: binop.lhs, - rhs: binop.rhs, - operator: binop.operator.clone(), + lhs: binary_op.lhs, + rhs: binary_op.rhs, + operator: binary_op.operator.clone(), predicate: Some(cond), }); } @@ -752,7 +752,7 @@ impl DecisionTree { } } - fn synchronise( + fn synchronize( &self, ctx: &mut SsaContext, left: &[NodeId], diff --git a/crates/noirc_evaluator/src/ssa/context.rs b/crates/noirc_evaluator/src/ssa/context.rs index 6ae4f1bd399..55be0a578a8 100644 --- a/crates/noirc_evaluator/src/ssa/context.rs +++ b/crates/noirc_evaluator/src/ssa/context.rs @@ -3,8 +3,8 @@ use super::conditional::{DecisionTree, TreeBuilder}; use super::function::{FuncIndex, SSAFunction}; use super::inline::StackFrame; use super::mem::{ArrayId, Memory}; -use super::node::{BinaryOp, FunctionKind, Instruction, NodeId, NodeObj, ObjectType, Operation}; -use super::{block, builtin, flatten, inline, integer, node, optim}; +use super::node::{BinaryOp, FunctionKind, Instruction, NodeId, NodeObject, ObjectType, Operation}; +use super::{block, builtin, flatten, inline, integer, node, optimizations}; use std::collections::{HashMap, HashSet}; use super::super::errors::RuntimeError; @@ -15,7 +15,7 @@ use crate::ssa::node::{Mark, Node}; use crate::Evaluator; use acvm::FieldElement; use iter_extended::vecmap; -use noirc_frontend::monomorphisation::ast::{Definition, FuncId}; +use noirc_frontend::monomorphization::ast::{Definition, FuncId}; use num_bigint::BigUint; use num_traits::{One, Zero}; @@ -27,7 +27,7 @@ pub struct SsaContext { pub first_block: BlockId, pub current_block: BlockId, blocks: arena::Arena, - pub nodes: arena::Arena, + pub nodes: arena::Arena, value_names: HashMap, pub sealed_blocks: HashSet, pub mem: Memory, @@ -142,7 +142,7 @@ impl SsaContext { block } - //Display an object for debugging puposes + //Display an object for debugging purposes fn id_to_string(&self, id: NodeId) -> String { let mut result = 
String::new(); if let Some(var) = self.try_get_node(id) { @@ -196,7 +196,7 @@ impl SsaContext { Operation::Cast(value) => format!("cast {}", self.id_to_string(*value)), Operation::Truncate { value, bit_size, max_bit_size } => { format!( - "truncate {}, bitsize = {bit_size}, max bitsize = {max_bit_size}", + "truncate {}, bit size = {bit_size}, max bit size = {max_bit_size}", self.id_to_string(*value), ) } @@ -234,7 +234,7 @@ impl SsaContext { Operation::Intrinsic(opcode, args) => format!("intrinsic {opcode}({})", join(args)), Operation::Nop => "nop".into(), Operation::Call { func, arguments, returned_arrays, .. } => { - let name = self.try_get_funcid(*func).map(|id| self.functions[&id].name.clone()); + let name = self.try_get_func_id(*func).map(|id| self.functions[&id].name.clone()); let name = name.unwrap_or_else(|| self.id_to_string(*func)); format!("call {name}({}) _ {returned_arrays:?}", join(arguments)) } @@ -267,7 +267,7 @@ impl SsaContext { pub fn node_to_string(&self, id: NodeId) -> String { match self.try_get_node(id) { - Some(NodeObj::Instr(ins)) => { + Some(NodeObject::Instr(ins)) => { let mut str_res = if ins.res_name.is_empty() { format!("{:?}", id.0.into_raw_parts().0) } else { @@ -305,10 +305,10 @@ impl SsaContext { /// This function does NOT push the instruction to the current block. /// See push_instruction for that. pub fn add_instruction(&mut self, instruction: node::Instruction) -> NodeId { - let obj = NodeObj::Instr(instruction); + let obj = NodeObject::Instr(instruction); let id = NodeId(self.nodes.insert(obj)); match &mut self[id] { - NodeObj::Instr(i) => i.id = id, + NodeObject::Instr(i) => i.id = id, _ => unreachable!(), } @@ -318,7 +318,7 @@ impl SsaContext { /// Adds the instruction to self.nodes and pushes it to the current block pub fn push_instruction(&mut self, instruction: node::Instruction) -> NodeId { let id = self.add_instruction(instruction); - if let NodeObj::Instr(_) = &self[id] { + if let NodeObject::Instr(_) = &self[id] { self.get_current_block_mut().instructions.push(id); } id @@ -364,40 +364,40 @@ impl SsaContext { } pub fn add_const(&mut self, constant: node::Constant) -> NodeId { - let obj = NodeObj::Const(constant); + let obj = NodeObject::Const(constant); let id = NodeId(self.nodes.insert(obj)); match &mut self[id] { - NodeObj::Const(c) => c.id = id, + NodeObject::Const(c) => c.id = id, _ => unreachable!(), } id } - pub fn get_ssafunc(&self, func_id: FuncId) -> Option<&SSAFunction> { + pub fn get_ssa_func(&self, func_id: FuncId) -> Option<&SSAFunction> { self.functions.get(&func_id) } - pub fn try_get_funcid(&self, id: NodeId) -> Option { + pub fn try_get_func_id(&self, id: NodeId) -> Option { match &self[id] { - NodeObj::Function(FunctionKind::Normal(id), ..) => Some(*id), + NodeObject::Function(FunctionKind::Normal(id), ..) 
=> Some(*id), _ => None, } } - pub fn try_get_ssafunc(&self, id: NodeId) -> Option<&SSAFunction> { - self.try_get_funcid(id).and_then(|id| self.get_ssafunc(id)) + pub fn try_get_ssa_func(&self, id: NodeId) -> Option<&SSAFunction> { + self.try_get_func_id(id).and_then(|id| self.get_ssa_func(id)) } pub fn dummy_id() -> arena::Index { arena::Index::from_raw_parts(std::usize::MAX, 0) } - pub fn try_get_node(&self, id: NodeId) -> Option<&node::NodeObj> { + pub fn try_get_node(&self, id: NodeId) -> Option<&node::NodeObject> { self.nodes.get(id.0) } - pub fn try_get_node_mut(&mut self, id: NodeId) -> Option<&mut node::NodeObj> { + pub fn try_get_node_mut(&mut self, id: NodeId) -> Option<&mut node::NodeObject> { self.nodes.get_mut(id.0) } @@ -407,7 +407,7 @@ impl SsaContext { //Returns the object value if it is a constant, None if not. pub fn get_as_constant(&self, id: NodeId) -> Option { - if let Some(node::NodeObj::Const(c)) = self.try_get_node(id) { + if let Some(node::NodeObject::Const(c)) = self.try_get_node(id) { return Some(FieldElement::from_be_bytes_reduce(&c.value.to_bytes_be())); } None @@ -422,14 +422,14 @@ impl SsaContext { } pub fn try_get_instruction(&self, id: NodeId) -> Option<&node::Instruction> { - if let Some(NodeObj::Instr(i)) = self.try_get_node(id) { + if let Some(NodeObject::Instr(i)) = self.try_get_node(id) { return Some(i); } None } pub fn try_get_mut_instruction(&mut self, id: NodeId) -> Option<&mut node::Instruction> { - if let Some(NodeObj::Instr(i)) = self.try_get_node_mut(id) { + if let Some(NodeObject::Instr(i)) = self.try_get_node_mut(id) { return Some(i); } None @@ -438,7 +438,7 @@ impl SsaContext { pub fn get_variable(&self, id: NodeId) -> Result<&node::Variable, RuntimeErrorKind> { match self.nodes.get(id.0) { Some(t) => match t { - node::NodeObj::Obj(o) => Ok(o), + node::NodeObject::Obj(o) => Ok(o), _ => Err(RuntimeErrorKind::UnstructuredError { message: "Not an object".to_string(), }), @@ -453,7 +453,7 @@ impl SsaContext { ) -> Result<&mut node::Variable, RuntimeErrorKind> { match self.nodes.get_mut(id.0) { Some(t) => match t { - node::NodeObj::Obj(o) => Ok(o), + node::NodeObject::Obj(o) => Ok(o), _ => Err(RuntimeErrorKind::UnstructuredError { message: "Not an object".to_string(), }), @@ -469,7 +469,7 @@ impl SsaContext { index: u32, ) -> Option<&mut Instruction> { for id in &self.blocks[target.0].instructions { - if let Some(NodeObj::Instr(i)) = self.nodes.get(id.0) { + if let Some(NodeObject::Instr(i)) = self.nodes.get(id.0) { if i.operation == (Operation::Result { call_instruction, index }) { let id = *id; return self.try_get_mut_instruction(id); @@ -484,9 +484,9 @@ impl SsaContext { } pub fn add_variable(&mut self, obj: node::Variable, root: Option) -> NodeId { - let id = NodeId(self.nodes.insert(NodeObj::Obj(obj))); + let id = NodeId(self.nodes.insert(NodeObject::Obj(obj))); match &mut self[id] { - node::NodeObj::Obj(v) => { + node::NodeObject::Obj(v) => { v.id = id; v.root = root; } @@ -511,8 +511,8 @@ impl SsaContext { *v_name += 1; let variable_id = *v_name; - if let Ok(nvar) = self.get_mut_variable(new_var) { - nvar.name = format!("{root_name}{variable_id}"); + if let Ok(new_var) = self.get_mut_variable(new_var) { + new_var.name = format!("{root_name}{variable_id}"); } } @@ -557,14 +557,14 @@ impl SsaContext { pub fn new_instruction( &mut self, opcode: Operation, - optype: ObjectType, + op_type: ObjectType, ) -> Result { //Add a new instruction to the nodes arena - let mut i = Instruction::new(opcode, optype, Some(self.current_block)); + let mut i = 
Instruction::new(opcode, op_type, Some(self.current_block)); //Basic simplification - we ignore RunTimeErrors when creating an instruction //because they must be managed after handling conditionals. For instance if false { b } should not fail whatever b is doing. - optim::simplify(self, &mut i).ok(); + optimizations::simplify(self, &mut i).ok(); if let Mark::ReplaceWith(replacement) = i.mark { return Ok(replacement); @@ -579,7 +579,7 @@ impl SsaContext { ) -> Option { //TODO We should map constant values to id for (idx, o) in &self.nodes { - if let node::NodeObj::Const(c) = o { + if let node::NodeObject::Const(c) = o { if c.value == *value && c.get_type() == e_type { return Some(NodeId(idx)); } @@ -588,7 +588,7 @@ impl SsaContext { None } - // Retrieve the object conresponding to the const value given in argument + // Retrieve the object corresponding to the const value given in argument // If such object does not exist, we create one pub fn get_or_create_const(&mut self, x: FieldElement, t: node::ObjectType) -> NodeId { let value = BigUint::from_bytes_be(&x.to_be_bytes()); @@ -689,7 +689,7 @@ impl SsaContext { } } - //Optimise, flatten and truncate IR and then generates ACIR representation from it + //Optimize, flatten and truncate IR and then generates ACIR representation from it pub fn ir_to_acir( &mut self, evaluator: &mut Evaluator, @@ -699,11 +699,11 @@ impl SsaContext { self.log(enable_logging, "SSA:", "\ninline functions"); function::inline_all(self)?; - //Optimisation + //Optimization block::compute_dom(self); - optim::full_cse(self, self.first_block, false)?; + optimizations::full_cse(self, self.first_block, false)?; - //Flattenning + //flattening self.log(enable_logging, "\nCSE:", "\nunrolling:"); //Unrolling flatten::unroll_tree(self, self.first_block)?; @@ -731,7 +731,7 @@ impl SsaContext { let first_block = self.first_block; self[first_block].dominated.clear(); - optim::cse(self, first_block, true)?; + optimizations::cse(self, first_block, true)?; //Truncation integer::overflow_strategy(self)?; @@ -761,7 +761,7 @@ impl SsaContext { } pub fn generate_empty_phi(&mut self, target_block: BlockId, phi_root: NodeId) -> NodeId { - //Ensure there is not already a phi for the variable (n.b. probably not usefull) + //Ensure there is not already a phi for the variable (n.b. probably not useful) for i in &self[target_block].instructions { match self.try_get_instruction(*i) { Some(Instruction { operation: Operation::Phi { root, .. }, .. }) @@ -812,7 +812,7 @@ impl SsaContext { // For instance: // - if lhs and rhs are standard variables, we create a new ssa variable of lhs // - if lhs is an array element, we generate a store instruction - // - if lhs and rhs are arrays, we perfom a copy of rhs into lhs, + // - if lhs and rhs are arrays, we perform a copy of rhs into lhs, // - if lhs is an array and rhs is a call instruction, we indicate in the call that lhs is the returned array (so that no copy is needed because the inlining will use it) // ... pub fn handle_assign( @@ -844,7 +844,7 @@ impl SsaContext { }) = self.try_get_mut_instruction(func) { returned_arrays.push((a, idx)); - //Issue #579: we initialise the array, unless it is also in arguments in which case it is already initialised. + //Issue #579: we initialize the array, unless it is also in arguments in which case it is already initialized. 
let mut init = false; for i in arguments.clone() { if let ObjectType::Pointer(b) = self.get_object_type(i) { @@ -884,11 +884,11 @@ impl SsaContext { } else if matches!(lhs_type, ObjectType::Pointer(_)) { if let Some(Instruction { operation: Operation::Intrinsic(_, _), - res_type: rtype, + res_type: result_type, .. }) = self.try_get_mut_instruction(rhs) { - *rtype = lhs_type; + *result_type = lhs_type; return Ok(lhs); } else { self.memcpy(lhs_type, rhs_type)?; @@ -922,10 +922,10 @@ impl SsaContext { fn new_instruction_inline( &mut self, operation: node::Operation, - optype: node::ObjectType, + op_type: node::ObjectType, stack_frame: &mut StackFrame, ) -> NodeId { - let i = node::Instruction::new(operation, optype, Some(stack_frame.block)); + let i = node::Instruction::new(operation, op_type, Some(stack_frame.block)); let ins_id = self.add_instruction(i); stack_frame.push(ins_id); ins_id @@ -1076,7 +1076,7 @@ impl SsaContext { pub fn push_function_id(&mut self, func_id: FuncId, name: &str) -> NodeId { let index = self.nodes.insert_with(|index| { let node_id = NodeId(index); - NodeObj::Function(FunctionKind::Normal(func_id), node_id, name.to_owned()) + NodeObject::Function(FunctionKind::Normal(func_id), node_id, name.to_owned()) }); NodeId(index) @@ -1091,7 +1091,7 @@ impl SsaContext { } pub fn function_already_compiled(&self, func_id: FuncId) -> bool { - self.get_ssafunc(func_id).is_some() + self.get_ssa_func(func_id).is_some() } pub fn get_or_create_opcode_node_id(&mut self, opcode: builtin::Opcode) -> NodeId { @@ -1100,7 +1100,7 @@ impl SsaContext { } let index = self.nodes.insert_with(|index| { - NodeObj::Function(FunctionKind::Builtin(opcode), NodeId(index), opcode.to_string()) + NodeObject::Function(FunctionKind::Builtin(opcode), NodeId(index), opcode.to_string()) }); self.opcode_ids.insert(opcode, NodeId(index)); NodeId(index) @@ -1108,13 +1108,13 @@ impl SsaContext { pub fn get_builtin_opcode(&self, node_id: NodeId) -> Option { match &self[node_id] { - NodeObj::Function(FunctionKind::Builtin(opcode), ..) => Some(*opcode), + NodeObject::Function(FunctionKind::Builtin(opcode), ..) 
=> Some(*opcode), _ => None, } } - pub fn convert_type(&mut self, t: &noirc_frontend::monomorphisation::ast::Type) -> ObjectType { - use noirc_frontend::monomorphisation::ast::Type; + pub fn convert_type(&mut self, t: &noirc_frontend::monomorphization::ast::Type) -> ObjectType { + use noirc_frontend::monomorphization::ast::Type; use noirc_frontend::Signedness; match t { Type::Bool => ObjectType::Boolean, @@ -1164,7 +1164,7 @@ impl SsaContext { ObjectType::Boolean, Some(stack.block), )); - optim::simplify_id(self, cond).unwrap(); + optimizations::simplify_id(self, cond).unwrap(); stack.push(cond); cond } @@ -1204,7 +1204,7 @@ impl std::ops::IndexMut for SsaContext { } impl std::ops::Index for SsaContext { - type Output = NodeObj; + type Output = NodeObject; fn index(&self, index: NodeId) -> &Self::Output { &self.nodes[index.0] diff --git a/crates/noirc_evaluator/src/ssa/flatten.rs b/crates/noirc_evaluator/src/ssa/flatten.rs index 2c67af06ec0..da073d43748 100644 --- a/crates/noirc_evaluator/src/ssa/flatten.rs +++ b/crates/noirc_evaluator/src/ssa/flatten.rs @@ -3,8 +3,8 @@ use crate::errors::RuntimeError; use super::{ block::{self, BlockId}, context::SsaContext, - node::{self, BinaryOp, Mark, Node, NodeEval, NodeId, NodeObj, Operation}, - optim, + node::{self, BinaryOp, Mark, Node, NodeEval, NodeId, NodeObject, Operation}, + optimizations, }; use acvm::FieldElement; use std::collections::HashMap; @@ -43,7 +43,7 @@ fn eval_block(block_id: BlockId, eval_map: &HashMap, ctx: &mut let ins_id = ins.id; // We ignore RunTimeErrors at this stage because unrolling is done before conditionals // While failures must be managed after handling conditionals: For instance if false { b } should not fail whatever b is doing. - optim::simplify_id(ctx, ins_id).ok(); + optimizations::simplify_id(ctx, ins_id).ok(); } } } @@ -112,7 +112,7 @@ pub fn unroll_std_block( for i_id in &b_instructions { match &ctx[*i_id] { - node::NodeObj::Instr(i) => { + node::NodeObject::Instr(i) => { let new_op = i.operation.map_id(|id| { get_current_value(id, &unroll_ctx.eval_map).into_node_id().unwrap() }); @@ -127,7 +127,7 @@ pub fn unroll_std_block( Operation::Jmp(block) => assert_eq!(block, next), Operation::Nop => (), _ => { - optim::simplify(ctx, &mut new_ins).ok(); //ignore RuntimeErrors until conditionals are processed + optimizations::simplify(ctx, &mut new_ins).ok(); //ignore RuntimeErrors until conditionals are processed match new_ins.mark { Mark::None => { let id = ctx.push_instruction(new_ins); @@ -136,7 +136,7 @@ pub fn unroll_std_block( Mark::Deleted => (), Mark::ReplaceWith(replacement) => { // TODO: Should we insert into unrolled_instructions as well? - // If optim::simplify replaces with a constant then we should not, + // If optimizations::simplify replaces with a constant then we should not, // otherwise it may make sense if it is not already inserted. 
unroll_ctx .eval_map @@ -286,7 +286,7 @@ fn get_current_value(id: NodeId, value_array: &HashMap) -> Nod *value_array.get(&id).unwrap_or(&NodeEval::VarOrInstruction(id)) } -//Same as get_current_value but for a NodeEval object instead of a NodeObj +//Same as get_current_value but for a NodeEval object instead of a NodeObject fn get_current_value_for_node_eval( obj: NodeEval, value_array: &HashMap, @@ -312,8 +312,8 @@ fn evaluate_one( } match &ctx[obj_id] { - NodeObj::Instr(i) => { - let new_id = optim::propagate(ctx, obj_id, &mut modified); + NodeObject::Instr(i) => { + let new_id = optimizations::propagate(ctx, obj_id, &mut modified); if new_id != obj_id { return evaluate_one(NodeEval::VarOrInstruction(new_id), value_array, ctx); } @@ -332,12 +332,12 @@ fn evaluate_one( } Ok(result) } - NodeObj::Const(c) => { + NodeObject::Const(c) => { let value = FieldElement::from_be_bytes_reduce(&c.value.to_bytes_be()); Ok(NodeEval::Const(value, c.get_type())) } - NodeObj::Obj(_) => Ok(NodeEval::VarOrInstruction(obj_id)), - NodeObj::Function(f, id, _) => Ok(NodeEval::Function(*f, *id)), + NodeObject::Obj(_) => Ok(NodeEval::VarOrInstruction(obj_id)), + NodeObject::Function(f, id, _) => Ok(NodeEval::Function(*f, *id)), } } } @@ -357,7 +357,7 @@ fn evaluate_object( } match &ctx[obj_id] { - NodeObj::Instr(i) => { + NodeObject::Instr(i) => { if let Operation::Phi { .. } = i.operation { return Ok(NodeEval::VarOrInstruction(i.id)); } @@ -374,12 +374,12 @@ fn evaluate_object( } Ok(result) } - NodeObj::Const(c) => { + NodeObject::Const(c) => { let value = FieldElement::from_be_bytes_reduce(&c.value.to_bytes_be()); Ok(NodeEval::Const(value, c.get_type())) } - NodeObj::Obj(_) => Ok(NodeEval::VarOrInstruction(obj_id)), - NodeObj::Function(f, id, _) => Ok(NodeEval::Function(*f, *id)), + NodeObject::Obj(_) => Ok(NodeEval::VarOrInstruction(obj_id)), + NodeObject::Function(f, id, _) => Ok(NodeEval::Function(*f, *id)), } } } diff --git a/crates/noirc_evaluator/src/ssa/function.rs b/crates/noirc_evaluator/src/ssa/function.rs index f14d2f41d12..0919cd93178 100644 --- a/crates/noirc_evaluator/src/ssa/function.rs +++ b/crates/noirc_evaluator/src/ssa/function.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, VecDeque}; use crate::errors::RuntimeError; use crate::ssa::node::Opcode; use iter_extended::try_vecmap; -use noirc_frontend::monomorphisation::ast::{Call, Definition, FuncId, LocalId, Type}; +use noirc_frontend::monomorphization::ast::{Call, Definition, FuncId, LocalId, Type}; use super::builtin; use super::conditional::{AssumptionId, DecisionTree, TreeBuilder}; @@ -61,59 +61,60 @@ impl SSAFunction { } } - pub fn compile(&self, igen: &mut IRGenerator) -> Result { - let function_cfg = block::bfs(self.entry_block, None, &igen.context); - block::compute_sub_dom(&mut igen.context, &function_cfg); - //Optimisation + pub fn compile(&self, ir_gen: &mut IRGenerator) -> Result { + let function_cfg = block::bfs(self.entry_block, None, &ir_gen.context); + block::compute_sub_dom(&mut ir_gen.context, &function_cfg); + //Optimization //catch the error because the function may not be called - super::optim::full_cse(&mut igen.context, self.entry_block, false)?; + super::optimizations::full_cse(&mut ir_gen.context, self.entry_block, false)?; //Unrolling - super::flatten::unroll_tree(&mut igen.context, self.entry_block)?; + super::flatten::unroll_tree(&mut ir_gen.context, self.entry_block)?; //reduce conditionals - let mut decision = DecisionTree::new(&igen.context); + let mut decision = DecisionTree::new(&ir_gen.context); let mut 
builder = TreeBuilder::new(self.entry_block); for (arg, _) in &self.arguments { - if let ObjectType::Pointer(a) = igen.context.get_object_type(*arg) { + if let ObjectType::Pointer(a) = ir_gen.context.get_object_type(*arg) { builder.stack.created_arrays.insert(a, self.entry_block); } } let mut to_remove: VecDeque = VecDeque::new(); - let result = decision.make_decision_tree(&mut igen.context, builder); + let result = decision.make_decision_tree(&mut ir_gen.context, builder); if result.is_err() { // we take the last block to ensure we have the return instruction - let exit = block::exit(&igen.context, self.entry_block); + let exit = block::exit(&ir_gen.context, self.entry_block); //short-circuit for function: false constraint and return 0 - let instructions = &igen.context[exit].instructions.clone(); + let instructions = &ir_gen.context[exit].instructions.clone(); let stack = block::short_circuit_instructions( - &mut igen.context, + &mut ir_gen.context, self.entry_block, instructions, ); if self.entry_block != exit { for i in &stack { - igen.context.get_mut_instruction(*i).parent_block = self.entry_block; + ir_gen.context.get_mut_instruction(*i).parent_block = self.entry_block; } } - let function_block = &mut igen.context[self.entry_block]; + let function_block = &mut ir_gen.context[self.entry_block]; function_block.instructions.clear(); function_block.instructions = stack; function_block.left = None; to_remove.extend(function_cfg.iter()); //let's remove all the other blocks } else { - decision.reduce(&mut igen.context, decision.root)?; + decision.reduce(&mut ir_gen.context, decision.root)?; } //merge blocks - to_remove = block::merge_path(&mut igen.context, self.entry_block, BlockId::dummy(), None)?; + to_remove = + block::merge_path(&mut ir_gen.context, self.entry_block, BlockId::dummy(), None)?; - igen.context[self.entry_block].dominated.retain(|b| !to_remove.contains(b)); + ir_gen.context[self.entry_block].dominated.retain(|b| !to_remove.contains(b)); for i in to_remove { if i != self.entry_block { - igen.context.remove_block(i); + ir_gen.context.remove_block(i); } } Ok(decision) @@ -132,7 +133,7 @@ impl SSAFunction { } let node_obj_opt = ctx.try_get_node(*node_id); - if let Some(node::NodeObj::Const(c)) = node_obj_opt { + if let Some(node::NodeObject::Const(c)) = node_obj_opt { ctx.get_or_create_const(c.get_value_field(), c.value_type) } else if let Some(id) = inline_map.get(node_id) { *id @@ -253,8 +254,8 @@ impl IRGenerator { let call_instruction = self.context.new_instruction(call_op.clone(), ObjectType::NotAnObject)?; - if let Some(id) = self.context.try_get_funcid(func) { - let callee = self.context.get_ssafunc(id).unwrap().idx; + if let Some(id) = self.context.try_get_func_id(func) { + let callee = self.context.get_ssa_func(id).unwrap().idx; if let Some(caller) = self.function_context { update_call_graph(&mut self.context.call_graph, caller, callee); } @@ -265,7 +266,7 @@ impl IRGenerator { // return an array of size 0. // we should check issue #628 again when this block is removed // we should also see if the lca check in StackFrame.is_new_array() can be removed (cf. 
issue #661) - if let Some(func_id) = self.context.try_get_funcid(func) { + if let Some(func_id) = self.context.try_get_func_id(func) { let rtt = self.context.functions[&func_id].result_types.clone(); let mut result = Vec::new(); for i in rtt.iter().enumerate() { @@ -274,7 +275,7 @@ impl IRGenerator { *i.1, )?); } - let ssa_func = self.context.get_ssafunc(func_id).unwrap(); + let ssa_func = self.context.get_ssa_func(func_id).unwrap(); let func_arguments = ssa_func.arguments.clone(); for (caller_arg, func_arg) in arguments.iter().zip(func_arguments) { let mut is_array_result = false; @@ -332,7 +333,7 @@ impl IRGenerator { }) } - //Lowlevel functions with no more than 2 arguments + //Low-level functions with no more than 2 arguments pub fn call_low_level( &mut self, op: builtin::Opcode, @@ -402,7 +403,7 @@ pub fn inline_all(ctx: &mut SsaContext) -> Result<(), RuntimeError> { while processed.len() < l { let i = get_new_leaf(ctx, &processed); if !processed.is_empty() { - super::optim::full_cse(ctx, ctx.functions[&i.1].entry_block, false)?; + super::optimizations::full_cse(ctx, ctx.functions[&i.1].entry_block, false)?; } let mut to_inline = Vec::new(); for f in ctx.functions.values() { diff --git a/crates/noirc_evaluator/src/ssa/inline.rs b/crates/noirc_evaluator/src/ssa/inline.rs index 42b16a4faa2..e85f77e2633 100644 --- a/crates/noirc_evaluator/src/ssa/inline.rs +++ b/crates/noirc_evaluator/src/ssa/inline.rs @@ -1,12 +1,12 @@ use std::collections::{hash_map::Entry, HashMap}; -use noirc_frontend::monomorphisation::ast::FuncId; +use noirc_frontend::monomorphization::ast::FuncId; use crate::{ errors::RuntimeError, ssa::{ node::{Node, Operation}, - optim, + optimizations, }, }; @@ -21,7 +21,7 @@ use super::{ // Number of allowed times for inlining function calls inside a code block. // If a function calls another function, the inlining of the first function will leave the second function call that needs to be inlined as well. -// In case of recursive calls, this iterative inlining does not end so we arbitraty limit it. 100 nested calls should already support very complex programs. +// In case of recursive calls, this iterative inlining does not end so we arbitrarily limit it. 100 nested calls should already support very complex programs. const MAX_INLINE_TRIES: u32 = 100; //inline main @@ -48,7 +48,7 @@ pub fn inline_cfg( to_inline: Option, ) -> Result { let mut result = true; - let func = ctx.get_ssafunc(func_id).unwrap(); + let func = ctx.get_ssa_func(func_id).unwrap(); let func_cfg = block::bfs(func.entry_block, None, ctx); let decision = func.decision.clone(); for block_id in func_cfg { @@ -71,7 +71,7 @@ fn inline_block( if let Some(ins) = ctx.try_get_instruction(*i) { if !ins.is_deleted() { if let Operation::Call { func, arguments, returned_arrays, .. } = &ins.operation { - if to_inline.is_none() || to_inline == ctx.try_get_funcid(*func) { + if to_inline.is_none() || to_inline == ctx.try_get_func_id(*func) { call_ins.push(( ins.id, *func, @@ -86,8 +86,8 @@ fn inline_block( } let mut result = true; for (ins_id, f, args, arrays, parent_block) in call_ins { - if let Some(func_id) = ctx.try_get_funcid(f) { - let f_copy = ctx.get_ssafunc(func_id).unwrap().clone(); + if let Some(func_id) = ctx.try_get_func_id(f) { + let f_copy = ctx.get_ssa_func(func_id).unwrap().clone(); if !inline(ctx, &f_copy, &args, &arrays, parent_block, ins_id, decision)? 
{ result = false; } @@ -95,7 +95,7 @@ fn inline_block( } if to_inline.is_none() { - optim::simple_cse(ctx, block_id)?; + optimizations::simple_cse(ctx, block_id)?; } Ok(result) } @@ -230,7 +230,7 @@ pub fn inline( } let mut result = true; - //3. inline in the block: we assume the function cfg is already flatened. + //3. inline in the block: we assume the function cfg is already flattened. let mut next_block = Some(ssa_func.entry_block); while let Some(next_b) = next_block { let mut nested_call = false; @@ -343,7 +343,7 @@ pub fn inline_in_block( new_ins.res_type = node::ObjectType::Pointer(new_id); } - let err = optim::simplify(ctx, &mut new_ins); + let err = optimizations::simplify(ctx, &mut new_ins); if err.is_err() { //add predicate if under condition, else short-circuit the target block. let ass_value = decision.get_assumption_value(predicate); @@ -378,7 +378,7 @@ pub fn inline_in_block( } } - // we conditionalise the stack frame into a new stack frame (to avoid ownership issues) + // we apply the `condition` to stack frame and place it into a new stack frame (to avoid ownership issues) let mut stack2 = StackFrame::new(stack_frame.block); stack2.return_arrays = stack_frame.return_arrays.clone(); if short_circuit { @@ -390,7 +390,7 @@ pub fn inline_in_block( &mut stack2, predicate, )?; - // we add the conditionalised instructions to the target_block, at proper location (really need a linked list!) + // we add the instructions which we have applied the conditions to, to the target_block, at proper location (really need a linked list!) stack2.apply(ctx, stack_frame.block, call_id, false); } diff --git a/crates/noirc_evaluator/src/ssa/integer.rs b/crates/noirc_evaluator/src/ssa/integer.rs index b989f7f427a..0d10c525d2d 100644 --- a/crates/noirc_evaluator/src/ssa/integer.rs +++ b/crates/noirc_evaluator/src/ssa/integer.rs @@ -5,8 +5,8 @@ use super::{ //block, context::SsaContext, mem::{ArrayId, Memory}, - node::{self, BinaryOp, Instruction, Mark, Node, NodeId, NodeObj, ObjectType, Operation}, - optim, + node::{self, BinaryOp, Instruction, Mark, Node, NodeId, NodeObject, ObjectType, Operation}, + optimizations, }; use acvm::FieldElement; use iter_extended::vecmap; @@ -27,7 +27,7 @@ fn get_instruction_max( ctx: &SsaContext, ins: &Instruction, max_map: &mut HashMap, - vmap: &HashMap, + value_map: &HashMap, ) -> BigUint { assert_ne!( ins.operation.opcode(), @@ -35,9 +35,9 @@ fn get_instruction_max( "Phi instructions must have been simplified" ); ins.operation.for_each_id(|id| { - get_obj_max_value(ctx, id, max_map, vmap); + get_obj_max_value(ctx, id, max_map, value_map); }); - get_instruction_max_operand(ctx, ins, max_map, vmap) + get_instruction_max_operand(ctx, ins, max_map, value_map) } //Gets the maximum value of the instruction result using the provided operand maximum @@ -45,10 +45,12 @@ fn get_instruction_max_operand( ctx: &SsaContext, ins: &Instruction, max_map: &mut HashMap, - vmap: &HashMap, + value_map: &HashMap, ) -> BigUint { match &ins.operation { - Operation::Load { array_id, index } => get_load_max(ctx, *index, max_map, vmap, *array_id), + Operation::Load { array_id, index } => { + get_load_max(ctx, *index, max_map, value_map, *array_id) + } Operation::Binary(node::Binary { operator, lhs, rhs, .. }) => { if let BinaryOp::Sub { .. } = operator { //TODO uses interval analysis instead @@ -65,7 +67,7 @@ fn get_instruction_max_operand( get_max_value(ins, max_map) } // Operation::Constrain(_) => { - //ContrainOp::Eq : + //ConstrainOp::Eq : //TODO... 
we should update the max_map AFTER the truncate is processed (else it breaks it) // let min = BigUint::min(left_max.clone(), right_max.clone()); // max_map.insert(ins.lhs, min.clone()); @@ -82,9 +84,9 @@ fn get_obj_max_value( ctx: &SsaContext, id: NodeId, max_map: &mut HashMap, - vmap: &HashMap, + value_map: &HashMap, ) -> BigUint { - let id = get_value_from_map(id, vmap); + let id = get_value_from_map(id, value_map); if max_map.contains_key(&id) { return max_map[&id].clone(); } @@ -95,10 +97,10 @@ fn get_obj_max_value( let obj = &ctx[id]; let result = match obj { - NodeObj::Obj(v) => (BigUint::one() << v.size_in_bits()) - BigUint::one(), //TODO check for signed type - NodeObj::Instr(i) => get_instruction_max(ctx, i, max_map, vmap), - NodeObj::Const(c) => c.value.clone(), //TODO panic for string constants - NodeObj::Function(..) => BigUint::zero(), + NodeObject::Obj(v) => (BigUint::one() << v.size_in_bits()) - BigUint::one(), //TODO check for signed type + NodeObject::Instr(i) => get_instruction_max(ctx, i, max_map, value_map), + NodeObject::Const(c) => c.value.clone(), //TODO panic for string constants + NodeObject::Function(..) => BigUint::zero(), }; max_map.insert(id, result.clone()); result @@ -120,7 +122,7 @@ fn truncate( if *v_max >= BigUint::one() << bit_size { //TODO is max_bit_size leaking some info???? - //Create a new truncate instruction '(idx): obj trunc bit_size' + //Create a new truncate instruction '(idx): obj truncate bit_size' //set current value of obj to idx let max_bit_size = v_max.bits() as u32; @@ -151,11 +153,11 @@ fn fix_truncate( id: NodeId, prev_id: NodeId, block_idx: BlockId, - vmap: &mut HashMap, + value_map: &mut HashMap, ) { if let Some(ins) = eval.try_get_mut_instruction(id) { ins.parent_block = block_idx; - vmap.insert(prev_id, id); + value_map.insert(prev_id, id); } } @@ -169,7 +171,7 @@ fn add_to_truncate( ) { let v_max = &max_map[&obj_id]; if *v_max >= BigUint::one() << bit_size { - if let Some(NodeObj::Const(_)) = &ctx.try_get_node(obj_id) { + if let Some(NodeObject::Const(_)) = &ctx.try_get_node(obj_id) { return; //a constant cannot be truncated, so we exit the function gracefully } let truncate_bits = match to_truncate.get(&obj_id) { @@ -187,12 +189,12 @@ fn process_to_truncate( to_truncate: &mut BTreeMap, max_map: &mut HashMap, block_idx: BlockId, - vmap: &mut HashMap, + value_map: &mut HashMap, ) { for (id, bit_size) in to_truncate.iter() { if let Some(truncate_idx) = truncate(ctx, *id, *bit_size, max_map) { //TODO properly handle signed arithmetic... - fix_truncate(ctx, truncate_idx, *id, block_idx, vmap); + fix_truncate(ctx, truncate_idx, *id, block_idx, value_map); new_list.push(truncate_idx); } } @@ -228,8 +230,8 @@ fn block_overflow( memory_map: &mut HashMap, ) -> Result<(), RuntimeError> { //for each instruction, we compute the resulting max possible value (in term of the field representation of the operation) - //when it is over the field charac, or if the instruction requires it, then we insert truncate instructions - // The instructions are insterted in a duplicate list( because of rust ownership..), which we use for + //when it is over the field characteristic, or if the instruction requires it, then we insert truncate instructions + // The instructions are inserted in a duplicate list( because of rust ownership..), which we use for // processing another cse round for the block because the truncates may be duplicated. 
let mut new_list = Vec::new(); @@ -254,11 +256,11 @@ fn block_overflow( } ins.operation.map_id_mut(|id| { - let id = optim::propagate(ctx, id, &mut modified); + let id = optimizations::propagate(ctx, id, &mut modified); get_value_from_map(id, &value_map) }); - //we propagate optimised loads - todo check if it is needed because there is cse at the end + //we propagate optimized loads - todo check if it is needed because there is cse at the end //We retrieve get_current_value() in case a previous truncate has updated the value map let should_truncate_ins = ins.truncate_required(ctx); let ins_max_bits = get_instruction_max(ctx, &ins, max_map, &value_map).bits(); @@ -281,14 +283,14 @@ fn block_overflow( match ins.operation { Operation::Load { array_id, index } => { if let Some(val) = ctx.get_indexed_value(array_id, index) { - //optimise static load + //optimize static load ins.mark = Mark::ReplaceWith(*val); } } Operation::Store { array_id, index, value } => { if let Some(idx) = Memory::to_u32(ctx, index) { let absolute_adr = ctx.mem[array_id].absolute_adr(idx); - //optimise static store + //optimize static store memory_map.insert(absolute_adr, value); } } @@ -344,15 +346,15 @@ fn block_overflow( ins.mark = Mark::ReplaceWith(value_id); } else { let max = get_obj_max_value(ctx, value_id, max_map, &value_map); - let maxbits = max.bits() as u32; + let max_bits = max.bits() as u32; - if ins.res_type.bits() < get_size_in_bits(obj) && maxbits > ins.res_type.bits() + if ins.res_type.bits() < get_size_in_bits(obj) && max_bits > ins.res_type.bits() { //we need to truncate ins.operation = Operation::Truncate { value: value_id, bit_size: ins.res_type.bits(), - max_bit_size: maxbits, + max_bit_size: max_bits, }; } } @@ -384,10 +386,10 @@ fn block_overflow( .. }) = &mut ins.operation { - //for now we pass the max value to the instruction, we could also keep the max_map e.g in the block (or max in each nodeobj) + //for now we pass the max value to the instruction, we could also keep the max_map e.g in the block (or max in each node object) //sub operations require the max value to ensure it does not underflow *max_rhs_value = max_map[rhs].clone(); - //we may do that in future when the max_map becomes more used elsewhere (for other optim) + //we may do that in future when the max_map becomes more used elsewhere (for other optimizations) } let old_ins = ctx.try_get_mut_instruction(ins.id).unwrap(); @@ -398,23 +400,27 @@ fn block_overflow( //We run another round of CSE for the block in order to remove possible duplicated truncates, this will assign 'new_list' to the block instructions let mut modified = false; - optim::cse_block(ctx, block_id, &mut new_list, &mut modified)?; + optimizations::cse_block(ctx, block_id, &mut new_list, &mut modified)?; Ok(()) } -fn update_value_array(ctx: &mut SsaContext, block_id: BlockId, vmap: &HashMap) { +fn update_value_array( + ctx: &mut SsaContext, + block_id: BlockId, + value_map: &HashMap, +) { let block = &mut ctx[block_id]; - for (old, new) in vmap { + for (old, new) in value_map { block.value_map.insert(*old, *new); //TODO we must merge rather than update } } -//Get current value using the provided vmap -pub fn get_value_from_map(id: NodeId, vmap: &HashMap) -> NodeId { - *vmap.get(&id).unwrap_or(&id) +//Get current value using the provided value map +pub fn get_value_from_map(id: NodeId, value_map: &HashMap) -> NodeId { + *value_map.get(&id).unwrap_or(&id) } -fn get_size_in_bits(obj: Option<&NodeObj>) -> u32 { +fn get_size_in_bits(obj: Option<&NodeObject>) -> u32 { if 
let Some(v) = obj { v.size_in_bits() } else { @@ -422,7 +428,7 @@ fn get_size_in_bits(obj: Option<&NodeObj>) -> u32 { } } -fn get_type(obj: Option<&NodeObj>) -> ObjectType { +fn get_type(obj: Option<&NodeObject>) -> ObjectType { if let Some(v) = obj { v.get_type() } else { @@ -434,12 +440,12 @@ fn get_load_max( ctx: &SsaContext, address: NodeId, max_map: &mut HashMap, - vmap: &HashMap, + value_map: &HashMap, array: ArrayId, // obj_type: ObjectType, ) -> BigUint { if let Some(&value) = ctx.get_indexed_value(array, address) { - return get_obj_max_value(ctx, value, max_map, vmap); + return get_obj_max_value(ctx, value, max_map, value_map); }; ctx.mem[array].max.clone() //return array max } diff --git a/crates/noirc_evaluator/src/ssa/mem.rs b/crates/noirc_evaluator/src/ssa/mem.rs index 2a40d502bad..abce89ca434 100644 --- a/crates/noirc_evaluator/src/ssa/mem.rs +++ b/crates/noirc_evaluator/src/ssa/mem.rs @@ -2,7 +2,7 @@ use super::acir_gen::InternalVar; use super::context::SsaContext; use super::node::{self, Node, NodeId}; use acvm::FieldElement; -use noirc_frontend::monomorphisation::ast::{Definition, LocalId}; +use noirc_frontend::monomorphization::ast::{Definition, LocalId}; use num_bigint::BigUint; use num_traits::ToPrimitive; @@ -13,7 +13,7 @@ use std::convert::TryInto; pub struct Memory { arrays: Vec, pub last_adr: u32, //last address in 'memory' - pub memory_map: HashMap, //maps memory adress to expression + pub memory_map: HashMap, //maps memory address to expression } #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] diff --git a/crates/noirc_evaluator/src/ssa/mod.rs b/crates/noirc_evaluator/src/ssa/mod.rs index 8c464fe9a6d..2512890fc1a 100644 --- a/crates/noirc_evaluator/src/ssa/mod.rs +++ b/crates/noirc_evaluator/src/ssa/mod.rs @@ -11,5 +11,5 @@ pub mod inline; pub mod integer; pub mod mem; pub mod node; -pub mod optim; +pub mod optimizations; pub mod ssa_form; diff --git a/crates/noirc_evaluator/src/ssa/node.rs b/crates/noirc_evaluator/src/ssa/node.rs index 760eef32c1b..f4a3a377638 100644 --- a/crates/noirc_evaluator/src/ssa/node.rs +++ b/crates/noirc_evaluator/src/ssa/node.rs @@ -6,7 +6,7 @@ use acvm::FieldElement; use arena; use iter_extended::vecmap; use noirc_errors::Location; -use noirc_frontend::monomorphisation::ast::{Definition, FuncId}; +use noirc_frontend::monomorphization::ast::{Definition, FuncId}; use noirc_frontend::BinaryOpKind; use num_bigint::BigUint; use num_traits::{FromPrimitive, One}; @@ -30,15 +30,15 @@ impl std::fmt::Display for Variable { } } -impl std::fmt::Display for NodeObj { +impl std::fmt::Display for NodeObject { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { use FunctionKind::*; match self { - NodeObj::Obj(o) => write!(f, "{o}"), - NodeObj::Instr(i) => write!(f, "{i}"), - NodeObj::Const(c) => write!(f, "{c}"), - NodeObj::Function(Normal(id), ..) => write!(f, "f{}", id.0), - NodeObj::Function(Builtin(opcode), ..) => write!(f, "{opcode}"), + NodeObject::Obj(o) => write!(f, "{o}"), + NodeObject::Instr(i) => write!(f, "{i}"), + NodeObject::Const(c) => write!(f, "{c}"), + NodeObject::Function(Normal(id), ..) => write!(f, "f{}", id.0), + NodeObject::Function(Builtin(opcode), ..) => write!(f, "{opcode}"), } } } @@ -63,31 +63,31 @@ impl Node for Variable { } } -impl Node for NodeObj { +impl Node for NodeObject { fn get_type(&self) -> ObjectType { match self { - NodeObj::Obj(o) => o.get_type(), - NodeObj::Instr(i) => i.res_type, - NodeObj::Const(o) => o.value_type, - NodeObj::Function(..) 
=> ObjectType::Function, + NodeObject::Obj(o) => o.get_type(), + NodeObject::Instr(i) => i.res_type, + NodeObject::Const(o) => o.value_type, + NodeObject::Function(..) => ObjectType::Function, } } fn size_in_bits(&self) -> u32 { match self { - NodeObj::Obj(o) => o.size_in_bits(), - NodeObj::Instr(i) => i.res_type.bits(), - NodeObj::Const(c) => c.size_in_bits(), - NodeObj::Function(..) => 0, + NodeObject::Obj(o) => o.size_in_bits(), + NodeObject::Instr(i) => i.res_type.bits(), + NodeObject::Const(c) => c.size_in_bits(), + NodeObject::Function(..) => 0, } } fn get_id(&self) -> NodeId { match self { - NodeObj::Obj(o) => o.get_id(), - NodeObj::Instr(i) => i.id, - NodeObj::Const(c) => c.get_id(), - NodeObj::Function(_, id, _) => *id, + NodeObject::Obj(o) => o.get_id(), + NodeObject::Instr(i) => i.id, + NodeObject::Const(c) => c.get_id(), + NodeObject::Function(_, id, _) => *id, } } } @@ -116,7 +116,7 @@ impl NodeId { } #[derive(Debug)] -pub enum NodeObj { +pub enum NodeObject { Obj(Variable), Instr(Instruction), Const(Constant), @@ -299,8 +299,8 @@ impl NodeEval { } } - //returns the NodeObj index of a NodeEval object - //if NodeEval is a constant, it may creates a new NodeObj corresponding to the constant value + //returns the NodeObject index of a NodeEval object + //if NodeEval is a constant, it may creates a new NodeObject corresponding to the constant value pub fn to_index(self, ctx: &mut SsaContext) -> NodeId { match self { NodeEval::Const(c, t) => ctx.get_or_create_const(c, t), @@ -311,12 +311,12 @@ impl NodeEval { pub fn from_id(ctx: &SsaContext, id: NodeId) -> NodeEval { match &ctx[id] { - NodeObj::Const(c) => { + NodeObject::Const(c) => { let value = FieldElement::from_be_bytes_reduce(&c.value.to_bytes_be()); NodeEval::Const(value, c.get_type()) } - NodeObj::Function(f, id, _name) => NodeEval::Function(*f, *id), - NodeObj::Obj(_) | NodeObj::Instr(_) => NodeEval::VarOrInstruction(id), + NodeObject::Function(f, id, _name) => NodeEval::Function(*f, *id), + NodeObject::Obj(_) | NodeObject::Instr(_) => NodeEval::VarOrInstruction(id), } } @@ -364,7 +364,7 @@ impl Instruction { Operation::Load { .. } => false, Operation::Store { .. } => true, Operation::Intrinsic(_, _) => true, - Operation::Call { .. } => true, //return values are in the return statment + Operation::Call { .. } => true, //return values are in the return statement Operation::Return(_) => true, Operation::Result { .. } => false, } @@ -532,7 +532,7 @@ pub enum Operation { value: NodeId, }, - Intrinsic(builtin::Opcode, Vec), //Custom implementation of usefull primitives which are more performant with Aztec backend + Intrinsic(builtin::Opcode, Vec), //Custom implementation of useful primitives which are more performant with Aztec backend Nop, // no op } @@ -1025,7 +1025,7 @@ impl Binary { } } -/// Perform the given numeric operation and modulo the result by the max value for the given bitcount +/// Perform the given numeric operation and modulo the result by the max value for the given bit count /// if the res_type is not a NativeField. 
fn wrapping( lhs: FieldElement, diff --git a/crates/noirc_evaluator/src/ssa/optim.rs b/crates/noirc_evaluator/src/ssa/optimizations.rs similarity index 94% rename from crates/noirc_evaluator/src/ssa/optim.rs rename to crates/noirc_evaluator/src/ssa/optimizations.rs index 33f539a7941..7c1e4e8a9d5 100644 --- a/crates/noirc_evaluator/src/ssa/optim.rs +++ b/crates/noirc_evaluator/src/ssa/optimizations.rs @@ -13,7 +13,7 @@ use super::{ pub fn simplify_id(ctx: &mut SsaContext, ins_id: NodeId) -> Result<(), RuntimeError> { let mut ins = ctx.get_instruction(ins_id).clone(); simplify(ctx, &mut ins)?; - ctx[ins_id] = super::node::NodeObj::Instr(ins); + ctx[ins_id] = super::node::NodeObject::Instr(ins); Ok(()) } @@ -122,28 +122,34 @@ pub fn propagate(ctx: &SsaContext, id: NodeId, modified: &mut bool) -> NodeId { //common subexpression elimination, starting from the root pub fn cse( - igen: &mut SsaContext, + ir_gen: &mut SsaContext, first_block: BlockId, stop_on_error: bool, ) -> Result, RuntimeError> { let mut anchor = Anchor::default(); let mut modified = false; - cse_tree(igen, first_block, &mut anchor, &mut modified, stop_on_error) + cse_tree(ir_gen, first_block, &mut anchor, &mut modified, stop_on_error) } //Perform CSE for the provided block and then process its children following the dominator tree, passing around the anchor list. fn cse_tree( - igen: &mut SsaContext, + ir_gen: &mut SsaContext, block_id: BlockId, anchor: &mut Anchor, modified: &mut bool, stop_on_error: bool, ) -> Result, RuntimeError> { let mut instructions = Vec::new(); - let mut res = - cse_block_with_anchor(igen, block_id, &mut instructions, anchor, modified, stop_on_error)?; - for b in igen[block_id].dominated.clone() { - let sub_res = cse_tree(igen, b, &mut anchor.clone(), modified, stop_on_error)?; + let mut res = cse_block_with_anchor( + ir_gen, + block_id, + &mut instructions, + anchor, + modified, + stop_on_error, + )?; + for b in ir_gen[block_id].dominated.clone() { + let sub_res = cse_tree(ir_gen, b, &mut anchor.clone(), modified, stop_on_error)?; if sub_res.is_some() { res = sub_res; } @@ -153,7 +159,7 @@ fn cse_tree( //perform common subexpression elimination until there is no more change pub fn full_cse( - igen: &mut SsaContext, + ir_gen: &mut SsaContext, first_block: BlockId, report_error: bool, ) -> Result, RuntimeError> { @@ -162,7 +168,7 @@ pub fn full_cse( while modified { modified = false; let mut anchor = Anchor::default(); - result = cse_tree(igen, first_block, &mut anchor, &mut modified, report_error)?; + result = cse_tree(ir_gen, first_block, &mut anchor, &mut modified, report_error)?; } Ok(result) } @@ -290,7 +296,7 @@ fn cse_block_with_anchor( let id = ctx.get_dummy_store(a.0); anchor.push_mem_instruction(ctx, id)?; } - if let Some(f) = ctx.try_get_ssafunc(*func) { + if let Some(f) = ctx.try_get_ssa_func(*func) { for typ in &f.result_types { if let ObjectType::Pointer(a) = typ { let id = ctx.get_dummy_store(*a); @@ -311,7 +317,7 @@ fn cse_block_with_anchor( } Operation::Return(..) 
=> new_list.push(*ins_id), Operation::Intrinsic(_, args) => { - //Add dunmmy load for function arguments and enable CSE only if no array in argument + //Add dummy load for function arguments and enable CSE only if no array in argument let mut activate_cse = true; for arg in args { if let Some(obj) = ctx.try_get_node(*arg) { diff --git a/crates/noirc_evaluator/src/ssa/ssa_form.rs b/crates/noirc_evaluator/src/ssa/ssa_form.rs index bdee4380aed..759e04bf34e 100644 --- a/crates/noirc_evaluator/src/ssa/ssa_form.rs +++ b/crates/noirc_evaluator/src/ssa/ssa_form.rs @@ -75,7 +75,7 @@ pub fn add_dummy_store(ctx: &mut SsaContext, entry: BlockId, join: BlockId) { } } -//look-up recursiverly into predecessors +//look-up recursively into predecessors pub fn get_block_value(ctx: &mut SsaContext, root: NodeId, block_id: BlockId) -> NodeId { let result = if !ctx.sealed_blocks.contains(&block_id) { //incomplete CFG diff --git a/crates/noirc_frontend/src/ast/expression.rs b/crates/noirc_frontend/src/ast/expression.rs index 14a22826123..db08c7dae01 100644 --- a/crates/noirc_frontend/src/ast/expression.rs +++ b/crates/noirc_frontend/src/ast/expression.rs @@ -251,7 +251,7 @@ pub enum UnaryOp { impl UnaryOp { /// Converts a token to a unary operator - /// If you want the parser to recognise another Token as being a prefix operator, it is defined here + /// If you want the parser to recognize another Token as being a prefix operator, it is defined here pub fn from(token: &Token) -> Option { match token { Token::Minus => Some(UnaryOp::Minus), diff --git a/crates/noirc_frontend/src/ast/function.rs b/crates/noirc_frontend/src/ast/function.rs index af86b0fef4e..02ef4bf27bb 100644 --- a/crates/noirc_frontend/src/ast/function.rs +++ b/crates/noirc_frontend/src/ast/function.rs @@ -6,7 +6,7 @@ use super::{FunctionDefinition, UnresolvedType}; // A NoirFunction can be either a foreign low level function or a function definition // A closure / function definition will be stored under a name, so we do not differentiate between their variants -// The name for function literal will be the variable it is binded to, and the name for a function definition will +// The name for function literal will be the variable it is bound to, and the name for a function definition will // be the function name itself. #[derive(Clone, Debug, PartialEq, Eq)] pub struct NoirFunction { diff --git a/crates/noirc_frontend/src/ast/mod.rs b/crates/noirc_frontend/src/ast/mod.rs index f9c01ff6380..725455a8598 100644 --- a/crates/noirc_frontend/src/ast/mod.rs +++ b/crates/noirc_frontend/src/ast/mod.rs @@ -12,18 +12,18 @@ use noirc_errors::Span; pub use statement::*; pub use structure::*; -use crate::{parser::ParserError, token::IntType, BinaryTypeOperator, Comptime}; +use crate::{parser::ParserError, token::IntType, BinaryTypeOperator, CompTime}; use iter_extended::vecmap; /// The parser parses types as 'UnresolvedType's which -/// require name resolution to resolve any typenames used +/// require name resolution to resolve any type names used /// for structs within, but are otherwise identical to Types. 
#[derive(Debug, PartialEq, Eq, Clone)] pub enum UnresolvedType { - FieldElement(Comptime), + FieldElement(CompTime), Array(Option, Box), // [4]Witness = Array(4, Witness) - Integer(Comptime, Signedness, u32), // u32 = Integer(unsigned, 32) - Bool(Comptime), + Integer(CompTime, Signedness, u32), // u32 = Integer(unsigned, 32) + Bool(CompTime), Expression(UnresolvedTypeExpression), String(Option), Unit, @@ -116,7 +116,7 @@ impl std::fmt::Display for UnresolvedTypeExpression { } impl UnresolvedType { - pub fn from_int_token(token: (Comptime, IntType)) -> UnresolvedType { + pub fn from_int_token(token: (CompTime, IntType)) -> UnresolvedType { use {IntType::*, UnresolvedType::Integer}; match token.1 { Signed(num_bits) => Integer(token.0, Signedness::Signed, num_bits), diff --git a/crates/noirc_frontend/src/ast/statement.rs b/crates/noirc_frontend/src/ast/statement.rs index e7076b1ebd5..7c69147c0b0 100644 --- a/crates/noirc_frontend/src/ast/statement.rs +++ b/crates/noirc_frontend/src/ast/statement.rs @@ -214,7 +214,7 @@ impl Statement { let infix = crate::InfixExpression { lhs: lvalue_expr, - operator: operator.try_into_binop(span).expect(error_msg), + operator: operator.try_into_binary_op(span).expect(error_msg), rhs: expression, }; expression = Expression::new(ExpressionKind::Infix(Box::new(infix)), span); diff --git a/crates/noirc_frontend/src/graph/mod.rs b/crates/noirc_frontend/src/graph/mod.rs index ad9984c364a..dadb558f9a6 100644 --- a/crates/noirc_frontend/src/graph/mod.rs +++ b/crates/noirc_frontend/src/graph/mod.rs @@ -28,7 +28,7 @@ impl CrateName { /// Creates a new CrateName rejecting any crate name that /// has a character on the blacklist. /// The difference between RA and this implementation is that - /// characters on the blacklist are never allowed; there is no normalisation. + /// characters on the blacklist are never allowed; there is no normalization. pub fn new(name: &str) -> Result { let is_invalid = name.chars().any(|n| CHARACTER_BLACK_LIST.contains(&n)); if is_invalid { diff --git a/crates/noirc_frontend/src/hir/def_collector/dc_crate.rs b/crates/noirc_frontend/src/hir/def_collector/dc_crate.rs index bc66698af2d..1b4bd7991d1 100644 --- a/crates/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/crates/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -294,7 +294,7 @@ fn resolve_structs( ) { // We must first go through the struct list once to ensure all IDs are pushed to // the def_interner map. This lets structs refer to each other regardless of declaration order - // without resolve_struct_fields nondeterministically unwrapping a value + // without resolve_struct_fields non-deterministically unwrapping a value // that isn't in the HashMap. for (type_id, typ) in &structs { context.def_interner.push_empty_struct(*type_id, typ); diff --git a/crates/noirc_frontend/src/hir/resolution/resolver.rs b/crates/noirc_frontend/src/hir/resolution/resolver.rs index 0845ee4aee1..5b6e592907f 100644 --- a/crates/noirc_frontend/src/hir/resolution/resolver.rs +++ b/crates/noirc_frontend/src/hir/resolution/resolver.rs @@ -296,15 +296,15 @@ impl<'a> Resolver<'a> { /// freshly created TypeVariables created to new_variables. 
fn resolve_type_inner(&mut self, typ: UnresolvedType, new_variables: &mut Generics) -> Type { match typ { - UnresolvedType::FieldElement(comptime) => Type::FieldElement(comptime), + UnresolvedType::FieldElement(comp_time) => Type::FieldElement(comp_time), UnresolvedType::Array(size, elem) => { let resolved_size = self.resolve_array_size(size, new_variables); let elem = Box::new(self.resolve_type_inner(*elem, new_variables)); Type::Array(Box::new(resolved_size), elem) } UnresolvedType::Expression(expr) => self.convert_expression_type(expr), - UnresolvedType::Integer(comptime, sign, bits) => Type::Integer(comptime, sign, bits), - UnresolvedType::Bool(comptime) => Type::Bool(comptime), + UnresolvedType::Integer(comp_time, sign, bits) => Type::Integer(comp_time, sign, bits), + UnresolvedType::Bool(comp_time) => Type::Bool(comp_time), UnresolvedType::String(size) => { let resolved_size = self.resolve_array_size(size, new_variables); Type::String(Box::new(resolved_size)) @@ -613,8 +613,8 @@ impl<'a> Resolver<'a> { let hir_expr = match expr.kind { ExpressionKind::Literal(literal) => HirExpression::Literal(match literal { Literal::Bool(b) => HirLiteral::Bool(b), - Literal::Array(ArrayLiteral::Standard(elems)) => { - HirLiteral::Array(vecmap(elems, |elem| self.resolve_expression(elem))) + Literal::Array(ArrayLiteral::Standard(elements)) => { + HirLiteral::Array(vecmap(elements, |elem| self.resolve_expression(elem))) } Literal::Array(ArrayLiteral::Repeated { repeated_element, length }) => { let len = self.eval_array_length(&length); @@ -878,7 +878,7 @@ impl<'a> Resolver<'a> { let id = self.resolve_path(path)?; if let Some(mut function) = TryFromModuleDefId::try_from(id) { - // Check if this is an unsupported lowlevel opcode. If so, replace it with + // Check if this is an unsupported low level opcode. If so, replace it with // an alternative in the stdlib. if let Some(meta) = self.interner.try_function_meta(&function) { if meta.kind == crate::FunctionKind::LowLevel { diff --git a/crates/noirc_frontend/src/hir/scope/mod.rs b/crates/noirc_frontend/src/hir/scope/mod.rs index bf65be9e6b0..59dd4ae12a8 100644 --- a/crates/noirc_frontend/src/hir/scope/mod.rs +++ b/crates/noirc_frontend/src/hir/scope/mod.rs @@ -112,7 +112,7 @@ impl Default for ScopeTree { } } -// XXX: This trait is needed because when we pop off a forscope in the resolver +// XXX: This trait is needed because when we pop off a for-scope in the resolver // We want to check it for unused variables and return. Currently, // we only have an API for this with ScopeTree in the resolver. impl From> for ScopeTree { diff --git a/crates/noirc_frontend/src/hir/type_check/expr.rs b/crates/noirc_frontend/src/hir/type_check/expr.rs index b5ad956ed45..ad0b4a3710a 100644 --- a/crates/noirc_frontend/src/hir/type_check/expr.rs +++ b/crates/noirc_frontend/src/hir/type_check/expr.rs @@ -7,7 +7,7 @@ use crate::{ types::Type, }, node_interner::{ExprId, FuncId, NodeInterner}, - Comptime, Shared, TypeBinding, + CompTime, Shared, TypeBinding, }; use super::{bind_pattern, errors::TypeCheckError}; @@ -21,7 +21,7 @@ pub(crate) fn type_check_expression( HirExpression::Ident(ident) => { // An identifiers type may be forall-quantified in the case of generic functions. // E.g. `fn foo(t: T, field: Field) -> T` has type `forall T. fn(T, Field) -> T`. - // We must instantiate identifiers at every callsite to replace this T with a new type + // We must instantiate identifiers at every call site to replace this T with a new type // variable to handle generic functions. 
let t = interner.id_type(ident.id); let (typ, bindings) = t.instantiate(interner); @@ -60,11 +60,11 @@ pub(crate) fn type_check_expression( arr_type } - HirLiteral::Bool(_) => Type::Bool(Comptime::new(interner)), + HirLiteral::Bool(_) => Type::Bool(CompTime::new(interner)), HirLiteral::Integer(_) => { let id = interner.next_type_variable_id(); Type::PolymorphicInteger( - Comptime::new(interner), + CompTime::new(interner), Shared::new(TypeBinding::Unbound(id)), ) } @@ -153,7 +153,7 @@ pub(crate) fn type_check_expression( let end_range_type = type_check_expression(interner, &for_expr.end_range, errors); let span = interner.expr_span(&for_expr.start_range); - start_range_type.unify(&Type::comptime(Some(span)), span, errors, || { + start_range_type.unify(&Type::comp_time(Some(span)), span, errors, || { TypeCheckError::TypeCannotBeUsed { typ: start_range_type.clone(), place: "for loop", @@ -163,7 +163,7 @@ pub(crate) fn type_check_expression( }); let span = interner.expr_span(&for_expr.end_range); - end_range_type.unify(&Type::comptime(Some(span)), span, errors, || { + end_range_type.unify(&Type::comp_time(Some(span)), span, errors, || { TypeCheckError::TypeCannotBeUsed { typ: end_range_type.clone(), place: "for loop", @@ -257,8 +257,8 @@ fn type_check_index_expression( let index_type = type_check_expression(interner, &index_expr.index, errors); let span = interner.expr_span(&index_expr.index); - index_type.unify(&Type::comptime(Some(span)), span, errors, || { - // Specialize the error in the case the user has a Field, just not a comptime one. + index_type.unify(&Type::comp_time(Some(span)), span, errors, || { + // Specialize the error in the case the user has a Field, just not a `comptime` one. if matches!(index_type, Type::FieldElement(..)) { TypeCheckError::Unstructured { msg: format!("Array index must be known at compile-time, but here a non-comptime {index_type} was used instead"), @@ -292,14 +292,14 @@ fn type_check_index_expression( } fn check_cast(from: Type, to: Type, span: Span, errors: &mut Vec) -> Type { - let is_comptime = match from { - Type::Integer(is_comptime, ..) => is_comptime, - Type::FieldElement(is_comptime) => is_comptime, - Type::PolymorphicInteger(is_comptime, binding) => match &*binding.borrow() { + let is_comp_time = match from { + Type::Integer(is_comp_time, ..) 
=> is_comp_time, + Type::FieldElement(is_comp_time) => is_comp_time, + Type::PolymorphicInteger(is_comp_time, binding) => match &*binding.borrow() { TypeBinding::Bound(from) => return check_cast(from.clone(), to, span, errors), - TypeBinding::Unbound(_) => is_comptime, + TypeBinding::Unbound(_) => is_comp_time, }, - Type::Bool(is_comptime) => is_comptime, + Type::Bool(is_comp_time) => is_comp_time, Type::Error => return Type::Error, from => { let msg = format!( @@ -313,28 +313,28 @@ fn check_cast(from: Type, to: Type, span: Span, errors: &mut Vec let error_message = "Cannot cast to a comptime type, argument to cast is not known at compile-time"; match to { - Type::Integer(dest_comptime, sign, bits) => { - if dest_comptime.is_comptime() && is_comptime.unify(&dest_comptime, span).is_err() { + Type::Integer(dest_comp_time, sign, bits) => { + if dest_comp_time.is_comp_time() && is_comp_time.unify(&dest_comp_time, span).is_err() { let msg = error_message.into(); errors.push(TypeCheckError::Unstructured { msg, span }); } - Type::Integer(is_comptime, sign, bits) + Type::Integer(is_comp_time, sign, bits) } - Type::FieldElement(dest_comptime) => { - if dest_comptime.is_comptime() && is_comptime.unify(&dest_comptime, span).is_err() { + Type::FieldElement(dest_comp_time) => { + if dest_comp_time.is_comp_time() && is_comp_time.unify(&dest_comp_time, span).is_err() { let msg = error_message.into(); errors.push(TypeCheckError::Unstructured { msg, span }); } - Type::FieldElement(is_comptime) + Type::FieldElement(is_comp_time) } - Type::Bool(dest_comptime) => { - if dest_comptime.is_comptime() && is_comptime.unify(&dest_comptime, span).is_err() { + Type::Bool(dest_comp_time) => { + if dest_comp_time.is_comp_time() && is_comp_time.unify(&dest_comp_time, span).is_err() { let msg = error_message.into(); errors.push(TypeCheckError::Unstructured { msg, span }); } - Type::Bool(dest_comptime) + Type::Bool(dest_comp_time) } Type::Error => Type::Error, _ => { @@ -384,7 +384,7 @@ fn lookup_method( } } -// We need a special function to typecheck method calls since the method +// We need a special function to type check method calls since the method // is not a Expression::Ident it must be manually instantiated here fn type_check_method_call( interner: &mut NodeInterner, @@ -426,7 +426,7 @@ fn bind_function_type( errors: &mut Vec, ) -> Type { // Could do a single unification for the entire function type, but matching beforehand - // lets us issue a more precise error on the individual argument that fails to typecheck. + // lets us issue a more precise error on the individual argument that fails to type check. 
match function { Type::TypeVariable(binding) => { if let TypeBinding::Bound(typ) = &*binding.borrow() { @@ -584,7 +584,7 @@ pub fn infix_operand_type_rules( return infix_operand_type_rules(binding, op, other, span, interner, errors); } - let comptime = Comptime::No(None); + let comptime = CompTime::No(None); if other.try_bind_to_polymorphic_int(var, &comptime, true, op.location.span).is_ok() || other == &Type::Error { Ok(other.clone()) } else { @@ -606,9 +606,9 @@ fn check_if_expr( let then_type = type_check_expression(interner, &if_expr.consequence, errors); let expr_span = interner.expr_span(&if_expr.condition); - cond_type.unify(&Type::Bool(Comptime::new(interner)), expr_span, errors, || { + cond_type.unify(&Type::Bool(CompTime::new(interner)), expr_span, errors, || { TypeCheckError::TypeMismatch { - expected_typ: Type::Bool(Comptime::No(None)).to_string(), + expected_typ: Type::Bool(CompTime::No(None)).to_string(), expr_typ: cond_type.to_string(), expr_span, } @@ -768,7 +768,7 @@ pub fn comparator_operand_type_rules( } // Avoid reporting errors multiple times - (Error, _) | (_,Error) => Ok(Bool(Comptime::Yes(None))), + (Error, _) | (_,Error) => Ok(Bool(CompTime::Yes(None))), // Special-case == and != for arrays (Array(x_size, x_type), Array(y_size, y_type)) if matches!(op.kind, Equal | NotEqual) => { @@ -787,11 +787,11 @@ pub fn comparator_operand_type_rules( }); // We could check if all elements of all arrays are comptime but I am lazy - Ok(Bool(Comptime::No(Some(op.location.span)))) + Ok(Bool(CompTime::No(Some(op.location.span)))) } (NamedGeneric(binding_a, name_a), NamedGeneric(binding_b, name_b)) => { if binding_a == binding_b { - return Ok(Bool(Comptime::No(Some(op.location.span)))); + return Ok(Bool(CompTime::No(Some(op.location.span)))); } Err(format!("Unsupported types for comparison: {name_a} and {name_b}")) } @@ -801,7 +801,7 @@ pub fn comparator_operand_type_rules( return comparator_operand_type_rules(binding, other, op, errors); } - let comptime = Comptime::No(None); + let comptime = CompTime::No(None); if other.try_bind_to_polymorphic_int(var, &comptime, true, op.location.span).is_ok() || other == &Type::Error { Ok(other.clone()) } else { @@ -816,7 +816,7 @@ pub fn comparator_operand_type_rules( } }); - Ok(Bool(Comptime::No(Some(op.location.span)))) + Ok(Bool(CompTime::No(Some(op.location.span)))) } (lhs, rhs) => Err(format!("Unsupported types for comparison: {lhs} and {rhs}")), } diff --git a/crates/noirc_frontend/src/hir/type_check/mod.rs b/crates/noirc_frontend/src/hir/type_check/mod.rs index f659a14797e..8453d5e5f77 100644 --- a/crates/noirc_frontend/src/hir/type_check/mod.rs +++ b/crates/noirc_frontend/src/hir/type_check/mod.rs @@ -126,7 +126,7 @@ mod test { // Create let statement let let_stmt = HirLetStatement { pattern: Identifier(z), - r#type: Type::FieldElement(crate::Comptime::No(None)), + r#type: Type::FieldElement(crate::CompTime::No(None)), expression: expr_id, }; let stmt_id = interner.push_stmt(HirStatement::Let(let_stmt)); diff --git a/crates/noirc_frontend/src/hir/type_check/stmt.rs b/crates/noirc_frontend/src/hir/type_check/stmt.rs index 3f8cb46ef8a..ac4c18139dd 100644 --- a/crates/noirc_frontend/src/hir/type_check/stmt.rs +++ b/crates/noirc_frontend/src/hir/type_check/stmt.rs @@ -5,7 +5,7 @@ use crate::hir_def::stmt::{ }; use crate::hir_def::types::Type; use crate::node_interner::{DefinitionId, ExprId, NodeInterner, StmtId}; -use crate::Comptime; +use crate::CompTime; use super::{errors::TypeCheckError, expr::type_check_expression}; @@ -26,7 +26,7 @@ 
pub(crate) fn type_check( // 5; or x; or x+a; // // In these cases, you cannot even get the expr_id because - // it is not binded to anything. We could therefore. + // it is not bound to anything. We could therefore. // // However since TypeChecking checks the return type of the last statement // the type checker could in the future incorrectly return the type. @@ -183,7 +183,7 @@ fn type_check_lvalue( let index_type = type_check_expression(interner, &index, errors); let expr_span = interner.expr_span(&index); - index_type.unify(&Type::comptime(Some(expr_span)), expr_span, errors, || { + index_type.unify(&Type::comp_time(Some(expr_span)), expr_span, errors, || { TypeCheckError::TypeMismatch { expected_typ: "comptime Field".to_owned(), expr_typ: index_type.to_string(), @@ -221,7 +221,7 @@ fn type_check_let_stmt( let mut resolved_type = type_check_declaration(interner, let_stmt.expression, let_stmt.r#type, errors); - resolved_type.set_comptime_span(interner.expr_span(&let_stmt.expression)); + resolved_type.set_comp_time_span(interner.expr_span(&let_stmt.expression)); // Set the type of the pattern to be equal to the annotated type bind_pattern(interner, &let_stmt.pattern, resolved_type, errors); @@ -235,10 +235,10 @@ fn type_check_constrain_stmt( let expr_type = type_check_expression(interner, &stmt.0, errors); let expr_span = interner.expr_span(&stmt.0); - expr_type.unify(&Type::Bool(Comptime::new(interner)), expr_span, errors, || { + expr_type.unify(&Type::Bool(CompTime::new(interner)), expr_span, errors, || { TypeCheckError::TypeMismatch { expr_typ: expr_type.to_string(), - expected_typ: Type::Bool(Comptime::No(None)).to_string(), + expected_typ: Type::Bool(CompTime::No(None)).to_string(), expr_span, } }); @@ -260,7 +260,7 @@ fn type_check_declaration( // If so, then we give it the same type as the expression if annotated_type != Type::Error { // Now check if LHS is the same type as the RHS - // Importantly, we do not co-erce any types implicitly + // Importantly, we do not coerce any types implicitly let expr_span = interner.expr_span(&rhs_expr); expr_type.make_subtype_of(&annotated_type, expr_span, errors, || { TypeCheckError::TypeMismatch { diff --git a/crates/noirc_frontend/src/hir_def/types.rs b/crates/noirc_frontend/src/hir_def/types.rs index 2f7bdd250df..b99cb917df4 100644 --- a/crates/noirc_frontend/src/hir_def/types.rs +++ b/crates/noirc_frontend/src/hir_def/types.rs @@ -190,11 +190,11 @@ impl std::fmt::Display for StructType { #[derive(Debug, PartialEq, Eq, Clone, Hash)] pub enum Type { - FieldElement(Comptime), + FieldElement(CompTime), Array(Box, Box), // Array(4, Field) = [Field; 4] - Integer(Comptime, Signedness, u32), // u32 = Integer(unsigned, 32) - PolymorphicInteger(Comptime, TypeVariable), - Bool(Comptime), + Integer(CompTime, Signedness, u32), // u32 = Integer(unsigned, 32) + PolymorphicInteger(CompTime, TypeVariable), + Bool(CompTime), String(Box), Unit, Struct(Shared, Vec), @@ -211,7 +211,7 @@ pub enum Type { /// A type generic over the given type variables. /// Storing both the TypeVariableId and TypeVariable isn't necessary /// but it makes handling them both easier. The TypeVariableId should - /// never be bound over during type checking, but during monomorphisation it + /// never be bound over during type checking, but during monomorphization it /// will be and thus needs the full TypeVariable link. 
Forall(Generics, Box), @@ -251,21 +251,21 @@ impl TypeBinding { pub struct TypeVariableId(pub usize); #[derive(Debug, Clone, Eq)] -pub enum Comptime { +pub enum CompTime { // Yes and No variants have optional spans representing the location in the source code - // which caused them to be comptime. + // which caused them to be compile time. Yes(Option), No(Option), - Maybe(TypeVariableId, Rc>>), + Maybe(TypeVariableId, Rc>>), } -impl std::hash::Hash for Comptime { +impl std::hash::Hash for CompTime { fn hash(&self, state: &mut H) { core::mem::discriminant(self).hash(state); - if let Comptime::Maybe(id, binding) = self { - if let Some(is_comptime) = &*binding.borrow() { - is_comptime.hash(state); + if let CompTime::Maybe(id, binding) = self { + if let Some(is_comp_time) = &*binding.borrow() { + is_comp_time.hash(state); } else { id.hash(state); } @@ -273,10 +273,10 @@ impl std::hash::Hash for Comptime { } } -impl PartialEq for Comptime { +impl PartialEq for CompTime { fn eq(&self, other: &Self) -> bool { match (self, other) { - (Comptime::Maybe(id1, binding1), Comptime::Maybe(id2, binding2)) => { + (CompTime::Maybe(id1, binding1), CompTime::Maybe(id2, binding2)) => { if let Some(new_self) = &*binding1.borrow() { return new_self == other; } @@ -285,7 +285,7 @@ impl PartialEq for Comptime { } id1 == id2 } - (Comptime::Yes(_), Comptime::Yes(_)) | (Comptime::No(_), Comptime::No(_)) => true, + (CompTime::Yes(_), CompTime::Yes(_)) | (CompTime::No(_), CompTime::No(_)) => true, _ => false, } } @@ -295,12 +295,12 @@ impl PartialEq for Comptime { /// to provide better error messages #[derive(Debug)] pub enum SpanKind { - Comptime(Span), - NotComptime(Span), + CompTime(Span), + NotCompTime(Span), None, } -impl Comptime { +impl CompTime { pub fn new(interner: &mut NodeInterner) -> Self { let id = interner.next_type_variable_id(); Self::Maybe(id, Rc::new(RefCell::new(None))) @@ -308,8 +308,8 @@ impl Comptime { fn set_span(&mut self, new_span: Span) { match self { - Comptime::Yes(span) | Comptime::No(span) => *span = Some(new_span), - Comptime::Maybe(_, binding) => { + CompTime::Yes(span) | CompTime::No(span) => *span = Some(new_span), + CompTime::Maybe(_, binding) => { if let Some(binding) = &mut *binding.borrow_mut() { binding.set_span(new_span); } @@ -317,30 +317,30 @@ impl Comptime { } } - /// Try to unify these two Comptime constraints. + /// Try to unify these two CompTime constraints. 
pub fn unify(&self, other: &Self, span: Span) -> Result<(), SpanKind> { match (self, other) { - (Comptime::Yes(_), Comptime::Yes(_)) | (Comptime::No(_), Comptime::No(_)) => Ok(()), + (CompTime::Yes(_), CompTime::Yes(_)) | (CompTime::No(_), CompTime::No(_)) => Ok(()), - (Comptime::Yes(y), Comptime::No(n)) | (Comptime::No(n), Comptime::Yes(y)) => { + (CompTime::Yes(y), CompTime::No(n)) | (CompTime::No(n), CompTime::Yes(y)) => { Err(match (y, n) { - (_, Some(span)) => SpanKind::NotComptime(*span), - (Some(span), _) => SpanKind::Comptime(*span), + (_, Some(span)) => SpanKind::NotCompTime(*span), + (Some(span), _) => SpanKind::CompTime(*span), _ => SpanKind::None, }) } - (Comptime::Maybe(_, binding), other) | (other, Comptime::Maybe(_, binding)) + (CompTime::Maybe(_, binding), other) | (other, CompTime::Maybe(_, binding)) if binding.borrow().is_some() => { let binding = &*binding.borrow(); binding.as_ref().unwrap().unify(other, span) } - (Comptime::Maybe(id1, _), Comptime::Maybe(id2, _)) if id1 == id2 => Ok(()), + (CompTime::Maybe(id1, _), CompTime::Maybe(id2, _)) if id1 == id2 => Ok(()), // Both are unbound and do not refer to each other, arbitrarily set one equal to the other - (Comptime::Maybe(_, binding), other) | (other, Comptime::Maybe(_, binding)) => { + (CompTime::Maybe(_, binding), other) | (other, CompTime::Maybe(_, binding)) => { let mut clone = other.clone(); clone.set_span(span); *binding.borrow_mut() = Some(clone); @@ -349,47 +349,47 @@ impl Comptime { } } - /// Try to unify these two Comptime constraints. + /// Try to unify these two CompTime constraints. pub fn is_subtype_of(&self, other: &Self, span: Span) -> Result<(), SpanKind> { match (self, other) { - (Comptime::Yes(_), Comptime::Yes(_)) - | (Comptime::No(_), Comptime::No(_)) + (CompTime::Yes(_), CompTime::Yes(_)) + | (CompTime::No(_), CompTime::No(_)) - // This is one of the only 2 differing cases between this and Comptime::unify - | (Comptime::Yes(_), Comptime::No(_)) => Ok(()), + // This is one of the only 2 differing cases between this and CompTime::unify + | (CompTime::Yes(_), CompTime::No(_)) => Ok(()), - (Comptime::No(n), Comptime::Yes(y)) => { + (CompTime::No(n), CompTime::Yes(y)) => { Err(match (y, n) { - (_, Some(span)) => SpanKind::NotComptime(*span), - (Some(span), _) => SpanKind::Comptime(*span), + (_, Some(span)) => SpanKind::NotCompTime(*span), + (Some(span), _) => SpanKind::CompTime(*span), _ => SpanKind::None, }) } - (Comptime::Maybe(_, binding), other) if binding.borrow().is_some() => { + (CompTime::Maybe(_, binding), other) if binding.borrow().is_some() => { let binding = &*binding.borrow(); binding.as_ref().unwrap().is_subtype_of(other, span) } - (other, Comptime::Maybe(_, binding)) if binding.borrow().is_some() => { + (other, CompTime::Maybe(_, binding)) if binding.borrow().is_some() => { let binding = &*binding.borrow(); other.is_subtype_of(binding.as_ref().unwrap(), span) } - (Comptime::Maybe(id1, _), Comptime::Maybe(id2, _)) if id1 == id2 => Ok(()), + (CompTime::Maybe(id1, _), CompTime::Maybe(id2, _)) if id1 == id2 => Ok(()), - // This is the other differing case between this and Comptime::unify. - // If this is polymorphically comptime, dont force it to be non-comptime because it is + // This is the other differing case between this and CompTime::unify. + // If this is polymorphically comptime, don't force it to be non-comptime because it is // passed as an argument to a function expecting a non-comptime parameter. 
- (Comptime::Maybe(_, binding), Comptime::No(_)) if binding.borrow().is_none() => Ok(()), + (CompTime::Maybe(_, binding), CompTime::No(_)) if binding.borrow().is_none() => Ok(()), - (Comptime::Maybe(_, binding), other) => { + (CompTime::Maybe(_, binding), other) => { let mut clone = other.clone(); clone.set_span(span); *binding.borrow_mut() = Some(clone); Ok(()) } - (other, Comptime::Maybe(_, binding)) => { + (other, CompTime::Maybe(_, binding)) => { let mut clone = other.clone(); clone.set_span(span); *binding.borrow_mut() = Some(clone); @@ -398,28 +398,28 @@ impl Comptime { } } - /// Combine these two Comptimes together, returning - /// - Comptime::Yes if both are Yes, - /// - Comptime::No if either are No, + /// Combine these two CompTimes together, returning + /// - CompTime::Yes if both are Yes, + /// - CompTime::No if either are No, /// - or if both are Maybe, unify them both and return the lhs. pub fn and(&self, other: &Self, span: Span) -> Self { match (self, other) { - (Comptime::Yes(_), Comptime::Yes(_)) => Comptime::Yes(Some(span)), + (CompTime::Yes(_), CompTime::Yes(_)) => CompTime::Yes(Some(span)), - (Comptime::No(_), Comptime::No(_)) - | (Comptime::Yes(_), Comptime::No(_)) - | (Comptime::No(_), Comptime::Yes(_)) => Comptime::No(Some(span)), + (CompTime::No(_), CompTime::No(_)) + | (CompTime::Yes(_), CompTime::No(_)) + | (CompTime::No(_), CompTime::Yes(_)) => CompTime::No(Some(span)), - (Comptime::Maybe(_, binding), other) | (other, Comptime::Maybe(_, binding)) + (CompTime::Maybe(_, binding), other) | (other, CompTime::Maybe(_, binding)) if binding.borrow().is_some() => { let binding = &*binding.borrow(); binding.as_ref().unwrap().and(other, span) } - (Comptime::Maybe(id1, _), Comptime::Maybe(id2, _)) if id1 == id2 => self.clone(), + (CompTime::Maybe(id1, _), CompTime::Maybe(id2, _)) if id1 == id2 => self.clone(), - (Comptime::Maybe(_, binding), other) | (other, Comptime::Maybe(_, binding)) => { + (CompTime::Maybe(_, binding), other) | (other, CompTime::Maybe(_, binding)) => { let mut clone = other.clone(); clone.set_span(span); *binding.borrow_mut() = Some(clone); @@ -428,13 +428,13 @@ impl Comptime { } } - pub fn is_comptime(&self) -> bool { + pub fn is_comp_time(&self) -> bool { match self { - Comptime::Yes(_) => true, - Comptime::No(_) => false, - Comptime::Maybe(_, binding) => { + CompTime::Yes(_) => true, + CompTime::No(_) => false, + CompTime::Maybe(_, binding) => { if let Some(binding) = &*binding.borrow() { - return binding.is_comptime(); + return binding.is_comp_time(); } true } @@ -444,11 +444,11 @@ impl Comptime { impl Type { pub fn field(span: Option) -> Type { - Type::FieldElement(Comptime::No(span)) + Type::FieldElement(CompTime::No(span)) } - pub fn comptime(span: Option) -> Type { - Type::FieldElement(Comptime::Yes(span)) + pub fn comp_time(span: Option) -> Type { + Type::FieldElement(CompTime::Yes(span)) } pub fn default_int_type(span: Option) -> Type { @@ -462,7 +462,7 @@ impl Type { /// A bit of an awkward name for this function - this function returns /// true for type variables or polymorphic integers which are unbound. /// NamedGenerics will always be false as although they are bindable, - /// they shouldn't be bound over until monomorphisation. + /// they shouldn't be bound over until monomorphization. 
pub fn is_bindable(&self) -> bool { match self { Type::PolymorphicInteger(_, binding) | Type::TypeVariable(binding) => { @@ -483,13 +483,13 @@ impl Type { impl std::fmt::Display for Type { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Type::FieldElement(comptime) => { - write!(f, "{comptime}Field") + Type::FieldElement(comp_time) => { + write!(f, "{comp_time}Field") } Type::Array(len, typ) => write!(f, "[{typ}; {len}]"), - Type::Integer(comptime, sign, num_bits) => match sign { - Signedness::Signed => write!(f, "{comptime}i{num_bits}"), - Signedness::Unsigned => write!(f, "{comptime}u{num_bits}"), + Type::Integer(comp_time, sign, num_bits) => match sign { + Signedness::Signed => write!(f, "{comp_time}i{num_bits}"), + Signedness::Unsigned => write!(f, "{comp_time}u{num_bits}"), }, Type::PolymorphicInteger(_, binding) => { if let TypeBinding::Unbound(_) = &*binding.borrow() { @@ -513,7 +513,7 @@ impl std::fmt::Display for Type { let elements = vecmap(elements, ToString::to_string); write!(f, "({})", elements.join(", ")) } - Type::Bool(comptime) => write!(f, "{comptime}bool"), + Type::Bool(comp_time) => write!(f, "{comp_time}bool"), Type::String(len) => write!(f, "str<{len}>"), Type::Unit => write!(f, "()"), Type::Error => write!(f, "error"), @@ -563,12 +563,12 @@ impl std::fmt::Display for TypeBinding { } } -impl std::fmt::Display for Comptime { +impl std::fmt::Display for CompTime { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Comptime::Yes(_) => write!(f, "comptime "), - Comptime::No(_) => Ok(()), - Comptime::Maybe(_, binding) => match &*binding.borrow() { + CompTime::Yes(_) => write!(f, "comptime "), + CompTime::No(_) => Ok(()), + CompTime::Maybe(_, binding) => match &*binding.borrow() { Some(binding) => binding.fmt(f), None => write!(f, "comptime "), }, @@ -577,17 +577,17 @@ impl std::fmt::Display for Comptime { } impl Type { - /// Mutate the span for Comptime to track where comptime is required for better - /// error messages that show both the erroring callsite and the callsite before + /// Mutate the span for the `CompTime` enum to track where a type is required to be `comptime` + /// for error messages that show both the erroring call site and the call site before /// which required the variable to be comptime or non-comptime. - pub fn set_comptime_span(&mut self, new_span: Span) { + pub fn set_comp_time_span(&mut self, new_span: Span) { match self { Type::FieldElement(comptime) | Type::Integer(comptime, _, _) => { comptime.set_span(new_span) } Type::PolymorphicInteger(span, binding) => { if let TypeBinding::Bound(binding) = &mut *binding.borrow_mut() { - return binding.set_comptime_span(new_span); + return binding.set_comp_time_span(new_span); } span.set_span(new_span); } @@ -595,14 +595,14 @@ impl Type { } } - pub fn set_comptime(&mut self, new_comptime: Comptime) { + pub fn set_comp_time(&mut self, new_comptime: CompTime) { match self { Type::FieldElement(comptime) | Type::Integer(comptime, _, _) => { *comptime = new_comptime; } Type::PolymorphicInteger(comptime, binding) => { if let TypeBinding::Bound(binding) = &mut *binding.borrow_mut() { - return binding.set_comptime(new_comptime); + return binding.set_comp_time(new_comptime); } *comptime = new_comptime; } @@ -611,12 +611,12 @@ impl Type { } /// Try to bind a PolymorphicInt variable to self, succeeding if self is an integer, field, - /// other PolymorphicInt type, or type variable. 
If use_subtype is true, the Comptime fields - /// of each will be checked via subtyping rather than unification. + /// other PolymorphicInt type, or type variable. If use_subtype is true, the CompTime fields + /// of each will be checked via sub-typing rather than unification. pub fn try_bind_to_polymorphic_int( &self, var: &TypeVariable, - var_comptime: &Comptime, + var_comp_time: &CompTime, use_subtype: bool, span: Span, ) -> Result<(), SpanKind> { @@ -625,36 +625,36 @@ impl Type { TypeBinding::Unbound(id) => *id, }; - let bind = |int_comptime: &Comptime| { + let bind = |int_comp_time: &CompTime| { let mut clone = self.clone(); - let mut new_comptime = var_comptime.clone(); - new_comptime.set_span(span); - clone.set_comptime(new_comptime); + let mut new_comp_time = var_comp_time.clone(); + new_comp_time.set_span(span); + clone.set_comp_time(new_comp_time); *var.borrow_mut() = TypeBinding::Bound(clone); if use_subtype { - var_comptime.is_subtype_of(int_comptime, span) + var_comp_time.is_subtype_of(int_comp_time, span) } else { - var_comptime.unify(int_comptime, span) + var_comp_time.unify(int_comp_time, span) } }; match self { - Type::FieldElement(int_comptime, ..) | Type::Integer(int_comptime, ..) => { - bind(int_comptime) + Type::FieldElement(int_comp_time, ..) | Type::Integer(int_comp_time, ..) => { + bind(int_comp_time) } - Type::PolymorphicInteger(int_comptime, self_var) => { + Type::PolymorphicInteger(int_comp_time, self_var) => { let borrow = self_var.borrow(); match &*borrow { TypeBinding::Bound(typ) => { - typ.try_bind_to_polymorphic_int(var, var_comptime, use_subtype, span) + typ.try_bind_to_polymorphic_int(var, var_comp_time, use_subtype, span) } // Avoid infinitely recursive bindings TypeBinding::Unbound(id) if *id == target_id => Ok(()), TypeBinding::Unbound(_) => { drop(borrow); - bind(int_comptime) + bind(int_comp_time) } } } @@ -662,7 +662,7 @@ impl Type { let borrow = binding.borrow(); match &*borrow { TypeBinding::Bound(typ) => { - typ.try_bind_to_polymorphic_int(var, var_comptime, use_subtype, span) + typ.try_bind_to_polymorphic_int(var, var_comp_time, use_subtype, span) } // Avoid infinitely recursive bindings TypeBinding::Unbound(id) if *id == target_id => Ok(()), @@ -670,8 +670,9 @@ impl Type { drop(borrow); // PolymorphicInt is more specific than TypeVariable so we bind the type // variable to PolymorphicInt instead. - let mut clone = Type::PolymorphicInteger(var_comptime.clone(), var.clone()); - clone.set_comptime_span(span); + let mut clone = + Type::PolymorphicInteger(var_comp_time.clone(), var.clone()); + clone.set_comp_time_span(span); *binding.borrow_mut() = TypeBinding::Bound(clone); Ok(()) } @@ -687,7 +688,7 @@ impl Type { TypeBinding::Unbound(id) => *id, }; - if let Some(binding) = self.get_inner_typevariable() { + if let Some(binding) = self.get_inner_type_variable() { match &*binding.borrow() { TypeBinding::Bound(typ) => return typ.try_bind_to(var), // Don't recursively bind the same id to itself @@ -706,7 +707,7 @@ impl Type { } } - fn get_inner_typevariable(&self) -> Option> { + fn get_inner_type_variable(&self) -> Option> { match self { Type::PolymorphicInteger(_, var) | Type::TypeVariable(var) @@ -715,15 +716,15 @@ impl Type { } } - fn is_comptime(&self) -> bool { + fn is_comp_time(&self) -> bool { match self { - Type::FieldElement(comptime) => comptime.is_comptime(), - Type::Integer(comptime, ..) => comptime.is_comptime(), + Type::FieldElement(comptime) => comptime.is_comp_time(), + Type::Integer(comptime, ..) 
=> comptime.is_comp_time(), Type::PolymorphicInteger(comptime, binding) => { if let TypeBinding::Bound(binding) = &*binding.borrow() { - return binding.is_comptime(); + return binding.is_comp_time(); } - comptime.is_comptime() + comptime.is_comp_time() } _ => false, } @@ -731,7 +732,7 @@ impl Type { /// Try to unify this type with another, setting any type variables found /// equal to the other type in the process. Unification is more strict - /// than subtyping but less strict than Eq. Returns true if the unification + /// than sub-typing but less strict than Eq. Returns true if the unification /// succeeded. Note that any bindings performed in a failed unification are /// not undone. This may cause further type errors later on. pub fn unify( @@ -754,12 +755,12 @@ impl Type { ) { errors.push(make_error()); - match (expected.is_comptime(), err_span) { - (true, SpanKind::NotComptime(span)) => { + match (expected.is_comp_time(), err_span) { + (true, SpanKind::NotCompTime(span)) => { let msg = "The value is non-comptime because of this expression, which uses another non-comptime value".into(); errors.push(TypeCheckError::Unstructured { msg, span }); } - (false, SpanKind::Comptime(span)) => { + (false, SpanKind::CompTime(span)) => { let msg = "The value is comptime because of this expression, which forces the value to be comptime".into(); errors.push(TypeCheckError::Unstructured { msg, span }); } @@ -798,11 +799,11 @@ impl Type { elem_a.try_unify(elem_b, span) } - (Tuple(elems_a), Tuple(elems_b)) => { - if elems_a.len() != elems_b.len() { + (Tuple(elements_a), Tuple(elements_b)) => { + if elements_a.len() != elements_b.len() { Err(SpanKind::None) } else { - for (a, b) in elems_a.iter().zip(elems_b) { + for (a, b) in elements_a.iter().zip(elements_b) { a.try_unify(b, span)?; } Ok(()) @@ -871,8 +872,8 @@ impl Type { } } - /// The `subtype` term here is somewhat loose, the only subtyping relations remaining - /// have to do with Comptime tracking. + /// The `subtype` term here is somewhat loose, the only sub-typing relations remaining + /// have to do with CompTime tracking. pub fn make_subtype_of( &self, expected: &Type, @@ -929,11 +930,11 @@ impl Type { elem_a.is_subtype_of(elem_b, span) } - (Tuple(elems_a), Tuple(elems_b)) => { - if elems_a.len() != elems_b.len() { + (Tuple(elements_a), Tuple(elements_b)) => { + if elements_a.len() != elements_b.len() { Err(SpanKind::None) } else { - for (a, b) in elems_a.iter().zip(elems_b) { + for (a, b) in elements_a.iter().zip(elements_b) { a.is_subtype_of(b, span)?; } Ok(()) @@ -1068,7 +1069,7 @@ impl Type { pub fn iter_fields(&self) -> impl Iterator { let fields: Vec<_> = match self { // Unfortunately the .borrow() here forces us to collect into a Vec - // only to have to call .into_iter again afterward. Trying to ellide + // only to have to call .into_iter again afterward. 
Trying to elide // collecting to a Vec leads to us dropping the temporary Ref before // the iterator is returned Type::Struct(def, args) => vecmap(&def.borrow().fields, |(name, _)| { diff --git a/crates/noirc_frontend/src/lexer/lexer.rs b/crates/noirc_frontend/src/lexer/lexer.rs index 63140986473..cbdae322e81 100644 --- a/crates/noirc_frontend/src/lexer/lexer.rs +++ b/crates/noirc_frontend/src/lexer/lexer.rs @@ -204,8 +204,8 @@ impl<'a> Lexer<'a> { if peeked_char.is_ascii_alphabetic() { // Okay to unwrap here because we already peeked to // see that we have a character - let curr_char = self.next_char().unwrap(); - return self.eat_word(curr_char); + let current_char = self.next_char().unwrap(); + return self.eat_word(current_char); } Ok(spanned_prev_token) @@ -236,7 +236,7 @@ impl<'a> Lexer<'a> { // Keep checking that we are not at the EOF while let Some(peek_char) = self.peek_char() { // Then check for the predicate, if predicate matches append char and increment the cursor - // If not, return word. The next character will be analysed on the next iteration of next_token, + // If not, return word. The next character will be analyzed on the next iteration of next_token, // Which will increment the cursor if !predicate(peek_char) { return (word, start, self.position); @@ -293,7 +293,7 @@ impl<'a> Lexer<'a> { Ok(attribute.into_span(start - 1, end + 1)) } - //XXX(low): Can increase performance if we use iterator semantic and utilise some of the methods on String. See below + //XXX(low): Can increase performance if we use iterator semantic and utilize some of the methods on String. See below // https://doc.rust-lang.org/stable/std/primitive.str.html#method.rsplit fn eat_word(&mut self, initial_char: char) -> SpannedTokenResult { let (word, start, end) = self.eat_while(Some(initial_char), |ch| { @@ -447,7 +447,7 @@ fn test_int_type() { } #[test] -fn test_arithematic_sugar() { +fn test_arithmetic_sugar() { let input = "+= -= *= /= %="; let expected = vec![ diff --git a/crates/noirc_frontend/src/lexer/token.rs b/crates/noirc_frontend/src/lexer/token.rs index 5d0ed99b866..9f0a1fa65c0 100644 --- a/crates/noirc_frontend/src/lexer/token.rs +++ b/crates/noirc_frontend/src/lexer/token.rs @@ -129,8 +129,8 @@ pub enum Token { #[allow(clippy::upper_case_acronyms)] EOF, - // An invalid character is one that is not in noir's language or grammer. - // Delaying reporting these as errors until parsing improves error messsages + // An invalid character is one that is not in noir's language or grammar. 
+ // Delaying reporting these as errors until parsing improves error messages Invalid(char), } @@ -235,9 +235,9 @@ impl Token { [Plus, Minus, Star, Slash, Percent, Ampersand, Caret, ShiftLeft, ShiftRight, Pipe] } - pub fn try_into_binop(self, span: Span) -> Option> { + pub fn try_into_binary_op(self, span: Span) -> Option> { use crate::BinaryOpKind::*; - let binop = match self { + let binary_op = match self { Token::Plus => Add, Token::Ampersand => And, Token::Caret => Xor, @@ -256,7 +256,7 @@ impl Token { Token::Percent => Modulo, _ => return None, }; - Some(Spanned::from(span, binop)) + Some(Spanned::from(span, binary_op)) } } @@ -396,7 +396,7 @@ pub enum Keyword { As, Bool, Char, - Comptime, + CompTime, Constrain, Crate, Dep, @@ -425,7 +425,7 @@ impl fmt::Display for Keyword { Keyword::As => write!(f, "as"), Keyword::Bool => write!(f, "bool"), Keyword::Char => write!(f, "char"), - Keyword::Comptime => write!(f, "comptime"), + Keyword::CompTime => write!(f, "comptime"), Keyword::Constrain => write!(f, "constrain"), Keyword::Crate => write!(f, "crate"), Keyword::Dep => write!(f, "dep"), @@ -459,7 +459,7 @@ impl Keyword { "as" => Keyword::As, "bool" => Keyword::Bool, "char" => Keyword::Char, - "comptime" => Keyword::Comptime, + "comptime" => Keyword::CompTime, "constrain" => Keyword::Constrain, "crate" => Keyword::Crate, "dep" => Keyword::Dep, diff --git a/crates/noirc_frontend/src/lib.rs b/crates/noirc_frontend/src/lib.rs index 50501b22aed..64b98c64eb3 100644 --- a/crates/noirc_frontend/src/lib.rs +++ b/crates/noirc_frontend/src/lib.rs @@ -1,7 +1,7 @@ pub mod ast; pub mod graph; pub mod lexer; -pub mod monomorphisation; +pub mod monomorphization; pub mod node_interner; pub mod parser; diff --git a/crates/noirc_frontend/src/main.rs b/crates/noirc_frontend/src/main.rs index ccc8f3ce9f4..7a61d8387ad 100644 --- a/crates/noirc_frontend/src/main.rs +++ b/crates/noirc_frontend/src/main.rs @@ -57,7 +57,7 @@ fn main() { // println!("-----------------------------Start Data for module at position {}----------------------------", i); // println!("current module id is: {:?}", module_index); // println!("parent module id is: {:?}", module_data.parent); - // println!("-----------------------------Chidren for module at position {}----------------------------", i); + // println!("-----------------------------Children for module at position {}----------------------------", i); // for (child_name, child_id) in &module_data.children { // println!("{:?} is a child module with id {:?}", child_name, child_id); // } diff --git a/crates/noirc_frontend/src/monomorphisation/ast.rs b/crates/noirc_frontend/src/monomorphization/ast.rs similarity index 95% rename from crates/noirc_frontend/src/monomorphisation/ast.rs rename to crates/noirc_frontend/src/monomorphization/ast.rs index 47796f96bcb..bcd6c744ff5 100644 --- a/crates/noirc_frontend/src/monomorphisation/ast.rs +++ b/crates/noirc_frontend/src/monomorphization/ast.rs @@ -160,7 +160,7 @@ pub struct Function { pub return_type: Type, } -/// A monomorphised Type has all type variables removed +/// A monomorphized Type has all type variables removed #[derive(Debug, PartialEq, Eq, Clone)] pub enum Type { Field, @@ -253,7 +253,7 @@ impl std::fmt::Display for Type { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Type::Field => write!(f, "Field"), - Type::Array(len, elems) => write!(f, "[{elems}; {len}]"), + Type::Array(len, elements) => write!(f, "[{elements}; {len}]"), Type::Integer(sign, bits) => match sign { Signedness::Unsigned => 
write!(f, "u{bits}"), Signedness::Signed => write!(f, "i{bits}"), @@ -261,9 +261,9 @@ impl std::fmt::Display for Type { Type::Bool => write!(f, "bool"), Type::String(len) => write!(f, "str[{len}]"), Type::Unit => write!(f, "()"), - Type::Tuple(elems) => { - let elems = vecmap(elems, ToString::to_string); - write!(f, "({})", elems.join(", ")) + Type::Tuple(elements) => { + let elements = vecmap(elements, ToString::to_string); + write!(f, "({})", elements.join(", ")) } Type::Function(args, ret) => { let args = vecmap(args, ToString::to_string); diff --git a/crates/noirc_frontend/src/monomorphisation/mod.rs b/crates/noirc_frontend/src/monomorphization/mod.rs similarity index 95% rename from crates/noirc_frontend/src/monomorphisation/mod.rs rename to crates/noirc_frontend/src/monomorphization/mod.rs index 746e1203fd1..1aefd2246ad 100644 --- a/crates/noirc_frontend/src/monomorphisation/mod.rs +++ b/crates/noirc_frontend/src/monomorphization/mod.rs @@ -10,7 +10,7 @@ use crate::{ stmt::{HirAssignStatement, HirLValue, HirLetStatement, HirPattern, HirStatement}, }, node_interner::{self, DefinitionKind, NodeInterner, StmtId}, - Comptime, FunctionKind, TypeBinding, TypeBindings, + CompTime, FunctionKind, TypeBinding, TypeBindings, }; use self::ast::{Definition, FuncId, Function, LocalId, Program}; @@ -18,14 +18,14 @@ use self::ast::{Definition, FuncId, Function, LocalId, Program}; pub mod ast; pub mod printer; -struct Monomorphiser { - // Store monomorphised globals and locals separately, - // only locals are cleared on each function call and only globals are monomorphised. +struct Monomorphizer { + // Store monomorphized globals and locals separately, + // only locals are cleared on each function call and only globals are monomorphized. // Nested HashMaps in globals lets us avoid cloning HirTypes when calling .get() globals: HashMap>, locals: HashMap, - /// Queue of functions to monomorphise next + /// Queue of functions to monomorphize next queue: VecDeque<(node_interner::FuncId, FuncId, TypeBindings)>, finished_functions: BTreeMap, @@ -38,26 +38,26 @@ struct Monomorphiser { type HirType = crate::Type; -pub fn monomorphise(main: node_interner::FuncId, interner: NodeInterner) -> Program { - let mut monomorphiser = Monomorphiser::new(interner); - let abi = monomorphiser.compile_main(main); +pub fn monomorphize(main: node_interner::FuncId, interner: NodeInterner) -> Program { + let mut monomorphizer = Monomorphizer::new(interner); + let abi = monomorphizer.compile_main(main); - while !monomorphiser.queue.is_empty() { - let (next_fn_id, new_id, bindings) = monomorphiser.queue.pop_front().unwrap(); - monomorphiser.locals.clear(); + while !monomorphizer.queue.is_empty() { + let (next_fn_id, new_id, bindings) = monomorphizer.queue.pop_front().unwrap(); + monomorphizer.locals.clear(); perform_instantiation_bindings(&bindings); - monomorphiser.function(next_fn_id, new_id); + monomorphizer.function(next_fn_id, new_id); undo_instantiation_bindings(bindings); } - let functions = vecmap(monomorphiser.finished_functions, |(_, f)| f); + let functions = vecmap(monomorphizer.finished_functions, |(_, f)| f); Program::new(functions, abi) } -impl Monomorphiser { - fn new(interner: NodeInterner) -> Monomorphiser { - Monomorphiser { +impl Monomorphizer { + fn new(interner: NodeInterner) -> Monomorphizer { + Monomorphizer { globals: HashMap::new(), locals: HashMap::new(), queue: VecDeque::new(), @@ -94,7 +94,7 @@ impl Monomorphiser { match self.globals.get(&id).and_then(|inner_map| inner_map.get(&typ)) { Some(id) => 
Definition::Function(*id), None => { - // Function has not been monomorphised yet + // Function has not been monomorphized yet let meta = self.interner.function_meta(&id); match meta.kind { FunctionKind::LowLevel => { @@ -155,7 +155,7 @@ impl Monomorphiser { assert!(existing.is_none()); } - /// Monomorphise each parameter, expanding tuple/struct patterns into multiple parameters + /// Monomorphize each parameter, expanding tuple/struct patterns into multiple parameters /// and binding any generic types found. fn parameters(&mut self, params: Parameters) -> Vec<(ast::LocalId, bool, String, ast::Type)> { let mut new_params = Vec::with_capacity(params.len()); @@ -272,7 +272,7 @@ impl Monomorphiser { } HirExpression::If(if_expr) => { - let cond = self.expr(if_expr.condition, &HirType::Bool(Comptime::No(None))); + let cond = self.expr(if_expr.condition, &HirType::Bool(CompTime::No(None))); let then = self.expr(if_expr.consequence, typ); let else_ = if_expr.alternative.map(|alt| Box::new(self.expr(alt, typ))); ast::Expression::If(ast::If { @@ -299,7 +299,7 @@ impl Monomorphiser { match self.interner.statement(&id) { HirStatement::Let(let_statement) => self.let_statement(let_statement), HirStatement::Constrain(constrain) => { - let expr = self.expr(constrain.0, &HirType::Bool(Comptime::No(None))); + let expr = self.expr(constrain.0, &HirType::Bool(CompTime::No(None))); let location = self.interner.expr_location(&constrain.0); ast::Expression::Constrain(Box::new(expr), location) } @@ -462,7 +462,7 @@ impl Monomorphiser { } } - /// Convert a non-tuple/struct type to a monomorphised type + /// Convert a non-tuple/struct type to a monomorphized type fn convert_type(typ: &HirType) -> ast::Type { match typ { HirType::FieldElement(_) => ast::Type::Field, @@ -492,7 +492,7 @@ impl Monomorphiser { // after type checking, but care should be taken that it doesn't change which // impls are chosen. *binding.borrow_mut() = - TypeBinding::Bound(HirType::FieldElement(Comptime::No(None))); + TypeBinding::Bound(HirType::FieldElement(CompTime::No(None))); ast::Type::Field } @@ -538,9 +538,9 @@ impl Monomorphiser { })) } - /// Try to evaluate certain builtin functions (currently only 'arraylen' and field modulus methods) - /// at their callsite. - /// NOTE: Evaluating at the callsite means we cannot track aliased functions. + /// Try to evaluate certain builtin functions (currently only 'array_len' and field modulus methods) + /// at their call site. + /// NOTE: Evaluating at the call site means we cannot track aliased functions. /// E.g. `let f = std::array::len; f(arr)` will fail to evaluate. /// To fix this we need to evaluate on the identifier instead, which /// requires us to evaluate to a Lambda value which isn't in noir yet. @@ -551,7 +551,7 @@ impl Monomorphiser { ) -> Option { match func { ast::Expression::Ident(ident) => match &ident.definition { - Definition::Builtin(opcode) if opcode == "arraylen" => { + Definition::Builtin(opcode) if opcode == "array_len" => { let typ = self.interner.id_type(arguments[0]); let len = typ.evaluate_to_u64().unwrap(); Some(ast::Expression::Literal(ast::Literal::Integer( @@ -623,9 +623,9 @@ impl Monomorphiser { /// Follow any type variable links within the given TypeBindings to produce /// a new TypeBindings that won't be changed when bindings are pushed or popped - /// during {perform,undo}_monomorphisation_bindings. + /// during {perform,undo}_monomorphization_bindings. 
/// - /// Without this, a monomorphised type may fail to propagate passed more than 2 + /// Without this, a monomorphized type may fail to propagate past more than 2 /// function calls deep since it is possible for a previous link in the chain to /// unbind a type variable that was previously bound. fn follow_bindings(&self, bindings: &TypeBindings) -> TypeBindings { diff --git a/crates/noirc_frontend/src/monomorphisation/printer.rs b/crates/noirc_frontend/src/monomorphization/printer.rs similarity index 100% rename from crates/noirc_frontend/src/monomorphisation/printer.rs rename to crates/noirc_frontend/src/monomorphization/printer.rs diff --git a/crates/noirc_frontend/src/node_interner.rs b/crates/noirc_frontend/src/node_interner.rs index 5b1c5aa2efb..9cdc03253ad 100644 --- a/crates/noirc_frontend/src/node_interner.rs +++ b/crates/noirc_frontend/src/node_interner.rs @@ -152,8 +152,8 @@ pub struct NodeInterner { structs: HashMap>, /// Map from ExprId (referring to a Function/Method call) to its corresponding TypeBindings, - /// filled out during type checking from instantiated variables. Used during monomorphisation - /// to map callsite types back onto function parameter types, and undo this binding as needed. + /// filled out during type checking from instantiated variables. Used during monomorphization + /// to map call site types back onto function parameter types, and undo this binding as needed. instantiation_bindings: HashMap, /// Remembers the field index a given HirMemberAccess expression was resolved to during type diff --git a/crates/noirc_frontend/src/parser/parser.rs b/crates/noirc_frontend/src/parser/parser.rs index 22e0a352d83..686220c523c 100644 --- a/crates/noirc_frontend/src/parser/parser.rs +++ b/crates/noirc_frontend/src/parser/parser.rs @@ -8,7 +8,7 @@ use crate::lexer::Lexer; use crate::parser::{force, ignore_then_commit, statement_recovery}; use crate::token::{Attribute, Keyword, Token, TokenKind}; use crate::{ - BinaryOp, BinaryOpKind, BlockExpression, Comptime, ConstrainStatement, FunctionDefinition, + BinaryOp, BinaryOpKind, BlockExpression, CompTime, ConstrainStatement, FunctionDefinition, Ident, IfExpression, ImportStatement, InfixExpression, LValue, Lambda, NoirFunction, NoirImpl, NoirStruct, Path, PathKind, Pattern, Recoverable, UnaryOp, UnresolvedTypeExpression, }; @@ -164,7 +164,7 @@ fn function_return_type() -> impl NoirParser<(AbiVisibility, UnresolvedType)> { } fn attribute() -> impl NoirParser { - tokenkind(TokenKind::Attribute).map(|token| match token { + token_kind(TokenKind::Attribute).map(|token| match token { Token::Attribute(attribute) => attribute, _ => unreachable!(), }) @@ -277,10 +277,10 @@ fn check_statements_require_semicolon( fn global_type_annotation() -> impl NoirParser { ignore_then_commit(just(Token::Colon), parse_type()) .map(|r#type| match r#type { - UnresolvedType::FieldElement(_) => UnresolvedType::FieldElement(Comptime::Yes(None)), - UnresolvedType::Bool(_) => UnresolvedType::Bool(Comptime::Yes(None)), + UnresolvedType::FieldElement(_) => UnresolvedType::FieldElement(CompTime::Yes(None)), + UnresolvedType::Bool(_) => UnresolvedType::Bool(CompTime::Yes(None)), UnresolvedType::Integer(_, sign, size) => { - UnresolvedType::Integer(Comptime::Yes(None), sign, size) + UnresolvedType::Integer(CompTime::Yes(None), sign, size) } other => other, }) @@ -311,12 +311,12 @@ fn keyword(keyword: Keyword) -> impl NoirParser { just(Token::Keyword(keyword)) } -fn tokenkind(tokenkind: TokenKind) -> impl NoirParser { +fn token_kind(token_kind: 
TokenKind) -> impl NoirParser { filter_map(move |span, found: Token| { - if found.kind() == tokenkind { + if found.kind() == token_kind { Ok(found) } else { - Err(ParserError::expected_label(tokenkind.to_string(), found, span)) + Err(ParserError::expected_label(token_kind.to_string(), found, span)) } }) } @@ -336,7 +336,7 @@ fn path() -> impl NoirParser { } fn ident() -> impl NoirParser { - tokenkind(TokenKind::Ident).map_with_span(Ident::from_token) + token_kind(TokenKind::Ident).map_with_span(Ident::from_token) } fn statement<'a, P>(expr_parser: P) -> impl NoirParser + 'a @@ -378,11 +378,11 @@ fn pattern() -> impl NoirParser { .ignore_then(pattern.clone()) .map_with_span(|inner, span| Pattern::Mutable(Box::new(inner), span)); - let shortfield = ident().map(|name| (name.clone(), Pattern::Identifier(name))); - let longfield = ident().then_ignore(just(Token::Colon)).then(pattern.clone()); + let short_field = ident().map(|name| (name.clone(), Pattern::Identifier(name))); + let long_field = ident().then_ignore(just(Token::Colon)).then(pattern.clone()); - let struct_pattern_fields = longfield - .or(shortfield) + let struct_pattern_fields = long_field + .or(short_field) .separated_by(just(Token::Comma)) .delimited_by(just(Token::LeftBrace), just(Token::RightBrace)); @@ -404,9 +404,9 @@ fn assignment<'a, P>(expr_parser: P) -> impl NoirParser + 'a where P: ExprParser + 'a, { - let failable = lvalue(expr_parser.clone()).then(assign_operator()).labelled("statement"); + let fallible = lvalue(expr_parser.clone()).then(assign_operator()).labelled("statement"); - then_commit(failable, expr_parser).map_with_span( + then_commit(fallible, expr_parser).map_with_span( |((identifier, operator), expression), span| { Statement::assign(identifier, operator, expression, span) }, @@ -470,19 +470,19 @@ fn optional_visibility() -> impl NoirParser { }) } -fn maybe_comptime() -> impl NoirParser { - keyword(Keyword::Comptime).or_not().map(|opt| match opt { - Some(_) => Comptime::Yes(None), - None => Comptime::No(None), +fn maybe_comp_time() -> impl NoirParser { + keyword(Keyword::CompTime).or_not().map(|opt| match opt { + Some(_) => CompTime::Yes(None), + None => CompTime::No(None), }) } fn field_type() -> impl NoirParser { - maybe_comptime().then_ignore(keyword(Keyword::Field)).map(UnresolvedType::FieldElement) + maybe_comp_time().then_ignore(keyword(Keyword::Field)).map(UnresolvedType::FieldElement) } fn bool_type() -> impl NoirParser { - maybe_comptime().then_ignore(keyword(Keyword::Bool)).map(UnresolvedType::Bool) + maybe_comp_time().then_ignore(keyword(Keyword::Bool)).map(UnresolvedType::Bool) } fn string_type() -> impl NoirParser { @@ -494,7 +494,7 @@ fn string_type() -> impl NoirParser { } fn int_type() -> impl NoirParser { - maybe_comptime() + maybe_comp_time() .then(filter_map(|span, token: Token| match token { Token::IntType(int_type) => Ok(int_type), unexpected => { @@ -567,8 +567,8 @@ fn expression() -> impl ExprParser { .labelled("expression") } -// An expression is a single term followed by 0 or more (OP subexpr)* -// where OP is an operator at the given precedence level and subexpr +// An expression is a single term followed by 0 or more (OP subexpression)* +// where OP is an operator at the given precedence level and subexpression // is an expression at the current precedence level plus one. 
fn expression_with_precedence<'a, P>( precedence: Precedence, @@ -613,7 +613,7 @@ fn create_infix_expression(lhs: Expression, (operator, rhs): (BinaryOp, Expressi fn operator_with_precedence(precedence: Precedence) -> impl NoirParser> { filter_map(move |span, token: Token| { if Precedence::token_precedence(&token) == Some(precedence) { - Ok(token.try_into_binop(span).unwrap()) + Ok(token.try_into_binary_op(span).unwrap()) } else { Err(ParserError::expected_label("binary operator".to_string(), token, span)) } @@ -656,7 +656,7 @@ where MemberAccess((Ident, Option>)), } - // `(arg1, ..., argN)` in `myfunc(arg1, ..., argN)` + // `(arg1, ..., argN)` in `my_func(arg1, ..., argN)` let call_rhs = parenthesized(expression_list(expr_parser.clone())).map(UnaryRhs::Call); // `[expr]` in `arr[expr]` @@ -763,14 +763,14 @@ where { expression_list(expr_parser) .delimited_by(just(Token::LeftBracket), just(Token::RightBracket)) - .validate(|elems, span, emit| { - if elems.is_empty() { + .validate(|elements, span, emit| { + if elements.is_empty() { emit(ParserError::with_reason( "Arrays must have at least one element".to_owned(), span, )) } - ExpressionKind::array(elems) + ExpressionKind::array(elements) }) } @@ -851,7 +851,7 @@ where } fn field_name() -> impl NoirParser { - ident().or(tokenkind(TokenKind::Literal).validate(|token, span, emit| match token { + ident().or(token_kind(TokenKind::Literal).validate(|token, span, emit| match token { Token::Int(_) => Ident::from(Spanned::from(span, token.to_string())), other => { let reason = format!("Unexpected '{other}', expected a field name"); @@ -888,7 +888,7 @@ fn variable() -> impl NoirParser { } fn literal() -> impl NoirParser { - tokenkind(TokenKind::Literal).map(|token| match token { + token_kind(TokenKind::Literal).map(|token| match token { Token::Int(x) => ExpressionKind::integer(x), Token::Bool(b) => ExpressionKind::boolean(b), Token::Str(s) => ExpressionKind::string(s), @@ -1046,7 +1046,7 @@ mod test { for expr in parse_all(array_expr(expression()), valid) { match expr_to_array(expr) { - ArrayLiteral::Standard(elems) => assert_eq!(elems.len(), 5), + ArrayLiteral::Standard(elements) => assert_eq!(elements.len(), 5), ArrayLiteral::Repeated { length, .. } => { assert_eq!(length.kind, ExpressionKind::integer(5i128.into())) } diff --git a/crates/readme.md b/crates/readme.md index d51643891dc..6e1d6efc9f8 100644 --- a/crates/readme.md +++ b/crates/readme.md @@ -37,7 +37,7 @@ This crate can be seen as the middle end. It is in charge of generating the ACIR ## noirc_frontend -This crate comprises of the first few compiler passes that together we denote as the compiler frontend (in order): lexing, parsing, name resolution, type checking, and monomorphisation. If any of these passes error, the resulting monomorphised AST will not be passed to the middle-end (noirc_evaluator) +This crate comprises of the first few compiler passes that together we denote as the compiler frontend (in order): lexing, parsing, name resolution, type checking, and monomorphization. 
If any of these passes error, the resulting monomorphized AST will not be passed to the middle-end (noirc_evaluator) ## wasm diff --git a/crates/wasm/src/lib.rs b/crates/wasm/src/lib.rs index 84390074188..da1394f79cb 100644 --- a/crates/wasm/src/lib.rs +++ b/crates/wasm/src/lib.rs @@ -13,7 +13,7 @@ pub fn compile(src: String) -> JsValue { let compiled_program = noirc_driver::Driver::compile_file(path, language); ::from_serde(&compiled_program).unwrap() } -// Deserialises bytes into ACIR structure +// Deserializes bytes into ACIR structure #[wasm_bindgen] pub fn acir_from_bytes(bytes: Vec) -> JsValue { console_error_panic_hook::set_once(); diff --git a/cspell.json b/cspell.json new file mode 100644 index 00000000000..3fe9b47540f --- /dev/null +++ b/cspell.json @@ -0,0 +1,74 @@ +{ + "version": "0.2", + "words": [ + // In code + // + "aeiou", + "arithmetization", + "arity", + "barretenberg", + "blackbox", + "codegen", + "coeff", + "comptime", + "desugared", + "forall", + "foralls", + "higher-kinded", + "Hindley-Milner", + "idents", + "impls", + "injective", + "interner", + "krate", + "lvalue", + "merkle", + "monomorphization", + "monomorphize", + "monomorphized", + "monomorphizer", + "pedersen", + "peekable", + "schnorr", + "sdiv", + "signedness", + "srem", + "stdlib", + "struct", + "subexpression", + "typevar", + "typevars", + "udiv", + "uninstantiated", + "urem", + "vecmap", + // Dependencies + // + "acir", + "acvm", + "arkworks", + "bindgen", + "bitand", + "canonicalize", + "chumsky", + "clippy", + "deque", + "foldl", + "formatcp", + "gloo", + "hasher", + "hexdigit", + "nargo", + "noirc", + "noirup", + "plonkc", + "rustc", + "rustup", + "secp256k1", + "smol", + "tempdir", + "tempfile", + "termcolor", + "thiserror" + ] +} \ No newline at end of file diff --git a/noir_stdlib/src/array.nr b/noir_stdlib/src/array.nr index 30b146cdab8..b31b967ec8e 100644 --- a/noir_stdlib/src/array.nr +++ b/noir_stdlib/src/array.nr @@ -1,4 +1,4 @@ -#[builtin(arraylen)] +#[builtin(array_len)] fn len(_input : [T]) -> comptime Field {} // insertion sort - n.b. it is a quadratic sort