diff --git a/compiler/src/llvm/builder.rs b/compiler/src/llvm/builder.rs index 7092003f..c8c36f75 100644 --- a/compiler/src/llvm/builder.rs +++ b/compiler/src/llvm/builder.rs @@ -3,7 +3,6 @@ use crate::llvm::context::Context; use crate::llvm::module::Module; use crate::llvm::runtime_function::RuntimeFunction; use crate::symbol_names::SymbolNames; -use inkwell::attributes::AttributeLoc; use inkwell::basic_block::BasicBlock; use inkwell::builder; use inkwell::debug_info::{ @@ -197,7 +196,7 @@ impl<'ctx> Builder<'ctx> { self.load(self.context.pointer_type(), variable).into_pointer_value() } - pub(crate) fn call( + pub(crate) fn call_with_return( &self, function: FunctionValue<'ctx>, arguments: &[BasicMetadataValueEnum<'ctx>], @@ -219,7 +218,7 @@ impl<'ctx> Builder<'ctx> { self.inner.build_indirect_call(typ, func, args, "").unwrap() } - pub(crate) fn call_void( + pub(crate) fn direct_call( &self, function: FunctionValue<'ctx>, arguments: &[BasicMetadataValueEnum<'ctx>], @@ -227,19 +226,6 @@ impl<'ctx> Builder<'ctx> { self.inner.build_call(function, arguments, "").unwrap() } - pub(crate) fn load_for_struct_return( - &self, - call: CallSiteValue<'ctx>, - typ: StructType<'ctx>, - tmp: PointerValue<'ctx>, - result: PointerValue<'ctx>, - ) { - let sret = self.context.type_attribute("sret", typ.into()); - - call.add_attribute(AttributeLoc::Param(0), sret); - self.store(result, self.load(typ, tmp)); - } - pub(crate) fn pointer_to_int( &self, value: PointerValue<'ctx>, @@ -792,7 +778,7 @@ impl<'ctx> Builder<'ctx> { // The block to jump to when the allocation failed. self.switch_to_block(err_block); - self.call_void(err_func, &[size.into()]); + self.direct_call(err_func, &[size.into()]); self.unreachable(); // The block to jump to when the allocation succeeds. diff --git a/compiler/src/llvm/context.rs b/compiler/src/llvm/context.rs index 659a217c..760af257 100644 --- a/compiler/src/llvm/context.rs +++ b/compiler/src/llvm/context.rs @@ -1,12 +1,13 @@ -use crate::llvm::layouts::Layouts; +use crate::llvm::layouts::{ArgumentType, Layouts, ReturnType}; use crate::state::State; +use crate::target::Architecture; use inkwell::attributes::Attribute; use inkwell::basic_block::BasicBlock; use inkwell::builder::Builder; use inkwell::module::Module; use inkwell::types::{ - AnyTypeEnum, ArrayType, BasicMetadataTypeEnum, BasicType, BasicTypeEnum, - FloatType, IntType, PointerType, StructType, VoidType, + AnyTypeEnum, ArrayType, BasicType, BasicTypeEnum, FloatType, IntType, + PointerType, StructType, VoidType, }; use inkwell::values::FunctionValue; use inkwell::{context, AddressSpace}; @@ -50,6 +51,10 @@ impl Context { self.inner.bool_type() } + pub(crate) fn custom_int(&self, bits: u32) -> IntType { + self.inner.custom_width_int_type(bits) + } + pub(crate) fn i8_type(&self) -> IntType { self.inner.i8_type() } @@ -205,27 +210,38 @@ impl Context { state: &State, layouts: &Layouts<'ctx>, typ: BasicTypeEnum<'ctx>, - ) -> BasicTypeEnum<'ctx> { - // TODO: on AMD64 we can pass around structs <= 16 bytes as-is, but only - // if there are only two fields. On ARM64 we need to round them up to - // one or two pairs of i64 values: - // - // | Arch | Original | Result - // |-------|-------------------|----------------- - // | AMD64 | { i64, i32 } | { i64, i32 } - // | AMD64 | { i64, i32, i32 } | ptr - // | ARM64 | { i64, i32 } | { i64, i64 } - // | ARM64 | { i64, i32, i32 } | ptr (I think?) - // - // For anything larger than 16 bytes, pass as a pointer. 
- let BasicTypeEnum::StructType(typ) = typ else { return typ }; - let max = state.config.target.pass_struct_size(); - let size = layouts.target_data.get_abi_size(&typ); - - if size > max { - self.pointer_type().as_basic_type_enum() - } else { - typ.as_basic_type_enum() + ) -> ArgumentType<'ctx> { + let BasicTypeEnum::StructType(typ) = typ else { + return ArgumentType::Regular(typ); + }; + let bytes = layouts.target_data.get_abi_size(&typ) as u32; + + match state.config.target.arch { + Architecture::Amd64 => { + if bytes <= 8 { + let bits = self.custom_int(bytes * 8); + + ArgumentType::Regular(bits.as_basic_type_enum()) + } else if bytes <= 16 { + ArgumentType::Regular(typ.as_basic_type_enum()) + } else { + ArgumentType::StructValue(typ) + } + } + Architecture::Arm64 => { + if bytes <= 8 { + ArgumentType::Regular(self.i64_type().as_basic_type_enum()) + } else if bytes <= 16 { + let word = self.i64_type().into(); + let sret = self.struct_type(&[word, word]); + + ArgumentType::Regular(sret.as_basic_type_enum()) + } else { + // clang and Rust don't use "byval" for ARM64 when the + // struct is too large, so neither do we. + ArgumentType::Pointer + } + } } } @@ -234,17 +250,17 @@ impl Context { state: &State, layouts: &Layouts<'ctx>, method: MethodId, - args: &mut Vec>, - ) -> (Option>, Option>) { + ) -> ReturnType<'ctx> { if method.returns_value(&state.db) { - self.return_type( - state, + let typ = self.llvm_type( + &state.db, layouts, - args, method.return_type(&state.db), - ) + ); + + self.return_type(state, layouts, typ) } else { - (None, None) + ReturnType::None } } @@ -252,36 +268,41 @@ impl Context { &'ctx self, state: &State, layouts: &Layouts<'ctx>, - args: &mut Vec>, - returns: TypeRef, - ) -> (Option>, Option>) { - // The regular return type, and the type of the structure to pass with - // the `sret` attribute. If `ret` is `None`, it means the function - // returns `void`. If `sret` is `None`, it means the function doesn't - // return a struct. - let mut ret = None; - let mut sret = None; - let typ = self.llvm_type(&state.db, layouts, returns); - - // The C ABI mandates that structures are either passed through - // registers (if small enough), or using a pointer. LLVM doesn't - // detect when this is needed for us, so sadly we (and everybody - // else using LLVM) have to do this ourselves. 
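// -- Reviewer sketch, not part of the patch ---------------------------------
// A plain-Rust restatement of the rules the new `argument_type` implements,
// written against raw byte sizes. The real code queries LLVM's TargetData for
// the ABI size; `Architecture` comes from `crate::target`, everything else
// here (`PassAs`, `classify_argument`) is illustrative only.
use crate::target::Architecture;

enum PassAs {
    Int(u32),                // coerce to an iN integer of N bits
    Pair,                    // coerce to { i64, i64 }
    Unchanged,               // pass the struct type as-is
    Pointer { byval: bool }, // pass a pointer, optionally with "byval"
}

fn classify_argument(arch: Architecture, abi_size_in_bytes: u32) -> PassAs {
    match (arch, abi_size_in_bytes) {
        (Architecture::Amd64, 0..=8) => PassAs::Int(abi_size_in_bytes * 8),
        (Architecture::Amd64, 9..=16) => PassAs::Unchanged,
        (Architecture::Amd64, _) => PassAs::Pointer { byval: true },
        (Architecture::Arm64, 0..=8) => PassAs::Int(64),
        (Architecture::Arm64, 9..=16) => PassAs::Pair,
        (Architecture::Arm64, _) => PassAs::Pointer { byval: false },
    }
}

// For example, a C struct `{ int32_t a, b, c; }` (ABI size 12) used as an
// argument produces roughly:
//
//   AMD64: declare void @f({ i32, i32, i32 })
//   ARM64: declare void @f({ i64, i64 })
//
// while a 24-byte struct becomes `ptr byval(...)` on AMD64 and a plain `ptr`
// on ARM64. The same classification is applied to return values just below,
// with anything over 16 bytes returned through an sret pointer instead.
// ----------------------------------------------------------------------------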
- if let BasicTypeEnum::StructType(typ) = typ { - if layouts.target_data.get_abi_size(&typ) - > state.config.target.pass_struct_size() - { - args.push(self.pointer_type().into()); - sret = Some(typ); - } else { - // TODO: pass as [2 x i64] when necessary - ret = Some(typ.as_basic_type_enum()); + typ: BasicTypeEnum<'ctx>, + ) -> ReturnType<'ctx> { + let BasicTypeEnum::StructType(typ) = typ else { + return ReturnType::Regular(typ); + }; + + let bytes = layouts.target_data.get_abi_size(&typ) as u32; + + match state.config.target.arch { + Architecture::Amd64 => { + if bytes <= 8 { + let bits = self.custom_int(bytes * 8); + + ReturnType::Regular(bits.as_basic_type_enum()) + } else if bytes <= 16 { + ReturnType::Regular(typ.as_basic_type_enum()) + } else { + ReturnType::Struct(typ) + } + } + Architecture::Arm64 => { + if bytes <= 8 { + let bits = self.custom_int(bytes * 8); + + ReturnType::Regular(bits.as_basic_type_enum()) + } else if bytes <= 16 { + let word = self.i64_type().into(); + let sret = self.struct_type(&[word, word]); + + ReturnType::Regular(sret.as_basic_type_enum()) + } else { + ReturnType::Struct(typ) + } } - } else { - ret = Some(typ); } - - (ret, sret) } } diff --git a/compiler/src/llvm/layouts.rs b/compiler/src/llvm/layouts.rs index 60d56edb..29307a3c 100644 --- a/compiler/src/llvm/layouts.rs +++ b/compiler/src/llvm/layouts.rs @@ -4,12 +4,12 @@ use crate::state::State; use crate::target::OperatingSystem; use inkwell::targets::TargetData; use inkwell::types::{ - BasicMetadataTypeEnum, BasicType, FunctionType, StructType, + BasicMetadataTypeEnum, BasicType, BasicTypeEnum, FunctionType, StructType, }; use std::collections::VecDeque; use types::{ - CallConvention, ClassId, Database, TypeId, TypeRef, BOOL_ID, BYTE_ARRAY_ID, - FLOAT_ID, INT_ID, NIL_ID, STRING_ID, + CallConvention, ClassId, Database, MethodId, TypeId, TypeRef, BOOL_ID, + BYTE_ARRAY_ID, FLOAT_ID, INT_ID, NIL_ID, STRING_ID, }; /// The size of an object header. @@ -44,18 +44,128 @@ impl Sized { } #[derive(Copy, Clone)] -pub(crate) struct Method<'ctx> { - pub(crate) signature: FunctionType<'ctx>, +pub(crate) enum ArgumentType<'ctx> { + /// The argument should be passed as a normal value. + Regular(BasicTypeEnum<'ctx>), + + /// The argument should be passed as a pointer. + Pointer, + + /// The argument should be a pointer to a struct that's passed using the + /// "byval" attribute. + StructValue(StructType<'ctx>), + + /// The argument is the struct return argument. + StructReturn(StructType<'ctx>), +} + +#[derive(Copy, Clone)] +pub(crate) enum ReturnType<'ctx> { + /// The function doesn't return anything. + None, + + /// The function returns a regular value. + Regular(BasicTypeEnum<'ctx>), + + /// The function returns a structure using the ABIs struct return + /// convention. + Struct(StructType<'ctx>), +} + +impl<'ctx> ReturnType<'ctx> { + pub(crate) fn is_struct(self) -> bool { + matches!(self, ReturnType::Struct(_)) + } + pub(crate) fn is_regular(self) -> bool { + matches!(self, ReturnType::Regular(_)) + } +} + +#[derive(Clone)] +pub(crate) struct Method<'ctx> { /// The calling convention to use for this method. pub(crate) call_convention: CallConvention, - /// If the function returns a structure on the stack, its type is stored - /// here. - /// - /// This is needed separately because the signature's return type will be - /// `void` in this case. - pub(crate) struct_return: Option>, + /// If the method is a variadic method or not. + pub(crate) variadic: bool, + + /// The return type, if any. 
+ pub(crate) returns: ReturnType<'ctx>, + + /// The types of the arguments. + pub(crate) arguments: Vec>, +} + +impl<'ctx> Method<'ctx> { + pub(crate) fn new() -> Method<'ctx> { + Method { + call_convention: CallConvention::Inko, + variadic: false, + returns: ReturnType::None, + arguments: Vec::new(), + } + } + + pub(crate) fn regular( + state: &State, + context: &'ctx Context, + layouts: &Layouts<'ctx>, + method: MethodId, + ) -> Method<'ctx> { + let db = &state.db; + let ret = context.method_return_type(state, layouts, method); + let mut args = if let ReturnType::Struct(t) = ret { + vec![ArgumentType::StructReturn(t)] + } else { + Vec::new() + }; + + for &typ in method + .is_instance(db) + .then(|| method.receiver(db)) + .iter() + .chain(method.argument_types(db)) + { + let raw = context.llvm_type(db, layouts, typ); + let typ = context.argument_type(state, layouts, raw); + + args.push(typ.into()); + } + + Method { + call_convention: CallConvention::new(method.is_extern(db)), + variadic: method.is_variadic(db), + arguments: args, + returns: ret, + } + } + + pub(crate) fn signature( + &self, + context: &'ctx Context, + ) -> FunctionType<'ctx> { + let var = self.variadic; + let mut args: Vec = Vec::new(); + + for &arg in &self.arguments { + match arg { + ArgumentType::Regular(t) => args.push(t.into()), + ArgumentType::StructValue(_) + | ArgumentType::StructReturn(_) + | ArgumentType::Pointer => { + args.push(context.pointer_type().into()) + } + } + } + + match self.returns { + ReturnType::None | ReturnType::Struct(_) => { + context.void_type().fn_type(&args, var) + } + ReturnType::Regular(t) => t.fn_type(&args, var), + } + } } /// Types and layout information to expose to all modules. @@ -173,12 +283,6 @@ impl<'ctx> Layouts<'ctx> { // optimizations, but that's OK as in the worst case we just waste a few // KiB. let num_methods = db.number_of_methods(); - let dummy_method = Method { - call_convention: CallConvention::Inko, - signature: context.void_type().fn_type(&[], false), - struct_return: None, - }; - let mut layouts = Self { target_data, empty_class: context.class_type(method), @@ -187,7 +291,7 @@ impl<'ctx> Layouts<'ctx> { state: state_layout, header, method_counts: method_counts_layout, - methods: vec![dummy_method; num_methods], + methods: vec![Method::new(); num_methods], process_stack_data: stack_data_layout, }; @@ -280,29 +384,34 @@ impl<'ctx> Layouts<'ctx> { // For each constructor argument we generate a field with an // opaque type. The size of this type must equal that of the // largest type. - let mut opaque_types = - vec![ - context.i8_type().array_type(1).as_basic_type_enum(); - fields.len() - 1 - ]; + // + // We use a custom width integer type for the opaque data. This + // ensures that when passing stack allocated enums around, LLVM + // passes the opaque data as a single value/register. In + // contrast, using e.g. `[8 x i8]` instead of `i64` may result + // in LLVM trying to pass the values separately, resulting in + // code that doesn't conform to the platform's ABI. 
+ let mut opaque = vec![ + context.custom_int(0).as_basic_type_enum(); + fields.len() - 1 + ]; for con in id.constructors(db) { for (idx, &typ) in con.arguments(db).iter().enumerate() { let llvm_typ = context.llvm_type(db, self, typ); let size = self.target_data.get_abi_size(&llvm_typ); - let ex = - self.target_data.get_abi_size(&opaque_types[idx]); + let ex = self.target_data.get_abi_size(&opaque[idx]); if size > ex { - opaque_types[idx] = context - .i8_type() - .array_type(size as _) - .as_basic_type_enum(); + let bits = size as u32 * 8; + + opaque[idx] = + context.custom_int(bits).as_basic_type_enum(); } } } - types.append(&mut opaque_types); + types.append(&mut opaque); sized.set_has_size(id); layout.set_body(&types, false); continue; @@ -349,34 +458,10 @@ impl<'ctx> Layouts<'ctx> { mir: &Mir, context: &'ctx Context, ) { - let db = &state.db; - for calls in mir.dynamic_calls.values() { - for (method, _) in calls { - let mut args: Vec = Vec::new(); - let (ret, sret) = - context.method_return_type(state, self, *method, &mut args); - - for &typ in [method.receiver(db)] - .iter() - .chain(method.argument_types(db)) - { - let raw = context.llvm_type(db, self, typ); - let typ = context.argument_type(state, self, raw); - - args.push(typ.into()); - } - - let signature = - ret.map(|t| t.fn_type(&args, false)).unwrap_or_else(|| { - context.void_type().fn_type(&args, false) - }); - - self.methods[method.0 as usize] = Method { - call_convention: CallConvention::new(method.is_extern(db)), - signature, - struct_return: sret, - }; + for (id, _) in calls { + self.methods[id.0 as usize] = + Method::regular(state, context, self, *id); } } } @@ -390,99 +475,32 @@ impl<'ctx> Layouts<'ctx> { let db = &state.db; for mir_class in mir.classes.values() { - // Define the method signatures once (so we can cheaply retrieve - // them whenever needed), and assign the methods to their method - // table slots. - for &method in &mir_class.methods { - let (typ, sret) = if method.is_async(db) { - ( - context - .void_type() - .fn_type(&[context.pointer_type().into()], false), - None, - ) - } else { - let mut args: Vec = Vec::new(); - let (ret, sret) = context - .method_return_type(state, self, method, &mut args); - - // For instance methods, the receiver is passed as an - // explicit argument before any user-defined arguments. 
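// -- Reviewer sketch, not part of the patch ---------------------------------
// A standalone inkwell program showing the difference the custom-width
// integer payload (introduced above) makes: an i128 payload is a single
// first-class value, while a [16 x i8] payload is an aggregate that LLVM may
// split up when the surrounding struct is passed by value. The layouts below
// are illustrative, not the exact enum layout the compiler generates.
use inkwell::context::Context;

fn main() {
    let ctx = Context::create();
    let tag = ctx.i64_type();

    // What the layout code now generates for a 16-byte constructor argument:
    let as_int = ctx.custom_width_int_type(128);
    let new_layout = ctx.struct_type(&[tag.into(), as_int.into()], false);

    // What it used to generate:
    let as_bytes = ctx.i8_type().array_type(16);
    let old_layout = ctx.struct_type(&[tag.into(), as_bytes.into()], false);

    println!("{:?}\n{:?}", new_layout, old_layout);
}
// ----------------------------------------------------------------------------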
- for &typ in method - .is_instance(db) - .then(|| method.receiver(db)) - .iter() - .chain(method.argument_types(db)) - { - let raw = context.llvm_type(db, self, typ); - let typ = context.argument_type(state, self, raw); - - args.push(typ.into()); + for &id in &mir_class.methods { + self.methods[id.0 as usize] = if id.is_async(db) { + let args = vec![ArgumentType::Regular( + context.pointer_type().as_basic_type_enum(), + )]; + + Method { + call_convention: CallConvention::Inko, + variadic: false, + arguments: args, + returns: ReturnType::None, } - - ( - ret.map(|t| t.fn_type(&args, false)).unwrap_or_else( - || context.void_type().fn_type(&args, false), - ), - sret, - ) - }; - - self.methods[method.0 as usize] = Method { - call_convention: CallConvention::new(method.is_extern(db)), - signature: typ, - struct_return: sret, + } else { + Method::regular(state, context, self, id) }; } } - for &method in mir.methods.keys().filter(|m| m.is_static(db)) { - let mut args: Vec = Vec::new(); - let (ret, sret) = - context.method_return_type(state, self, method, &mut args); - - for &typ in method.argument_types(db) { - let raw = context.llvm_type(db, self, typ); - let typ = context.argument_type(state, self, raw); - - args.push(typ.into()); - } - - let typ = ret - .map(|t| t.fn_type(&args, false)) - .unwrap_or_else(|| context.void_type().fn_type(&args, false)); - - self.methods[method.0 as usize] = Method { - call_convention: CallConvention::new(method.is_extern(db)), - signature: typ, - struct_return: sret, - }; + for &id in mir.methods.keys().filter(|m| m.is_static(db)) { + self.methods[id.0 as usize] = + Method::regular(state, context, self, id); } - for &method in &mir.extern_methods { - let mut args: Vec = - Vec::with_capacity(method.number_of_arguments(db) + 1); - let (ret, sret) = - context.method_return_type(state, self, method, &mut args); - - for &typ in method.argument_types(db) { - let raw = context.llvm_type(db, self, typ); - let typ = context.argument_type(state, self, raw); - - args.push(typ.into()); - } - - let variadic = method.is_variadic(db); - let sig = - ret.map(|t| t.fn_type(&args, variadic)).unwrap_or_else(|| { - context.void_type().fn_type(&args, variadic) - }); - - self.methods[method.0 as usize] = Method { - call_convention: CallConvention::C, - signature: sig, - struct_return: sret, - }; + for &id in &mir.extern_methods { + self.methods[id.0 as usize] = + Method::regular(state, context, self, id); } } } diff --git a/compiler/src/llvm/module.rs b/compiler/src/llvm/module.rs index afc0cc11..6a8ec238 100644 --- a/compiler/src/llvm/module.rs +++ b/compiler/src/llvm/module.rs @@ -1,6 +1,6 @@ use crate::llvm::builder::DebugBuilder; use crate::llvm::context::Context; -use crate::llvm::layouts::Layouts; +use crate::llvm::layouts::{ArgumentType, Layouts}; use crate::llvm::runtime_function::RuntimeFunction; use crate::symbol_names::SYMBOL_PREFIX; use inkwell::attributes::AttributeLoc; @@ -100,7 +100,8 @@ impl<'a, 'ctx> Module<'a, 'ctx> { ) -> FunctionValue<'ctx> { self.inner.get_function(name).unwrap_or_else(|| { let info = &self.layouts.methods[method.0 as usize]; - let func = self.inner.add_function(name, info.signature, None); + let fn_typ = info.signature(self.context); + let fn_val = self.inner.add_function(name, fn_typ, None); let conv = match info.call_convention { // LLVM uses 0 for the C calling convention. 
CallConvention::C => 0, @@ -110,19 +111,33 @@ impl<'a, 'ctx> Module<'a, 'ctx> { CallConvention::Inko => 0, }; - func.set_call_conventions(conv); - - if let Some(typ) = info.struct_return { - let sret = self.context.type_attribute("sret", typ.into()); - let noalias = self.context.enum_attribute("noalias", 0); - let nocapt = self.context.enum_attribute("nocapture", 0); - - func.add_attribute(AttributeLoc::Param(0), sret); - func.add_attribute(AttributeLoc::Param(0), noalias); - func.add_attribute(AttributeLoc::Param(0), nocapt); + fn_val.set_call_conventions(conv); + + for (idx, &arg) in info.arguments.iter().enumerate() { + match arg { + ArgumentType::StructValue(t) => { + fn_val.add_attribute( + AttributeLoc::Param(idx as _), + self.context.type_attribute("byval", t.into()), + ); + } + ArgumentType::StructReturn(t) => { + let loc = AttributeLoc::Param(0); + let sret = + self.context.type_attribute("sret", t.into()); + let noalias = self.context.enum_attribute("noalias", 0); + let nocapt = + self.context.enum_attribute("nocapture", 0); + + fn_val.add_attribute(loc, sret); + fn_val.add_attribute(loc, noalias); + fn_val.add_attribute(loc, nocapt); + } + _ => {} + } } - func + fn_val }) } diff --git a/compiler/src/llvm/passes.rs b/compiler/src/llvm/passes.rs index dd97e4ed..c5af272b 100644 --- a/compiler/src/llvm/passes.rs +++ b/compiler/src/llvm/passes.rs @@ -8,7 +8,9 @@ use crate::llvm::constants::{ STACK_DATA_EPOCH_INDEX, STACK_DATA_PROCESS_INDEX, STATE_EPOCH_INDEX, }; use crate::llvm::context::Context; -use crate::llvm::layouts::Layouts; +use crate::llvm::layouts::{ + ArgumentType, Layouts, Method as MethodLayout, ReturnType, +}; use crate::llvm::methods::Methods; use crate::llvm::module::Module; use crate::llvm::runtime_function::RuntimeFunction; @@ -20,6 +22,7 @@ use crate::state::State; use crate::symbol_names::{SymbolNames, STACK_MASK_GLOBAL, STATE_GLOBAL}; use crate::target::Architecture; use blake3::{hash, Hasher}; +use inkwell::attributes::AttributeLoc; use inkwell::basic_block::BasicBlock; use inkwell::debug_info::AsDIScope as _; use inkwell::module::Linkage; @@ -28,9 +31,7 @@ use inkwell::targets::{ CodeModel, FileType, InitializationConfig, RelocMode, Target, TargetMachine, TargetTriple, }; -use inkwell::types::{ - BasicMetadataTypeEnum, BasicType, BasicTypeEnum, FunctionType, StructType, -}; +use inkwell::types::{BasicType, BasicTypeEnum, FunctionType}; use inkwell::values::{ BasicMetadataValueEnum, BasicValue, BasicValueEnum, FloatValue, FunctionValue, GlobalValue, IntValue, PointerValue, @@ -682,7 +683,10 @@ impl<'shared, 'module, 'ctx> LowerModule<'shared, 'module, 'ctx> { ); builder - .call(class_new, &[name_ptr, size.into(), methods_len]) + .call_with_return( + class_new, + &[name_ptr, size.into(), methods_len], + ) .into_pointer_value() } }; @@ -895,7 +899,7 @@ impl<'shared, 'module, 'ctx> LowerModule<'shared, 'module, 'ctx> { let len = builder.u64_literal(value.len() as u64).into(); let func = self.module.runtime_function(RuntimeFunction::StringNew); - builder.call(func, &[state.into(), bytes_var.into(), len]) + builder.call_with_return(func, &[state.into(), bytes_var.into(), len]) } fn load_state(&mut self, builder: &Builder<'ctx>) -> PointerValue<'ctx> { @@ -925,7 +929,8 @@ pub struct LowerMethod<'shared, 'module, 'ctx> { /// The LLVM types for each MIR register. variable_types: HashMap>, - /// The temporary value to use for struct returns. + /// The pointer to write structs to when performing an ABI compliant + /// structure return. 
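// -- Reviewer sketch, not part of the patch ---------------------------------
// What the `type_attribute`/`enum_attribute` wrappers used above boil down to
// in plain inkwell, for a declaration that returns a large struct through an
// sret pointer. Module and function names are made up.
use inkwell::attributes::{Attribute, AttributeLoc};
use inkwell::context::Context;
use inkwell::AddressSpace;

fn main() {
    let ctx = Context::create();
    let module = ctx.create_module("example");
    let i64t = ctx.i64_type();
    let big = ctx.struct_type(&[i64t.into(), i64t.into(), i64t.into()], false);

    // void @f(ptr sret(%big) noalias), i.e. the struct return argument.
    let ptr = ctx.i8_type().ptr_type(AddressSpace::default());
    let fn_typ = ctx.void_type().fn_type(&[ptr.into()], false);
    let func = module.add_function("f", fn_typ, None);
    let sret = ctx.create_type_attribute(
        Attribute::get_named_enum_kind_id("sret"),
        big.into(),
    );
    let noalias = ctx
        .create_enum_attribute(Attribute::get_named_enum_kind_id("noalias"), 0);

    func.add_attribute(AttributeLoc::Param(0), sret);
    func.add_attribute(AttributeLoc::Param(0), noalias);
}
// ----------------------------------------------------------------------------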
struct_return_value: Option>, } @@ -943,12 +948,10 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { builder.switch_to_block(entry_block); - let sret = - if layouts.methods[method.id.0 as usize].struct_return.is_some() { - Some(builder.argument(0).into_pointer_value()) - } else { - None - }; + let sret = layouts.methods[method.id.0 as usize] + .returns + .is_struct() + .then(|| builder.argument(0).into_pointer_value()); LowerMethod { shared, @@ -983,7 +986,10 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let var = self.variables[reg]; let typ = self.variable_types[reg]; - // TODO: can we avoid this load? + // Depending on the ABI requirements we may pass a struct in as a + // pointer, but expect it as a value. In this case we need to load + // the argument pointer's value into the stack slot, instead of + // loading the argument as-is. if typ.is_struct_type() && arg.is_pointer_value() { let val = self.builder.load(typ, arg.into_pointer_value()); @@ -1258,7 +1264,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(func, &[val.into()]) + .call_with_return(func, &[val.into()]) .into_float_value(); self.builder.store(reg_var, res); @@ -1274,7 +1280,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(func, &[val.into()]) + .call_with_return(func, &[val.into()]) .into_float_value(); self.builder.store(reg_var, res); @@ -1362,7 +1368,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let pos_val = self .builder - .call(fabs, &[val.into()]) + .call_with_return(fabs, &[val.into()]) .into_float_value(); let pos_inf = self.builder.f64_literal(f64::INFINITY); @@ -1389,7 +1395,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(func, &[val.into()]) + .call_with_return(func, &[val.into()]) .into_float_value(); self.builder.store(reg_var, res); @@ -1411,7 +1417,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(func, &[lhs.into(), rhs.into()]) + .call_with_return(func, &[lhs.into(), rhs.into()]) .into_float_value(); self.builder.store(reg_var, res); @@ -1428,7 +1434,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { ); let res = self .builder - .call(func, &[lhs, lhs, rhs]) + .call_with_return(func, &[lhs, lhs, rhs]) .into_int_value(); self.builder.store(reg_var, res); @@ -1445,7 +1451,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { ); let res = self .builder - .call(func, &[lhs, lhs, rhs]) + .call_with_return(func, &[lhs, lhs, rhs]) .into_int_value(); self.builder.store(reg_var, res); @@ -1523,7 +1529,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(add, &[lhs.into(), rhs.into()]) + .call_with_return(add, &[lhs.into(), rhs.into()]) .into_struct_value(); self.builder.store(reg_var, res); @@ -1541,7 +1547,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(add, &[lhs.into(), rhs.into()]) + .call_with_return(add, &[lhs.into(), rhs.into()]) .into_struct_value(); self.builder.store(reg_var, res); @@ -1559,7 +1565,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let res = self .builder - .call(add, &[lhs.into(), rhs.into()]) + .call_with_return(add, &[lhs.into(), rhs.into()]) .into_struct_value(); self.builder.store(reg_var, res); @@ -1585,7 +1591,7 @@ impl<'shared, 
'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { self.module.intrinsic("llvm.bswap", &[val_typ]); let swapped = self .builder - .call(fun, &[val.into()]) + .call_with_return(fun, &[val.into()]) .into_int_value(); let res = self.builder.int_to_int(swapped, 64, signed); @@ -1603,7 +1609,10 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let no_poison = self.builder.bool_literal(false); let res = self .builder - .call(fun, &[val.into(), no_poison.into()]) + .call_with_return( + fun, + &[val.into(), no_poison.into()], + ) .into_int_value(); self.builder.store(reg_var, res); @@ -1629,7 +1638,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let func = self.module.runtime_function(func_name); let proc = self.load_process().into(); - self.builder.call_void(func, &[proc, val.into()]); + self.builder.direct_call(func, &[proc, val.into()]); self.builder.unreachable(); } Intrinsic::StringConcat => { @@ -1656,7 +1665,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let state = self.load_state(); let func_name = RuntimeFunction::StringConcat; let func = self.module.runtime_function(func_name); - let res = self.builder.call( + let res = self.builder.call_with_return( func, &[state.into(), temp_var.into(), len.into()], ); @@ -1684,7 +1693,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let func = self .module .intrinsic("llvm.x86.sse2.pause", &[]); - self.builder.call_void(func, &[]); + self.builder.direct_call(func, &[]); } Architecture::Arm64 => { // For ARM64 we use the same approach as Rust by @@ -1694,7 +1703,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { .module .intrinsic("llvm.aarch64.isb", &[]); - self.builder.call_void(func, &[sy.into()]); + self.builder.direct_call(func, &[sy.into()]); } }; @@ -1718,13 +1727,29 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { } Instruction::Return(ins) => { let var = self.variables[&ins.register]; - let typ = self.variable_types[&ins.register]; - let val = self.builder.load(typ, var); if let Some(ptr) = self.struct_return_value { + let typ = self.variable_types[&ins.register]; + let val = self.builder.load(typ, var); + self.builder.store(ptr, val); self.builder.return_value(None); } else { + // When returning a struct on the stack, the return type + // will be structurally compatible but might be nominally + // different. + // + // For example, if the struct is `{ i64 }` we may + // in fact return a value of type `i64`. While both have the + // same layout, they're not compatible at the LLVM level. 
+ let typ = self + .builder + .function + .get_type() + .get_return_type() + .unwrap(); + let val = self.builder.load(typ, var); + self.builder.return_value(Some(&val)); } } @@ -1797,10 +1822,9 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let name = ins.method.name(&self.shared.state.db); let fn_val = self.module.add_method(name, ins.method); let kind = CallKind::Direct(fn_val); - let sret = - self.layouts.methods[ins.method.0 as usize].struct_return; + let layout = &self.layouts.methods[ins.method.0 as usize]; - self.call(ins.register, kind, None, &ins.arguments, sret) + self.call(kind, layout, ins.register, None, &ins.arguments) } Instruction::CallStatic(ins) => { self.set_debug_location(ins.location); @@ -1808,10 +1832,9 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let func_name = &self.shared.names.methods[&ins.method]; let func = self.module.add_method(func_name, ins.method); let kind = CallKind::Direct(func); - let sret = - self.layouts.methods[ins.method.0 as usize].struct_return; + let layout = &self.layouts.methods[ins.method.0 as usize]; - self.call(ins.register, kind, None, &ins.arguments, sret); + self.call(kind, layout, ins.register, None, &ins.arguments); } Instruction::CallInstance(ins) => { self.set_debug_location(ins.location); @@ -1819,15 +1842,14 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let name = &self.shared.names.methods[&ins.method]; let func = self.module.add_method(name, ins.method); let kind = CallKind::Direct(func); - let sret = - self.layouts.methods[ins.method.0 as usize].struct_return; + let layout = &self.layouts.methods[ins.method.0 as usize]; self.call( - ins.register, kind, + layout, + ins.register, Some(ins.receiver), &ins.arguments, - sret, ); } Instruction::CallDynamic(ins) => { @@ -1847,7 +1869,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let rec = self.builder.load(rec_typ, rec_var); let info = &self.shared.methods.info[ins.method.0 as usize]; let layout = &self.layouts.methods[ins.method.0 as usize]; - let fn_typ = layout.signature; + let fn_typ = layout.signature(self.builder.context); let rec_class = self .builder .load_field( @@ -1938,14 +1960,13 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { ); let fn_val = self.builder.load_pointer(fn_var); let kind = CallKind::Indirect(fn_typ, fn_val); - let sret = layout.struct_return; self.call( - ins.register, kind, + layout, + ins.register, Some(ins.receiver), &ins.arguments, - sret, ); } Instruction::CallClosure(ins) => { @@ -1953,19 +1974,24 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let rec_var = self.variables[&ins.receiver]; let rec_typ = self.variable_types[&ins.receiver]; + let reg_typ = self.variable_types[&ins.register]; // For closures we generate the signature on the fly, as the // method for `call` isn't always clearly defined: for an // argument typed as a closure, we don't know what the actual // method is, thus we can't retrieve an existing signature. 
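// -- Reviewer sketch, not part of the patch ---------------------------------
// The coercion the Return branch above relies on, reduced to plain inkwell:
// the stack slot is typed as the struct (e.g. { i32, i32 }), but the function
// was declared with the coerced return type (e.g. i64), so the slot is loaded
// using the declared type and the same bytes are returned as that value.
// `builder`, `func` and `slot` are assumed to exist.
use inkwell::builder::Builder;
use inkwell::values::{FunctionValue, PointerValue};

fn return_coerced<'ctx>(
    builder: &Builder<'ctx>,
    func: FunctionValue<'ctx>,
    slot: PointerValue<'ctx>,
) {
    // Load using the function's declared return type, not the nominal struct
    // type of the slot.
    let ret_typ = func.get_type().get_return_type().unwrap();
    let val = builder.build_load(ret_typ, slot, "").unwrap();

    builder.build_return(Some(&val)).unwrap();
}
// ----------------------------------------------------------------------------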
- let mut fn_args: Vec = vec![]; - let (ret, sret) = self.builder.context.return_type( + let mut layout = MethodLayout::new(); + + layout.returns = self.builder.context.return_type( self.shared.state, self.layouts, - &mut fn_args, - self.register_type(ins.register), + reg_typ, ); + if let ReturnType::Struct(t) = layout.returns { + layout.arguments.push(ArgumentType::StructReturn(t)) + } + for reg in [ins.receiver].iter().chain(ins.arguments.iter()) { let raw = self.variable_types[reg]; let typ = self.builder.context.argument_type( @@ -1974,7 +2000,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { raw, ); - fn_args.push(typ.into()); + layout.arguments.push(typ); } // Load the method from the method table. @@ -2004,30 +2030,32 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { .builder .extract_field(method, METHOD_FUNCTION_INDEX) .into_pointer_value(); - let fn_type = ret - .map(|t| t.fn_type(&fn_args, false)) - .unwrap_or_else(|| { - self.builder - .context - .void_type() - .fn_type(&fn_args, false) - }); + let fn_type = layout.signature(self.builder.context); let kind = CallKind::Indirect(fn_type, fn_val); self.call( - ins.register, kind, + &layout, + ins.register, Some(ins.receiver), &ins.arguments, - sret, ); } Instruction::CallDropper(ins) => { self.set_debug_location(ins.location); + let reg_typ = self.variable_types[&ins.register]; let rec_var = self.variables[&ins.receiver]; let rec_typ = self.variable_types[&ins.receiver]; - let sig_args: Vec = vec![rec_typ.into()]; + let mut layout = MethodLayout::new(); + + layout.returns = ReturnType::Regular(reg_typ); + layout.arguments.push(self.builder.context.argument_type( + self.shared.state, + self.layouts, + rec_typ, + )); + let rec = self.builder.load(rec_typ, rec_var); let class = self .builder @@ -2048,15 +2076,14 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { .builder .load(self.layouts.method, addr) .into_struct_value(); - let func_val = self + let fn_val = self .builder .extract_field(method, METHOD_FUNCTION_INDEX) .into_pointer_value(); - let func_type = self.variable_types[&ins.register] - .fn_type(&sig_args, false); - let kind = CallKind::Indirect(func_type, func_val); + let fn_typ = layout.signature(self.builder.context); + let kind = CallKind::Indirect(fn_typ, fn_val); - self.call(ins.register, kind, Some(ins.receiver), &[], None); + self.call(kind, &layout, ins.register, Some(ins.receiver), &[]); } Instruction::Send(ins) => { self.set_debug_location(ins.location); @@ -2103,7 +2130,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let sender = self.load_process().into(); let rec = self.builder.load(rec_typ, rec_var).into(); - self.builder.call_void( + self.builder.direct_call( send_message, &[state.into(), sender, rec, method, args.into()], ); @@ -2285,7 +2312,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { .module .runtime_function(RuntimeFunction::ReferenceCountError); - self.builder.call_void(func, &[proc.into(), val.into()]); + self.builder.direct_call(func, &[proc.into(), val.into()]); self.builder.unreachable(); // The block to jump to when the count is zero. 
@@ -2298,7 +2325,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let ptr = self.builder.load_pointer(var); let func = self.module.runtime_function(RuntimeFunction::Free); - self.builder.call_void(func, &[ptr.into()]); + self.builder.direct_call(func, &[ptr.into()]); } Instruction::Increment(ins) => { let reg_var = self.variables[&ins.register]; @@ -2379,7 +2406,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let proc = self.load_process().into(); let func = self.module.runtime_function(RuntimeFunction::ProcessNew); - let ptr = self.builder.call(func, &[proc, class]); + let ptr = self.builder.call_with_return(func, &[proc, class]); self.builder.store(reg_var, ptr); } @@ -2426,7 +2453,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { let func = self.module.runtime_function(RuntimeFunction::ProcessYield); - self.builder.call_void(func, &[proc.into()]); + self.builder.direct_call(func, &[proc.into()]); self.builder.jump(cont_block); // The block to jump to if we can continue running. @@ -2441,7 +2468,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { .module .runtime_function(RuntimeFunction::ProcessFinishMessage); - self.builder.call_void(func, &[proc, terminate]); + self.builder.direct_call(func, &[proc, terminate]); self.builder.unreachable(); } Instruction::Cast(ins) => { @@ -2576,97 +2603,87 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { fn call( &self, - register: RegisterId, kind: CallKind<'ctx>, + layout: &MethodLayout<'ctx>, + register: RegisterId, receiver: Option, arguments: &[RegisterId], - struct_return: Option>, ) { let mut args: Vec = Vec::with_capacity( arguments.len() + receiver.is_some() as usize - + struct_return.is_some() as usize, + + layout.returns.is_struct() as usize, ); // When using struct returns, the returned data is written to a pointer // which we then read into the desired return register _after_ the call. - let sret = if let Some(typ) = struct_return { + let mut attrs = Vec::new(); + let sret = if let ReturnType::Struct(typ) = layout.returns { let var = self.builder.new_stack_slot(typ); + attrs.push(( + AttributeLoc::Param(0), + self.builder.context.type_attribute("sret", typ.into()), + )); args.push(var.into()); Some((typ, var)) } else { None }; - let struct_max = self.shared.state.config.target.pass_struct_size(); - - for reg in receiver.iter().chain(arguments.iter()) { - let typ = self.variable_types[reg]; + for (idx, reg) in receiver.iter().chain(arguments.iter()).enumerate() { + let idx = if sret.is_some() { idx + 1 } else { idx }; let var = self.variables[reg]; - // TODO: cast to [2 x i64] when necessary - if typ.is_struct_type() - && self.layouts.target_data.get_abi_size(&typ) > struct_max - { - args.push(var.into()); - } else { - args.push(self.builder.load(typ, var).into()); - } - } + match layout.arguments.get(idx).cloned() { + Some(ArgumentType::Regular(t)) => { + args.push(self.builder.load(t, var).into()); + } + Some(ArgumentType::StructValue(t)) => { + attrs.push(( + AttributeLoc::Param(idx as u32), + self.builder.context.type_attribute("byval", t.into()), + )); - match kind { - CallKind::Direct(fun) => { - self.direct_call(register, fun, &args, sret); - } - CallKind::Indirect(typ, fun) => { - self.indirect_call(register, typ, fun, &args, sret); + args.push(var.into()); + } + Some(ArgumentType::StructReturn(_)) => { + // We only iterate over explicitly provided arguments and + // those don't include the sret pointer. 
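// -- Reviewer sketch, not part of the patch ---------------------------------
// The caller-side sret pattern that the call() rewrite here implements,
// reduced to plain inkwell: allocate a temporary, pass it as the hidden first
// argument, mirror the "sret" attribute on the call site, then load the
// result back out. `callee` is assumed to be declared as `void f(ptr sret(%T))`;
// the builder API shown is the Result-returning one this code base uses.
use inkwell::attributes::{Attribute, AttributeLoc};
use inkwell::builder::Builder;
use inkwell::context::Context;
use inkwell::types::StructType;
use inkwell::values::{BasicValueEnum, FunctionValue};

fn call_with_sret<'ctx>(
    ctx: &Context,
    builder: &Builder<'ctx>,
    callee: FunctionValue<'ctx>,
    ret_typ: StructType<'ctx>,
) -> BasicValueEnum<'ctx> {
    // Reserve stack space for the callee to write the result into, and pass
    // it as the first argument.
    let tmp = builder.build_alloca(ret_typ, "sret_tmp").unwrap();
    let call = builder.build_call(callee, &[tmp.into()], "").unwrap();

    // Mirror the "sret" attribute on the call site so it matches the callee's
    // declaration.
    call.add_attribute(
        AttributeLoc::Param(0),
        ctx.create_type_attribute(
            Attribute::get_named_enum_kind_id("sret"),
            ret_typ.into(),
        ),
    );

    // Read the result back into a regular SSA value.
    builder.build_load(ret_typ, tmp, "").unwrap()
}
// ----------------------------------------------------------------------------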
In addition, we + // handle sret arguments before the iteration, so there's + // nothing we need to do here. + } + Some(ArgumentType::Pointer) => { + args.push(var.into()); + } + None => { + // We may run into this case when calling a variadic + // function and passing more arguments than are defined. + let typ = self.variable_types[reg]; + + args.push(self.builder.load(typ, var).into()); + } } } - } - - fn direct_call( - &self, - register: RegisterId, - function: FunctionValue<'ctx>, - arguments: &[BasicMetadataValueEnum], - struct_return: Option<(StructType<'ctx>, PointerValue<'ctx>)>, - ) { - let var = self.variables[®ister]; - if function.get_type().get_return_type().is_some() { - let val = self.builder.call(function, arguments); - - self.builder.store(var, val); - } else { - let call = self.builder.call_void(function, arguments); + let reg_var = self.variables[®ister]; + let call_site = match kind { + CallKind::Direct(f) => self.builder.direct_call(f, &args), + CallKind::Indirect(t, f) => self.builder.indirect_call(t, f, &args), + }; - if let Some((typ, tmp)) = struct_return { - self.builder.load_for_struct_return(call, typ, tmp, var); - } + for (loc, attr) in attrs { + call_site.add_attribute(loc, attr); } - if self.register_type(register).is_never(&self.shared.state.db) { - self.builder.unreachable(); - } - } + if layout.returns.is_regular() { + let val = call_site.try_as_basic_value().left().unwrap(); - fn indirect_call( - &self, - register: RegisterId, - function_type: FunctionType<'ctx>, - function: PointerValue<'ctx>, - arguments: &[BasicMetadataValueEnum], - struct_return: Option<(StructType<'ctx>, PointerValue<'ctx>)>, - ) { - let var = self.variables[®ister]; - let call = - self.builder.indirect_call(function_type, function, arguments); - - if function_type.get_return_type().is_some() { - self.builder.store(var, call.try_as_basic_value().left().unwrap()); - } else if let Some((typ, tmp)) = struct_return { - self.builder.load_for_struct_return(call, typ, tmp, var); + self.builder.store(reg_var, val); + } else if let Some((typ, tmp)) = sret { + let val = self.builder.load(typ, tmp); + + self.builder.store(reg_var, val); } if self.register_type(register).is_never(&self.shared.state.db) { @@ -2750,7 +2767,7 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { &[target.into(), source.get_type().into()], ); - self.builder.call(func, &[source.into()]).into_int_value() + self.builder.call_with_return(func, &[source.into()]).into_int_value() } fn load_process(&mut self) -> PointerValue<'ctx> { @@ -2772,8 +2789,10 @@ impl<'shared, 'module, 'ctx> LowerMethod<'shared, 'module, 'ctx> { self.shared.state.config.target.stack_pointer_register_name(); let mname = self.builder.context.inner.metadata_string(rsp_name); let mnode = self.builder.context.inner.metadata_node(&[mname.into()]); - let rsp_addr = - self.builder.call(func, &[mnode.into()]).into_int_value(); + let rsp_addr = self + .builder + .call_with_return(func, &[mnode.into()]) + .into_int_value(); let mask = self.load_stack_mask(); let addr = self.builder.bit_and(rsp_addr, mask); @@ -2869,7 +2888,10 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { self.module.runtime_function(RuntimeFunction::RuntimeStackMask); let runtime = self .builder - .call(rt_new, &[counts.into(), argc.into(), argv.into()]) + .call_with_return( + rt_new, + &[counts.into(), argc.into(), argv.into()], + ) .into_pointer_value(); // The state is needed by various runtime functions. 
Because this data @@ -2877,8 +2899,10 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { // global and thus remove the need to pass it as a hidden argument to // every Inko method. let state_global = self.module.add_global_pointer(STATE_GLOBAL); - let state = - self.builder.call(rt_state, &[runtime.into()]).into_pointer_value(); + let state = self + .builder + .call_with_return(rt_state, &[runtime.into()]) + .into_pointer_value(); state_global.set_initializer( &self @@ -2905,7 +2929,7 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { let stack_size = self .builder - .call(rt_stack_mask, &[runtime.into()]) + .call_with_return(rt_stack_mask, &[runtime.into()]) .into_int_value(); self.builder.store(stack_size_global.as_pointer_value(), stack_size); @@ -2919,7 +2943,7 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { let name = &self.names.setup_classes[&module.id]; let func = self.module.add_setup_function(name); - self.builder.call_void(func, &[]); + self.builder.direct_call(func, &[]); } // Constants need to be defined in a separate pass, as they may depends @@ -2929,7 +2953,7 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { let name = &self.names.setup_constants[&module.id]; let func = self.module.add_setup_function(name); - self.builder.call_void(func, &[]); + self.builder.direct_call(func, &[]); } let main_class_id = self.db.main_class().unwrap(); @@ -2954,7 +2978,7 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { let main_class = self.builder.load_pointer(main_class_ptr); - self.builder.call_void( + self.builder.direct_call( rt_start, &[runtime.into(), main_class.into(), main_method.into()], ); @@ -2965,7 +2989,7 @@ impl<'a, 'ctx> GenerateMain<'a, 'ctx> { // we're exiting here. We _do_ drop the runtime in case we want to hook // any additional logic into that step at some point, though technically // this isn't necessary. - self.builder.call_void(rt_drop, &[runtime.into()]); + self.builder.direct_call(rt_drop, &[runtime.into()]); self.builder.return_value(Some(&self.builder.u32_literal(0))); } diff --git a/compiler/src/target.rs b/compiler/src/target.rs index 049a5a0b..be3873da 100644 --- a/compiler/src/target.rs +++ b/compiler/src/target.rs @@ -237,17 +237,6 @@ impl Target { self == &Target::native() } - /// Returns the maximum size (in bytes) of a struct that can be passed - /// through registers. - /// - /// If a struct is larger than this size, it must be passed using a pointer. - pub(crate) fn pass_struct_size(&self) -> u64 { - // The exact size may differ per platform, but both amd64 and arm64 have - // the same requirement, and those are the only platforms we support at - // this time. 
- 16 - } - pub(crate) fn stack_pointer_register_name(&self) -> &str { match self.arch { Architecture::Amd64 => "rsp", diff --git a/types/src/lib.rs b/types/src/lib.rs index 103d1dae..f0e1c798 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -2659,39 +2659,42 @@ impl MethodId { } pub fn is_mutable(self, db: &Database) -> bool { - matches!( - self.get(db).kind, - MethodKind::Mutable | MethodKind::AsyncMutable - ) + matches!(self.kind(db), MethodKind::Mutable | MethodKind::AsyncMutable) } pub fn is_immutable(self, db: &Database) -> bool { matches!( - self.get(db).kind, + self.kind(db), MethodKind::Async | MethodKind::Static | MethodKind::Instance ) } pub fn is_async(self, db: &Database) -> bool { - matches!( - self.get(db).kind, - MethodKind::Async | MethodKind::AsyncMutable - ) + matches!(self.kind(db), MethodKind::Async | MethodKind::AsyncMutable) } pub fn is_static(self, db: &Database) -> bool { - matches!( - self.get(db).kind, - MethodKind::Static | MethodKind::Constructor - ) + matches!(self.kind(db), MethodKind::Static | MethodKind::Constructor) } pub fn is_extern(self, db: &Database) -> bool { - matches!(self.get(db).kind, MethodKind::Extern) + matches!(self.kind(db), MethodKind::Extern) } pub fn is_moving(self, db: &Database) -> bool { - matches!(self.get(db).kind, MethodKind::Moving) + matches!(self.kind(db), MethodKind::Moving) + } + + pub fn is_instance(self, db: &Database) -> bool { + matches!( + self.kind(db), + MethodKind::Async + | MethodKind::AsyncMutable + | MethodKind::Instance + | MethodKind::Moving + | MethodKind::Mutable + | MethodKind::Destructor + ) } pub fn set_variadic(self, db: &mut Database) { @@ -2776,10 +2779,6 @@ impl MethodId { self.get(db).kind } - pub fn is_instance(self, db: &Database) -> bool { - !self.is_static(db) - } - pub fn module(self, db: &Database) -> ModuleId { self.get(db).module }