Skip to content

Commit

Permalink
Include Result in the MacroAssembler
Browse files Browse the repository at this point in the history
This commit updates the MacroAssembler trait to require returning
`Result<T>` from every method in the interface, making it easier to
detect partial support for Masm instructions.
  • Loading branch information
saulecabrera committed Jan 1, 2025
1 parent 502efa8 commit d74ea8e
Show file tree
Hide file tree
Showing 13 changed files with 1,147 additions and 836 deletions.
44 changes: 23 additions & 21 deletions winch/codegen/src/codegen/bounds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,19 +94,19 @@ where
let dst = context.any_gpr(masm)?;
match heap.memory.static_heap_size() {
// Constant size, no need to perform a load.
Some(size) => masm.mov(writable!(dst), RegImm::i64(size.signed()), ptr_size),
Some(size) => masm.mov(writable!(dst), RegImm::i64(size.signed()), ptr_size)?,

None => {
let scratch = scratch!(M);
let base = if let Some(offset) = heap.import_from {
let addr = masm.address_at_vmctx(offset);
masm.load_ptr(addr, writable!(scratch));
let addr = masm.address_at_vmctx(offset)?;
masm.load_ptr(addr, writable!(scratch))?;
scratch
} else {
vmctx!(M)
};
let addr = masm.address_at_reg(base, heap.current_length_offset);
masm.load_ptr(addr, writable!(dst));
let addr = masm.address_at_reg(base, heap.current_length_offset)?;
masm.load_ptr(addr, writable!(dst))?;
}
}

Expand All @@ -129,10 +129,10 @@ pub(crate) fn ensure_index_and_offset<M: MacroAssembler>(
index: Index,
offset: u64,
heap_ty_size: OperandSize,
) -> ImmOffset {
) -> Result<ImmOffset> {
match u32::try_from(offset) {
// If the immediate offset fits in a u32, then we simply return.
Ok(offs) => ImmOffset::from_u32(offs),
Ok(offs) => Ok(ImmOffset::from_u32(offs)),
// Else we adjust the index to be index = index + offset, including an
// overflow check, and return 0 as the offset.
Err(_) => {
Expand All @@ -142,9 +142,9 @@ pub(crate) fn ensure_index_and_offset<M: MacroAssembler>(
RegImm::i64(offset as i64),
heap_ty_size,
TrapCode::HEAP_OUT_OF_BOUNDS,
);
)?;

ImmOffset::from_u32(0)
Ok(ImmOffset::from_u32(0))
}
}
}
Expand All @@ -164,23 +164,23 @@ pub(crate) fn load_heap_addr_checked<M, F>(
) -> Result<Reg>
where
M: MacroAssembler,
F: FnMut(&mut M, Bounds, Index) -> IntCmpKind,
F: FnMut(&mut M, Bounds, Index) -> Result<IntCmpKind>,
{
let cmp_kind = emit_check_condition(masm, bounds, index);
let cmp_kind = emit_check_condition(masm, bounds, index)?;

masm.trapif(cmp_kind, TrapCode::HEAP_OUT_OF_BOUNDS);
masm.trapif(cmp_kind, TrapCode::HEAP_OUT_OF_BOUNDS)?;
let addr = context.any_gpr(masm)?;

load_heap_addr_unchecked(masm, heap, index, offset, addr, ptr_size);
load_heap_addr_unchecked(masm, heap, index, offset, addr, ptr_size)?;
if !enable_spectre_mitigation {
Ok(addr)
} else {
// Conditionally assign 0 to the register holding the base address if
// the comparison kind is met.
let tmp = context.any_gpr(masm)?;
masm.mov(writable!(tmp), RegImm::i64(0), ptr_size);
let cmp_kind = emit_check_condition(masm, bounds, index);
masm.cmov(writable!(addr), tmp, cmp_kind, ptr_size);
masm.mov(writable!(tmp), RegImm::i64(0), ptr_size)?;
let cmp_kind = emit_check_condition(masm, bounds, index)?;
masm.cmov(writable!(addr), tmp, cmp_kind, ptr_size)?;
context.free_reg(tmp);
Ok(addr)
}
Expand All @@ -196,14 +196,15 @@ pub(crate) fn load_heap_addr_unchecked<M>(
offset: ImmOffset,
dst: Reg,
ptr_size: OperandSize,
) where
) -> Result<()>
where
M: MacroAssembler,
{
let base = if let Some(offset) = heap.import_from {
// If the WebAssembly memory is imported, load the address into
// the scratch register.
let scratch = scratch!(M);
masm.load_ptr(masm.address_at_vmctx(offset), writable!(scratch));
masm.load_ptr(masm.address_at_vmctx(offset)?, writable!(scratch))?;
scratch
} else {
// Else if the WebAssembly memory is defined in the current module,
Expand All @@ -212,17 +213,18 @@ pub(crate) fn load_heap_addr_unchecked<M>(
};

// Load the base of the memory into the `addr` register.
masm.load_ptr(masm.address_at_reg(base, heap.offset), writable!(dst));
masm.load_ptr(masm.address_at_reg(base, heap.offset)?, writable!(dst))?;
// Start by adding the index to the heap base addr.
let index_reg = index.as_typed_reg().reg;
masm.add(writable!(dst), dst, index_reg.into(), ptr_size);
masm.add(writable!(dst), dst, index_reg.into(), ptr_size)?;

if offset.as_u32() > 0 {
masm.add(
writable!(dst),
dst,
RegImm::i64(offset.as_u32() as i64),
ptr_size,
);
)?;
}
Ok(())
}
74 changes: 39 additions & 35 deletions winch/codegen/src/codegen/call.rs
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ impl FnCall {

let sig = env.callee_sig::<M::ABI>(&callee);
context.spill(masm)?;
let ret_area = Self::make_ret_area(&sig, masm);
let ret_area = Self::make_ret_area(&sig, masm)?;
let arg_stack_space = sig.params_stack_size();
let reserved_stack = masm.call(arg_stack_space, |masm| {
Self::assign(sig, &callee_context, ret_area.as_ref(), context, masm)?;
Expand All @@ -112,15 +112,20 @@ impl FnCall {
}

/// Calculates the return area for the callee, if any.
fn make_ret_area<M: MacroAssembler>(callee_sig: &ABISig, masm: &mut M) -> Option<RetArea> {
callee_sig.has_stack_results().then(|| {
let base = masm.sp_offset().as_u32();
fn make_ret_area<M: MacroAssembler>(
callee_sig: &ABISig,
masm: &mut M,
) -> Result<Option<RetArea>> {
if callee_sig.has_stack_results() {
let base = masm.sp_offset()?.as_u32();
let end = base + callee_sig.results_stack_size();
if end > base {
masm.reserve_stack(end - base);
masm.reserve_stack(end - base)?;
}
RetArea::sp(SPOffset::from_u32(end))
})
Ok(Some(RetArea::sp(SPOffset::from_u32(end))))
} else {
Ok(None)
}
}

/// Lowers the high-level [`Callee`] to a [`CalleeKind`] and
Expand Down Expand Up @@ -187,12 +192,12 @@ impl FnCall {
Ok((context.any_gpr(masm)?, context.any_gpr(masm)?))
})??;
let callee_vmctx_offset = vmoffsets.vmctx_vmfunction_import_vmctx(index);
let callee_vmctx_addr = masm.address_at_vmctx(callee_vmctx_offset);
masm.load_ptr(callee_vmctx_addr, writable!(callee_vmctx));
let callee_vmctx_addr = masm.address_at_vmctx(callee_vmctx_offset)?;
masm.load_ptr(callee_vmctx_addr, writable!(callee_vmctx))?;

let callee_body_offset = vmoffsets.vmctx_vmfunction_import_wasm_call(index);
let callee_addr = masm.address_at_vmctx(callee_body_offset);
masm.load_ptr(callee_addr, writable!(callee));
let callee_addr = masm.address_at_vmctx(callee_body_offset)?;
masm.load_ptr(callee_addr, writable!(callee))?;

Ok((
CalleeKind::indirect(callee),
Expand Down Expand Up @@ -225,15 +230,15 @@ impl FnCall {
// Load the callee VMContext, that will be passed as first argument to
// the function call.
masm.load_ptr(
masm.address_at_reg(funcref_ptr, ptr.vm_func_ref_vmctx().into()),
masm.address_at_reg(funcref_ptr, ptr.vm_func_ref_vmctx().into())?,
writable!(callee_vmctx),
);
)?;

// Load the function pointer to be called.
masm.load_ptr(
masm.address_at_reg(funcref_ptr, ptr.vm_func_ref_wasm_call().into()),
masm.address_at_reg(funcref_ptr, ptr.vm_func_ref_wasm_call().into())?,
writable!(funcref),
);
)?;
context.free_reg(funcref_ptr);

Ok((
Expand All @@ -259,20 +264,20 @@ impl FnCall {
{
match (context_arg, operand) {
(VMContextLoc::Pinned, ABIOperand::Reg { ty, reg, .. }) => {
masm.mov(writable!(*reg), vmctx!(M).into(), (*ty).try_into()?);
masm.mov(writable!(*reg), vmctx!(M).into(), (*ty).try_into()?)?;
}
(VMContextLoc::Pinned, ABIOperand::Stack { ty, offset, .. }) => {
let addr = masm.address_at_sp(SPOffset::from_u32(*offset));
masm.store(vmctx!(M).into(), addr, (*ty).try_into()?);
let addr = masm.address_at_sp(SPOffset::from_u32(*offset))?;
masm.store(vmctx!(M).into(), addr, (*ty).try_into()?)?;
}

(VMContextLoc::Reg(src), ABIOperand::Reg { ty, reg, .. }) => {
masm.mov(writable!(*reg), (*src).into(), (*ty).try_into()?);
masm.mov(writable!(*reg), (*src).into(), (*ty).try_into()?)?;
}

(VMContextLoc::Reg(src), ABIOperand::Stack { ty, offset, .. }) => {
let addr = masm.address_at_sp(SPOffset::from_u32(*offset));
masm.store((*src).into(), addr, (*ty).try_into()?);
let addr = masm.address_at_sp(SPOffset::from_u32(*offset))?;
masm.store((*src).into(), addr, (*ty).try_into()?)?;
}
}
}
Expand Down Expand Up @@ -307,31 +312,31 @@ impl FnCall {
context.move_val_to_reg(&val, reg, masm)?;
}
&ABIOperand::Stack { ty, offset, .. } => {
let addr = masm.address_at_sp(SPOffset::from_u32(offset));
let addr = masm.address_at_sp(SPOffset::from_u32(offset))?;
let size: OperandSize = ty.try_into()?;
let scratch = scratch!(M, &ty);
context.move_val_to_reg(val, scratch, masm)?;
masm.store(scratch.into(), addr, size);
masm.store(scratch.into(), addr, size)?;
}
}
}

if sig.has_stack_results() {
let operand = sig.params.unwrap_results_area_operand();
let base = ret_area.unwrap().unwrap_sp();
let addr = masm.address_from_sp(base);
let addr = masm.address_from_sp(base)?;

match operand {
&ABIOperand::Reg { ty, reg, .. } => {
masm.load_addr(addr, writable!(reg), ty.try_into()?);
masm.load_addr(addr, writable!(reg), ty.try_into()?)?;
}
&ABIOperand::Stack { ty, offset, .. } => {
let slot = masm.address_at_sp(SPOffset::from_u32(offset));
let slot = masm.address_at_sp(SPOffset::from_u32(offset))?;
// Don't rely on `ABI::scratch_for` as we always use
// an int register as the return pointer.
let scratch = scratch!(M);
masm.load_addr(addr, writable!(scratch), ty.try_into()?);
masm.store(scratch.into(), slot, ty.try_into()?);
masm.load_addr(addr, writable!(scratch), ty.try_into()?)?;
masm.store(scratch.into(), slot, ty.try_into()?)?;
}
}
}
Expand Down Expand Up @@ -364,7 +369,7 @@ impl FnCall {
}
// Deallocate the reserved space for stack arguments and for alignment,
// which was allocated last.
masm.free_stack(reserved_space);
masm.free_stack(reserved_space)?;

ensure!(
sig.params.len_without_retptr() >= callee_context.len(),
Expand Down Expand Up @@ -401,12 +406,12 @@ impl FnCall {
CodeGenError::invalid_sp_offset(),
);
let dst = SPOffset::from_u32(sp.as_u32() - stack_consumed);
masm.memmove(sp, dst, result_bytes, MemMoveDirection::LowToHigh);
masm.memmove(sp, dst, result_bytes, MemMoveDirection::LowToHigh)?;
}
};

// Free the bytes consumed by the call.
masm.free_stack(stack_consumed);
masm.free_stack(stack_consumed)?;

let mut calculated_ret_area = None;

Expand All @@ -415,12 +420,12 @@ impl FnCall {
// If there's a return area and stack space was consumed by the
// call, adjust the return area to be to the current stack
// pointer offset.
calculated_ret_area = Some(RetArea::sp(masm.sp_offset()));
calculated_ret_area = Some(RetArea::sp(masm.sp_offset()?));
} else {
// Else if no stack space was consumed by the call, simply use
// the previously calculated area.
ensure!(
area.unwrap_sp() == masm.sp_offset(),
area.unwrap_sp() == masm.sp_offset()?,
CodeGenError::invalid_sp_offset()
);
calculated_ret_area = Some(area);
Expand All @@ -434,7 +439,6 @@ impl FnCall {
// register. Winch currently doesn't have any callee-saved registers in
// the default ABI. So the callee might clobber the designated pinned
// register.
context.load_vmctx(masm);
Ok(())
context.load_vmctx(masm)
}
}
Loading

0 comments on commit d74ea8e

Please sign in to comment.