From a94e72b5b7328ae8957e78b41b8a6f7ab093e74d Mon Sep 17 00:00:00 2001 From: Ulrich Weigand Date: Thu, 20 Jan 2022 21:46:08 +0100 Subject: [PATCH] s390x: Add ISLE support This adds ISLE support for the s390x back-end and moves lowering of most instructions to ISLE. The only instructions still remaining are calls, returns, traps, and branches, most of which will need additional support in common code. Generated code is not intended to be (significantly) different than before; any additional optimizations now made easier to implement due to the ISLE layer can be added in follow-on patches. There were a few differences in some filetests, but those are all just simple register allocation changes (and all to the better!). --- cranelift/codegen/build.rs | 15 + cranelift/codegen/src/isa/s390x/inst.isle | 2914 ++++ cranelift/codegen/src/isa/s390x/inst/mod.rs | 1261 +- cranelift/codegen/src/isa/s390x/lower.isle | 1780 +++ cranelift/codegen/src/isa/s390x/lower.rs | 2241 +-- cranelift/codegen/src/isa/s390x/lower/isle.rs | 475 + .../s390x/lower/isle/generated_code.manifest | 4 + .../isa/s390x/lower/isle/generated_code.rs | 11438 ++++++++++++++++ .../filetests/filetests/isa/s390x/bitops.clif | 34 +- .../filetests/isa/s390x/heap_addr.clif | 4 +- .../filetests/isa/s390x/shift-rotate.clif | 28 +- 11 files changed, 16975 insertions(+), 3219 deletions(-) create mode 100644 cranelift/codegen/src/isa/s390x/inst.isle create mode 100644 cranelift/codegen/src/isa/s390x/lower.isle create mode 100644 cranelift/codegen/src/isa/s390x/lower/isle.rs create mode 100644 cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest create mode 100644 cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs diff --git a/cranelift/codegen/build.rs b/cranelift/codegen/build.rs index 8eac58e8d9b2..cb704cb0abbb 100644 --- a/cranelift/codegen/build.rs +++ b/cranelift/codegen/build.rs @@ -220,6 +220,8 @@ fn get_isle_compilations(crate_dir: &std::path::Path) -> Result Result integer. + (FpuToInt + (op FpuToIntOp) + (rd WritableReg) + (rn Reg)) + + ;; Conversion integer -> FP. + (IntToFpu + (op IntToFpuOp) + (rd WritableReg) + (rn Reg)) + + ;; Round to integer. + (FpuRound + (op FpuRoundMode) + (rd WritableReg) + (rn Reg)) + + ;; 2-op FPU instruction implemented as vector instruction with the W bit. + (FpuVecRRR + (fpu_op FPUOp2) + (rd WritableReg) + (rn Reg) + (rm Reg)) + + ;; A machine call instruction. + (Call + (link WritableReg) + (info BoxCallInfo)) + + ;; A machine indirect-call instruction. + (CallInd + (link WritableReg) + (info BoxCallIndInfo)) + + ;; ---- branches (exactly one must appear at end of BB) ---- + + ;; A machine return instruction. + (Ret + (link Reg)) + + ;; A placeholder instruction, generating no code, meaning that a function epilogue must be + ;; inserted there. + (EpiloguePlaceholder) + + ;; An unconditional branch. + (Jump + (dest BranchTarget)) + + ;; A conditional branch. Contains two targets; at emission time, both are emitted, but + ;; the MachBuffer knows to truncate the trailing branch if fallthrough. We optimize the + ;; choice of taken/not_taken (inverting the branch polarity as needed) based on the + ;; fallthrough at the time of lowering. + (CondBr + (taken BranchTarget) + (not_taken BranchTarget) + (cond Cond)) + + ;; A conditional trap execute a `Trap` if the condition is true. This is + ;; one VCode instruction because it uses embedded control flow; it is + ;; logically a single-in, single-out region, but needs to appear as one + ;; unit to the register allocator. 
+ ;; + ;; The `Cond` gives the conditional-branch condition that will + ;; *execute* the embedded `Trap`. (In the emitted code, we use the inverse + ;; of this condition in a branch that skips the trap instruction.) + (TrapIf + (cond Cond) + (trap_code TrapCode)) + + ;; A one-way conditional branch, invisible to the CFG processing; used *only* as part of + ;; straight-line sequences in code to be emitted. + ;; + ;; In more detail: + ;; - This branch is lowered to a branch at the machine-code level, but does not end a basic + ;; block, and does not create edges in the CFG seen by regalloc. + ;; - Thus, it is *only* valid to use as part of a single-in, single-out sequence that is + ;; lowered from a single CLIF instruction. For example, certain arithmetic operations may + ;; use these branches to handle certain conditions, such as overflows, traps, etc. + ;; + ;; See, e.g., the lowering of `trapif` (conditional trap) for an example. + (OneWayCondBr + (target BranchTarget) + (cond Cond)) + + ;; An indirect branch through a register, augmented with set of all + ;; possible successors. + (IndirectBr + (rn Reg) + (targets VecMachLabel)) + + ;; A "debugtrap" instruction, used for e.g. traps and debug breakpoints. + (Debugtrap) + + ;; An instruction guaranteed to always be undefined and to trigger an illegal instruction at + ;; runtime. + (Trap + (trap_code TrapCode)) + + ;; Jump-table sequence, as one compound instruction (see note in lower.rs + ;; for rationale). + (JTSequence + (info BoxJTSequenceInfo) + (ridx Reg) + (rtmp1 WritableReg) + (rtmp2 WritableReg)) + + ;; Load an inline symbol reference with RelocDistance::Far. + (LoadExtNameFar + (rd WritableReg) + (name BoxExternalName) + (offset i64)) + + ;; Load address referenced by `mem` into `rd`. + (LoadAddr + (rd WritableReg) + (mem MemArg)) + + ;; Marker, no-op in generated code SP "virtual offset" is adjusted. This + ;; controls how MemArg::NominalSPOffset args are lowered. + (VirtualSPOffsetAdj + (offset i64)) + + ;; A definition of a value label. + (ValueLabelMarker + (reg Reg) + (label ValueLabel)) + + ;; An unwind pseudoinstruction describing the state of the + ;; machine at this program point. + (Unwind + (inst UnwindInst)) +)) + +;; Primitive types used in instruction formats. + +(type BoxCallInfo (primitive BoxCallInfo)) +(type BoxCallIndInfo (primitive BoxCallIndInfo)) +(type VecMachLabel (primitive VecMachLabel)) +(type BranchTarget (primitive BranchTarget)) +(type BoxJTSequenceInfo (primitive BoxJTSequenceInfo)) +(type BoxExternalName (primitive BoxExternalName)) +(type ValueLabel (primitive ValueLabel)) +(type UnwindInst (primitive UnwindInst)) + +;; An ALU operation. +(type ALUOp + (enum + (Add32) + (Add32Ext16) + (Add64) + (Add64Ext16) + (Add64Ext32) + (AddLogical32) + (AddLogical64) + (AddLogical64Ext32) + (Sub32) + (Sub32Ext16) + (Sub64) + (Sub64Ext16) + (Sub64Ext32) + (SubLogical32) + (SubLogical64) + (SubLogical64Ext32) + (Mul32) + (Mul32Ext16) + (Mul64) + (Mul64Ext16) + (Mul64Ext32) + (And32) + (And64) + (Orr32) + (Orr64) + (Xor32) + (Xor64) + ;; NAND + (AndNot32) + (AndNot64) + ;; NOR + (OrrNot32) + (OrrNot64) + ;; XNOR + (XorNot32) + (XorNot64) +)) + +;; A unary operation. +(type UnaryOp + (enum + (Abs32) + (Abs64) + (Abs64Ext32) + (Neg32) + (Neg64) + (Neg64Ext32) + (PopcntByte) + (PopcntReg) +)) + +;; A shift operation. +(type ShiftOp + (enum + (RotL32) + (RotL64) + (LShL32) + (LShL64) + (LShR32) + (LShR64) + (AShR32) + (AShR64) +)) + +;; An integer comparison operation. 
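
;; A note on how the machine-instruction type above is used from the
;; lowering rules: each `MInst` variant also acts as an ISLE constructor,
;; and inst.isle wraps the common ones in small emit helpers. A minimal
;; sketch of that idiom, using a hypothetical `alu_rrr` helper (the real
;; helpers such as `add_reg` used by lower.isle follow the same pattern):
;;
;;   (decl alu_rrr (Type ALUOp Reg Reg) Reg)
;;   (rule (alu_rrr ty op x y)
;;         (let ((dst WritableReg (temp_writable_reg ty))
;;               (_ Unit (emit (MInst.AluRRR op dst x y))))
;;           (writable_reg_to_reg dst)))
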
+(type CmpOp + (enum + (CmpS32) + (CmpS32Ext16) + (CmpS64) + (CmpS64Ext16) + (CmpS64Ext32) + (CmpL32) + (CmpL32Ext16) + (CmpL64) + (CmpL64Ext16) + (CmpL64Ext32) +)) + +;; A floating-point unit (FPU) operation with one arg. +(type FPUOp1 + (enum + (Abs32) + (Abs64) + (Neg32) + (Neg64) + (NegAbs32) + (NegAbs64) + (Sqrt32) + (Sqrt64) + (Cvt32To64) + (Cvt64To32) +)) + +;; A floating-point unit (FPU) operation with two args. +(type FPUOp2 + (enum + (Add32) + (Add64) + (Sub32) + (Sub64) + (Mul32) + (Mul64) + (Div32) + (Div64) + (Max32) + (Max64) + (Min32) + (Min64) +)) + +;; A floating-point unit (FPU) operation with three args. +(type FPUOp3 + (enum + (MAdd32) + (MAdd64) + (MSub32) + (MSub64) +)) + +;; A conversion from an FP to an integer value. +(type FpuToIntOp + (enum + (F32ToU32) + (F32ToI32) + (F32ToU64) + (F32ToI64) + (F64ToU32) + (F64ToI32) + (F64ToU64) + (F64ToI64) +)) + +;; A conversion from an integer to an FP value. +(type IntToFpuOp + (enum + (U32ToF32) + (I32ToF32) + (U32ToF64) + (I32ToF64) + (U64ToF32) + (I64ToF32) + (U64ToF64) + (I64ToF64) +)) + +;; Modes for FP rounding ops: round down (floor) or up (ceil), or toward zero +;; (trunc), or to nearest, and for 32- or 64-bit FP values. +(type FpuRoundMode + (enum + (Minus32) + (Minus64) + (Plus32) + (Plus64) + (Zero32) + (Zero64) + (Nearest32) + (Nearest64) +)) + + +;; Helpers for querying enabled ISA extensions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(decl mie2_enabled () Type) +(extern extractor mie2_enabled mie2_enabled) +(decl mie2_disabled () Type) +(extern extractor mie2_disabled mie2_disabled) + +(decl vxrs_ext2_enabled () Type) +(extern extractor vxrs_ext2_enabled vxrs_ext2_enabled) +(decl vxrs_ext2_disabled () Type) +(extern extractor vxrs_ext2_disabled vxrs_ext2_disabled) + +(decl allow_div_traps () Type) +(extern extractor allow_div_traps allow_div_traps) + + +;; Helpers to access instruction data members ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Extractor for `symbol_value` instruction data member. + +(decl symbol_value_data (BoxExternalName RelocDistance i64) Inst) +(extern extractor symbol_value_data symbol_value_data) + +;; Extractor for `call_target` instruction data members. + +(decl call_target_data (BoxExternalName RelocDistance) Inst) +(extern extractor call_target_data call_target_data) + + +;; Helpers for register numbers and types ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Hard-coded registers. +(decl writable_gpr (u8) WritableReg) +(extern constructor writable_gpr writable_gpr) + +;; The zero register. 
+(decl zero_reg () Reg) +(extern constructor zero_reg zero_reg) + +;; Types that can be operated on using 32-bit GPR instructions +(decl gpr32_ty (Type) Type) +(extern extractor gpr32_ty gpr32_ty) + +;; Types that can be operated on using 64-bit GPR instructions +(decl gpr64_ty (Type) Type) +(extern extractor gpr64_ty gpr64_ty) + + +;; Helpers for various immmediate constants ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Special integer types and their constructors + +(type UImm32Shifted (primitive UImm32Shifted)) +(decl uimm32shifted (u32 u8) UImm32Shifted) +(extern constructor uimm32shifted uimm32shifted) + +(type UImm16Shifted (primitive UImm16Shifted)) +(decl uimm16shifted (u16 u8) UImm16Shifted) +(extern constructor uimm16shifted uimm16shifted) + +;; Detect specific integer values + +(decl i64_nonequal (i64 i64) i64) +(extern extractor i64_nonequal i64_nonequal (out in)) + +(decl i64_nonzero (i64) i64) +(extractor (i64_nonzero val) (i64_nonequal val <0)) + +(decl i64_not_neg1 (i64) i64) +(extractor (i64_not_neg1 val) (i64_nonequal val <-1)) + +;; Integer type casts (with the rust `as` semantics). + +(decl u8_as_u16 (u8) u16) +(extern constructor u8_as_u16 u8_as_u16) + +(decl u64_as_u32 (u64) u32) +(extern constructor u64_as_u32 u64_as_u32) + +(decl u64_as_i16 (u64) i16) +(extern constructor u64_as_i16 u64_as_i16) + +;; Split an u64 into high and low parts. + +(decl u64_nonzero_hipart (u64) u64) +(extern extractor u64_nonzero_hipart u64_nonzero_hipart) + +(decl u64_nonzero_lopart (u64) u64) +(extern extractor u64_nonzero_lopart u64_nonzero_lopart) + +;; Extract smaller integer type from u64 if it matches. + +(decl i32_from_u64 (i32) u64) +(extern extractor i32_from_u64 i32_from_u64) + +(decl i16_from_u64 (i16) u64) +(extern extractor i16_from_u64 i16_from_u64) + +(decl uimm32shifted_from_u64 (UImm32Shifted) u64) +(extern extractor uimm32shifted_from_u64 uimm32shifted_from_u64) + +(decl uimm16shifted_from_u64 (UImm16Shifted) u64) +(extern extractor uimm16shifted_from_u64 uimm16shifted_from_u64) + +;; Extract integer of certain type from value if it matches. 
+ +(decl u64_from_value (u64) Value) +(extern extractor u64_from_value u64_from_value) + +(decl u32_from_value (u32) Value) +(extern extractor u32_from_value u32_from_value) + +(decl u8_from_value (u8) Value) +(extern extractor u8_from_value u8_from_value) + +(decl u64_from_signed_value (u64) Value) +(extern extractor u64_from_signed_value u64_from_signed_value) + +(decl i64_from_value (i64) Value) +(extern extractor i64_from_value i64_from_value) + +(decl i32_from_value (i32) Value) +(extern extractor i32_from_value i32_from_value) + +(decl i16_from_value (i16) Value) +(extern extractor i16_from_value i16_from_value) + +(decl i16_from_swapped_value (i16) Value) +(extern extractor i16_from_swapped_value i16_from_swapped_value) + +(decl i64_from_negated_value (i64) Value) +(extern extractor i64_from_negated_value i64_from_negated_value) + +(decl i32_from_negated_value (i32) Value) +(extern extractor i32_from_negated_value i32_from_negated_value) + +(decl i16_from_negated_value (i16) Value) +(extern extractor i16_from_negated_value i16_from_negated_value) + +(decl uimm16shifted_from_value (UImm16Shifted) Value) +(extern extractor uimm16shifted_from_value uimm16shifted_from_value) + +(decl uimm32shifted_from_value (UImm32Shifted) Value) +(extern extractor uimm32shifted_from_value uimm32shifted_from_value) + +(decl uimm16shifted_from_inverted_value (UImm16Shifted) Value) +(extern extractor uimm16shifted_from_inverted_value uimm16shifted_from_inverted_value) + +(decl uimm32shifted_from_inverted_value (UImm32Shifted) Value) +(extern extractor uimm32shifted_from_inverted_value uimm32shifted_from_inverted_value) + + +;; Helpers for masking shift amounts ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Mask (immediate) shift amount to the type size. +(decl mask_amt_imm (Type i64) u8) +(extern constructor mask_amt_imm mask_amt_imm) + +;; Mask (immediate) shift amount to the type size. +;; Note that the hardware instructions always masks to six bits, so +;; in the case of a 64-bit type we do not need any explicit masking. +(decl mask_amt_reg (Type Reg) Reg) +(rule (mask_amt_reg (gpr32_ty ty) reg) + (let ((mask u8 (mask_amt_imm ty -1))) + (and_uimm16shifted ty reg (uimm16shifted (u8_as_u16 mask) 0)))) +(rule (mask_amt_reg (gpr64_ty ty) reg) reg) + + +;; Helpers for condition codes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(type Cond extern (enum)) + +(decl mask_as_cond (u8) Cond) +(extern constructor mask_as_cond mask_as_cond) + +(decl intcc_as_cond (IntCC) Cond) +(extern constructor intcc_as_cond intcc_as_cond) + +(decl floatcc_as_cond (FloatCC) Cond) +(extern constructor floatcc_as_cond floatcc_as_cond) + +(decl invert_cond (Cond) Cond) +(extern constructor invert_cond invert_cond) + +(decl signed () IntCC) +(extern extractor signed signed) + +(decl unsigned () IntCC) +(extern extractor unsigned unsigned) + + +;; Helpers for memory arguments ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Accessors for `RelocDistance`. + +(type RelocDistance extern (enum)) + +(decl reloc_distance_near () RelocDistance) +(extern extractor reloc_distance_near reloc_distance_near) + +;; Accessors for `Offset32`. + +(decl zero_offset () Offset32) +(extern constructor zero_offset zero_offset) + +(decl i64_from_offset (i64) Offset32) +(extern extractor infallible i64_from_offset i64_from_offset) + +;; Accessors for `MemFlags`. 
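
;; To illustrate the shift-amount helpers above: for a 32-bit shift,
;; `mask_amt_imm` yields the mask 31, so `mask_amt_reg` ANDs the amount
;; register with that mask; for a 64-bit shift the register is returned
;; unchanged, because the hardware shift instructions only use the low six
;; bits of the amount anyway. A sketch of how a shift lowering rule would
;; combine them (`lshl_reg` is a representative constructor name, not
;; necessarily the one used in lower.isle):
;;
;;   (rule (lower (has_type (fits_in_64 ty) (ishl x y)))
;;         (value_reg (lshl_reg ty (put_in_reg x)
;;                              (mask_amt_reg ty (put_in_reg y)))))
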
+ +(decl littleendian () MemFlags) +(extern extractor littleendian littleendian) + +(decl bigendian () MemFlags) +(extern extractor bigendian bigendian) + +(decl memflags_trusted () MemFlags) +(extern constructor memflags_trusted memflags_trusted) + +;; Accessors for `MemArg`. + +(type MemArg extern (enum)) + +(decl memarg_reg_plus_reg (Reg Reg MemFlags) MemArg) +(extern constructor memarg_reg_plus_reg memarg_reg_plus_reg) + +(decl memarg_reg_plus_off (Reg i64 MemFlags) MemArg) +(extern constructor memarg_reg_plus_off memarg_reg_plus_off) + +(decl memarg_symbol (BoxExternalName i32 MemFlags) MemArg) +(extern constructor memarg_symbol memarg_symbol) + +;; Form the sum of two offset values, and check that the result is +;; a valid `MemArg::Symbol` offset (i.e. is even and fits into i32). +(decl memarg_symbol_offset_sum (i64 i32) i64) +(extern extractor memarg_symbol_offset_sum memarg_symbol_offset_sum (in out)) + +;; Likewise, but just check a single offset value. +(decl memarg_symbol_offset (i32) i64) +(extractor (memarg_symbol_offset offset) (memarg_symbol_offset_sum <0 offset)) + +;; Lower an address into a `MemArg`. + +(decl lower_address (MemFlags Value Offset32) MemArg) + +(rule (lower_address flags addr (i64_from_offset offset)) + (memarg_reg_plus_off (put_in_reg addr) offset flags)) + +(rule (lower_address flags (def_inst (iadd x y)) (i64_from_offset 0)) + (memarg_reg_plus_reg (put_in_reg x) (put_in_reg y) flags)) + +(rule (lower_address flags + (def_inst (symbol_value_data name (reloc_distance_near) offset)) + (i64_from_offset (memarg_symbol_offset_sum InstructionSet { - match self { - ALUOp::AndNot32 | ALUOp::AndNot64 => InstructionSet::MIE2, - ALUOp::OrrNot32 | ALUOp::OrrNot64 => InstructionSet::MIE2, - ALUOp::XorNot32 | ALUOp::XorNot64 => InstructionSet::MIE2, - _ => InstructionSet::Base, - } - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum UnaryOp { - Abs32, - Abs64, - Abs64Ext32, - Neg32, - Neg64, - Neg64Ext32, - PopcntByte, - PopcntReg, -} - -impl UnaryOp { - pub(crate) fn available_from(&self) -> InstructionSet { - match self { - UnaryOp::PopcntReg => InstructionSet::MIE2, - _ => InstructionSet::Base, - } - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum ShiftOp { - RotL32, - RotL64, - LShL32, - LShL64, - LShR32, - LShR64, - AShR32, - AShR64, -} - -/// An integer comparison operation. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum CmpOp { - CmpS32, - CmpS32Ext16, - CmpS64, - CmpS64Ext16, - CmpS64Ext32, - CmpL32, - CmpL32Ext16, - CmpL64, - CmpL64Ext16, - CmpL64Ext32, -} - -/// A floating-point unit (FPU) operation with one arg. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum FPUOp1 { - Abs32, - Abs64, - Neg32, - Neg64, - NegAbs32, - NegAbs64, - Sqrt32, - Sqrt64, - Cvt32To64, - Cvt64To32, -} - -/// A floating-point unit (FPU) operation with two args. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum FPUOp2 { - Add32, - Add64, - Sub32, - Sub64, - Mul32, - Mul64, - Div32, - Div64, - Max32, - Max64, - Min32, - Min64, -} - -/// A floating-point unit (FPU) operation with three args. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum FPUOp3 { - MAdd32, - MAdd64, - MSub32, - MSub64, -} - -/// A conversion from an FP to an integer value. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum FpuToIntOp { - F32ToU32, - F32ToI32, - F32ToU64, - F32ToI64, - F64ToU32, - F64ToI32, - F64ToU64, - F64ToI64, -} - -/// A conversion from an integer to an FP value. 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum IntToFpuOp { - U32ToF32, - I32ToF32, - U32ToF64, - I32ToF64, - U64ToF32, - I64ToF32, - U64ToF64, - I64ToF64, -} - -/// Modes for FP rounding ops: round down (floor) or up (ceil), or toward zero (trunc), or to -/// nearest, and for 32- or 64-bit FP values. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum FpuRoundMode { - Minus32, - Minus64, - Plus32, - Plus64, - Zero32, - Zero64, - Nearest32, - Nearest64, -} +pub use crate::isa::s390x::lower::isle::generated_code::{ + ALUOp, CmpOp, FPUOp1, FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, + ShiftOp, UnaryOp, +}; /// Additional information for (direct) Call instructions, left out of line to lower the size of /// the Inst enum. @@ -257,726 +67,6 @@ pub struct JTSequenceInfo { pub targets_for_term: Vec, // needed for MachTerminator. } -/// Instruction formats. -#[derive(Clone, Debug)] -pub enum Inst { - /// A no-op of zero size. - Nop0, - - /// A no-op of size two bytes. - Nop2, - - /// An ALU operation with two register sources and a register destination. - AluRRR { - alu_op: ALUOp, - rd: Writable, - rn: Reg, - rm: Reg, - }, - /// An ALU operation with a register source and a signed 16-bit - /// immediate source, and a separate register destination. - AluRRSImm16 { - alu_op: ALUOp, - rd: Writable, - rn: Reg, - imm: i16, - }, - /// An ALU operation with a register in-/out operand and - /// a second register source. - AluRR { - alu_op: ALUOp, - rd: Writable, - rm: Reg, - }, - /// An ALU operation with a register in-/out operand and - /// a memory source. - AluRX { - alu_op: ALUOp, - rd: Writable, - mem: MemArg, - }, - /// An ALU operation with a register in-/out operand and a signed 16-bit - /// immediate source. - AluRSImm16 { - alu_op: ALUOp, - rd: Writable, - imm: i16, - }, - /// An ALU operation with a register in-/out operand and a signed 32-bit - /// immediate source. - AluRSImm32 { - alu_op: ALUOp, - rd: Writable, - imm: i32, - }, - /// An ALU operation with a register in-/out operand and an unsigned 32-bit - /// immediate source. - AluRUImm32 { - alu_op: ALUOp, - rd: Writable, - imm: u32, - }, - /// An ALU operation with a register in-/out operand and a shifted 16-bit - /// immediate source. - AluRUImm16Shifted { - alu_op: ALUOp, - rd: Writable, - imm: UImm16Shifted, - }, - /// An ALU operation with a register in-/out operand and a shifted 32-bit - /// immediate source. - AluRUImm32Shifted { - alu_op: ALUOp, - rd: Writable, - imm: UImm32Shifted, - }, - /// A multiply operation with two register sources and a register pair destination. - /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs. - SMulWide { - rn: Reg, - rm: Reg, - }, - /// A multiply operation with an in/out register pair, and an extra register source. - /// Only the lower half of the register pair is used as input. - /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs. - UMulWide { - rn: Reg, - }, - /// A divide operation with an in/out register pair, and an extra register source. - /// Only the lower half of the register pair is used as input. - /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs. - SDivMod32 { - rn: Reg, - }, - SDivMod64 { - rn: Reg, - }, - /// A divide operation with an in/out register pair, and an extra register source. - /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs. 
- UDivMod32 { - rn: Reg, - }, - UDivMod64 { - rn: Reg, - }, - /// A FLOGR operation with a register source and a register pair destination. - /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs. - Flogr { - rn: Reg, - }, - - /// A shift instruction with a register source, a register destination, - /// and an immediate plus an optional register as shift count. - ShiftRR { - shift_op: ShiftOp, - rd: Writable, - rn: Reg, - shift_imm: u8, - shift_reg: Reg, - }, - - /// An unary operation with a register source and a register destination. - UnaryRR { - op: UnaryOp, - rd: Writable, - rn: Reg, - }, - - /// A compare operation with two register sources. - CmpRR { - op: CmpOp, - rn: Reg, - rm: Reg, - }, - /// A compare operation with a register source and a memory source. - CmpRX { - op: CmpOp, - rn: Reg, - mem: MemArg, - }, - /// A compare operation with a register source and a signed 16-bit - /// immediate source. - CmpRSImm16 { - op: CmpOp, - rn: Reg, - imm: i16, - }, - /// A compare operation with a register source and a signed 32-bit - /// immediate source. - CmpRSImm32 { - op: CmpOp, - rn: Reg, - imm: i32, - }, - /// A compare operation with a register source and a unsigned 32-bit - /// immediate source. - CmpRUImm32 { - op: CmpOp, - rn: Reg, - imm: u32, - }, - /// A compare-and-trap instruction with two register sources. - CmpTrapRR { - op: CmpOp, - rn: Reg, - rm: Reg, - cond: Cond, - trap_code: TrapCode, - }, - /// A compare-and-trap operation with a register source and a signed 16-bit - /// immediate source. - CmpTrapRSImm16 { - op: CmpOp, - rn: Reg, - imm: i16, - cond: Cond, - trap_code: TrapCode, - }, - /// A compare-and-trap operation with a register source and an unsigned 16-bit - /// immediate source. - CmpTrapRUImm16 { - op: CmpOp, - rn: Reg, - imm: u16, - cond: Cond, - trap_code: TrapCode, - }, - - /// An atomic read-modify-write operation with a memory in-/out operand, - /// a register destination, and a register source. - /// a memory source. - AtomicRmw { - alu_op: ALUOp, - rd: Writable, - rn: Reg, - mem: MemArg, - }, - /// A 32-bit atomic compare-and-swap operation. - AtomicCas32 { - rd: Writable, - rn: Reg, - mem: MemArg, - }, - /// A 64-bit atomic compare-and-swap operation. - AtomicCas64 { - rd: Writable, - rn: Reg, - mem: MemArg, - }, - /// A memory fence operation. - Fence, - - /// A 32-bit load. - Load32 { - rd: Writable, - mem: MemArg, - }, - /// An unsigned (zero-extending) 8-bit to 32-bit load. - Load32ZExt8 { - rd: Writable, - mem: MemArg, - }, - /// A signed (sign-extending) 8-bit to 32-bit load. - Load32SExt8 { - rd: Writable, - mem: MemArg, - }, - /// An unsigned (zero-extending) 16-bit to 32-bit load. - Load32ZExt16 { - rd: Writable, - mem: MemArg, - }, - /// A signed (sign-extending) 16-bit to 32-bit load. - Load32SExt16 { - rd: Writable, - mem: MemArg, - }, - /// A 64-bit load. - Load64 { - rd: Writable, - mem: MemArg, - }, - /// An unsigned (zero-extending) 8-bit to 64-bit load. - Load64ZExt8 { - rd: Writable, - mem: MemArg, - }, - /// A signed (sign-extending) 8-bit to 64-bit load. - Load64SExt8 { - rd: Writable, - mem: MemArg, - }, - /// An unsigned (zero-extending) 16-bit to 64-bit load. - Load64ZExt16 { - rd: Writable, - mem: MemArg, - }, - /// A signed (sign-extending) 16-bit to 64-bit load. - Load64SExt16 { - rd: Writable, - mem: MemArg, - }, - /// An unsigned (zero-extending) 32-bit to 64-bit load. - Load64ZExt32 { - rd: Writable, - mem: MemArg, - }, - /// A signed (sign-extending) 32-bit to 64-bit load. 
- Load64SExt32 { - rd: Writable, - mem: MemArg, - }, - - /// A 16-bit byte-reversed load. - LoadRev16 { - rd: Writable, - mem: MemArg, - }, - /// A 32-bit byte-reversed load. - LoadRev32 { - rd: Writable, - mem: MemArg, - }, - /// A 64-bit byte-reversed load. - LoadRev64 { - rd: Writable, - mem: MemArg, - }, - - /// An 8-bit store. - Store8 { - rd: Reg, - mem: MemArg, - }, - /// A 16-bit store. - Store16 { - rd: Reg, - mem: MemArg, - }, - /// A 32-bit store. - Store32 { - rd: Reg, - mem: MemArg, - }, - /// A 64-bit store. - Store64 { - rd: Reg, - mem: MemArg, - }, - /// An 8-bit store of an immediate. - StoreImm8 { - imm: u8, - mem: MemArg, - }, - /// A 16-bit store of an immediate. - StoreImm16 { - imm: i16, - mem: MemArg, - }, - /// A 32-bit store of a sign-extended 16-bit immediate. - StoreImm32SExt16 { - imm: i16, - mem: MemArg, - }, - /// A 64-bit store of a sign-extended 16-bit immediate. - StoreImm64SExt16 { - imm: i16, - mem: MemArg, - }, - - /// A 16-bit byte-reversed store. - StoreRev16 { - rd: Reg, - mem: MemArg, - }, - /// A 32-bit byte-reversed store. - StoreRev32 { - rd: Reg, - mem: MemArg, - }, - /// A 64-bit byte-reversed store. - StoreRev64 { - rd: Reg, - mem: MemArg, - }, - - /// A load-multiple instruction. - LoadMultiple64 { - rt: Writable, - rt2: Writable, - mem: MemArg, - }, - /// A store-multiple instruction. - StoreMultiple64 { - rt: Reg, - rt2: Reg, - mem: MemArg, - }, - - /// A 32-bit move instruction. - Mov32 { - rd: Writable, - rm: Reg, - }, - /// A 64-bit move instruction. - Mov64 { - rd: Writable, - rm: Reg, - }, - /// A 32-bit move instruction with a full 32-bit immediate. - Mov32Imm { - rd: Writable, - imm: u32, - }, - /// A 32-bit move instruction with a 16-bit signed immediate. - Mov32SImm16 { - rd: Writable, - imm: i16, - }, - /// A 64-bit move instruction with a 16-bit signed immediate. - Mov64SImm16 { - rd: Writable, - imm: i16, - }, - /// A 64-bit move instruction with a 32-bit signed immediate. - Mov64SImm32 { - rd: Writable, - imm: i32, - }, - /// A 64-bit move instruction with a shifted 16-bit immediate. - Mov64UImm16Shifted { - rd: Writable, - imm: UImm16Shifted, - }, - /// A 64-bit move instruction with a shifted 32-bit immediate. - Mov64UImm32Shifted { - rd: Writable, - imm: UImm32Shifted, - }, - - /// A 64-bit insert instruction with a shifted 16-bit immediate. - Insert64UImm16Shifted { - rd: Writable, - imm: UImm16Shifted, - }, - /// A 64-bit insert instruction with a shifted 32-bit immediate. - Insert64UImm32Shifted { - rd: Writable, - imm: UImm32Shifted, - }, - - /// A sign- or zero-extend operation. - Extend { - rd: Writable, - rn: Reg, - signed: bool, - from_bits: u8, - to_bits: u8, - }, - - /// A 32-bit conditional move instruction. - CMov32 { - rd: Writable, - cond: Cond, - rm: Reg, - }, - /// A 64-bit conditional move instruction. - CMov64 { - rd: Writable, - cond: Cond, - rm: Reg, - }, - /// A 32-bit conditional move instruction with a 16-bit signed immediate. - CMov32SImm16 { - rd: Writable, - cond: Cond, - imm: i16, - }, - /// A 64-bit conditional move instruction with a 16-bit signed immediate. - CMov64SImm16 { - rd: Writable, - cond: Cond, - imm: i16, - }, - - /// 32-bit FPU move. - FpuMove32 { - rd: Writable, - rn: Reg, - }, - /// 64-bit FPU move. - FpuMove64 { - rd: Writable, - rn: Reg, - }, - - /// A 32-bit conditional move FPU instruction. - FpuCMov32 { - rd: Writable, - cond: Cond, - rm: Reg, - }, - /// A 64-bit conditional move FPU instruction. 
- FpuCMov64 { - rd: Writable, - cond: Cond, - rm: Reg, - }, - - /// A 64-bit move instruction from GPR to FPR. - MovToFpr { - rd: Writable, - rn: Reg, - }, - /// A 64-bit move instruction from FPR to GPR. - MovFromFpr { - rd: Writable, - rn: Reg, - }, - - /// 1-op FPU instruction. - FpuRR { - fpu_op: FPUOp1, - rd: Writable, - rn: Reg, - }, - - /// 2-op FPU instruction. - FpuRRR { - fpu_op: FPUOp2, - rd: Writable, - rm: Reg, - }, - - /// 3-op FPU instruction. - FpuRRRR { - fpu_op: FPUOp3, - rd: Writable, - rn: Reg, - rm: Reg, - }, - - /// FPU copy sign instruction. - FpuCopysign { - rd: Writable, - rn: Reg, - rm: Reg, - }, - - /// FPU comparison, single-precision (32 bit). - FpuCmp32 { - rn: Reg, - rm: Reg, - }, - - /// FPU comparison, double-precision (64 bit). - FpuCmp64 { - rn: Reg, - rm: Reg, - }, - - /// Floating-point load, single-precision (32 bit). - FpuLoad32 { - rd: Writable, - mem: MemArg, - }, - /// Floating-point store, single-precision (32 bit). - FpuStore32 { - rd: Reg, - mem: MemArg, - }, - /// Floating-point load, double-precision (64 bit). - FpuLoad64 { - rd: Writable, - mem: MemArg, - }, - /// Floating-point store, double-precision (64 bit). - FpuStore64 { - rd: Reg, - mem: MemArg, - }, - /// Floating-point byte-reversed load, single-precision (32 bit). - FpuLoadRev32 { - rd: Writable, - mem: MemArg, - }, - /// Floating-point byte-reversed store, single-precision (32 bit). - FpuStoreRev32 { - rd: Reg, - mem: MemArg, - }, - /// Floating-point byte-reversed load, double-precision (64 bit). - FpuLoadRev64 { - rd: Writable, - mem: MemArg, - }, - /// Floating-point byte-reversed store, double-precision (64 bit). - FpuStoreRev64 { - rd: Reg, - mem: MemArg, - }, - - LoadFpuConst32 { - rd: Writable, - const_data: u32, - }, - - LoadFpuConst64 { - rd: Writable, - const_data: u64, - }, - - /// Conversion: FP -> integer. - FpuToInt { - op: FpuToIntOp, - rd: Writable, - rn: Reg, - }, - - /// Conversion: integer -> FP. - IntToFpu { - op: IntToFpuOp, - rd: Writable, - rn: Reg, - }, - - /// Round to integer. - FpuRound { - op: FpuRoundMode, - rd: Writable, - rn: Reg, - }, - - /// 2-op FPU instruction implemented as vector instruction with the W bit. - FpuVecRRR { - fpu_op: FPUOp2, - rd: Writable, - rn: Reg, - rm: Reg, - }, - - /// A machine call instruction. - Call { - link: Writable, - info: Box, - }, - /// A machine indirect-call instruction. - CallInd { - link: Writable, - info: Box, - }, - - // ---- branches (exactly one must appear at end of BB) ---- - /// A machine return instruction. - Ret { - link: Reg, - }, - - /// A placeholder instruction, generating no code, meaning that a function epilogue must be - /// inserted there. - EpiloguePlaceholder, - - /// An unconditional branch. - Jump { - dest: BranchTarget, - }, - - /// A conditional branch. Contains two targets; at emission time, both are emitted, but - /// the MachBuffer knows to truncate the trailing branch if fallthrough. We optimize the - /// choice of taken/not_taken (inverting the branch polarity as needed) based on the - /// fallthrough at the time of lowering. - CondBr { - taken: BranchTarget, - not_taken: BranchTarget, - cond: Cond, - }, - - /// A conditional trap: execute a `Trap` if the condition is true. This is - /// one VCode instruction because it uses embedded control flow; it is - /// logically a single-in, single-out region, but needs to appear as one - /// unit to the register allocator. - /// - /// The `Cond` gives the conditional-branch condition that will - /// *execute* the embedded `Trap`. 
(In the emitted code, we use the inverse - /// of this condition in a branch that skips the trap instruction.) - TrapIf { - cond: Cond, - trap_code: TrapCode, - }, - - /// A one-way conditional branch, invisible to the CFG processing; used *only* as part of - /// straight-line sequences in code to be emitted. - /// - /// In more detail: - /// - This branch is lowered to a branch at the machine-code level, but does not end a basic - /// block, and does not create edges in the CFG seen by regalloc. - /// - Thus, it is *only* valid to use as part of a single-in, single-out sequence that is - /// lowered from a single CLIF instruction. For example, certain arithmetic operations may - /// use these branches to handle certain conditions, such as overflows, traps, etc. - /// - /// See, e.g., the lowering of `trapif` (conditional trap) for an example. - OneWayCondBr { - target: BranchTarget, - cond: Cond, - }, - - /// An indirect branch through a register, augmented with set of all - /// possible successors. - IndirectBr { - rn: Reg, - targets: Vec, - }, - - /// A "debugtrap" instruction, used for e.g. traps and debug breakpoints. - Debugtrap, - - /// An instruction guaranteed to always be undefined and to trigger an illegal instruction at - /// runtime. - Trap { - trap_code: TrapCode, - }, - - /// Jump-table sequence, as one compound instruction (see note in lower.rs - /// for rationale). - JTSequence { - info: Box, - ridx: Reg, - rtmp1: Writable, - rtmp2: Writable, - }, - - /// Load an inline symbol reference with RelocDistance::Far. - LoadExtNameFar { - rd: Writable, - name: Box, - offset: i64, - }, - - /// Load address referenced by `mem` into `rd`. - LoadAddr { - rd: Writable, - mem: MemArg, - }, - - /// Marker, no-op in generated code: SP "virtual offset" is adjusted. This - /// controls how MemArg::NominalSPOffset args are lowered. - VirtualSPOffsetAdj { - offset: i64, - }, - - /// A definition of a value label. - ValueLabelMarker { - reg: Reg, - label: ValueLabel, - }, - - /// An unwind pseudoinstruction describing the state of the - /// machine at this program point. - Unwind { - inst: UnwindInst, - }, -} - #[test] fn inst_size_test() { // This test will help with unintentionally growing the size @@ -984,6 +74,18 @@ fn inst_size_test() { assert_eq!(32, std::mem::size_of::()); } +/// Supported instruction sets +#[allow(non_camel_case_types)] +#[derive(Debug)] +pub(crate) enum InstructionSet { + /// Baseline ISA for cranelift is z14. + Base, + /// Miscellaneous-Instruction-Extensions Facility 2 (z15) + MIE2, + /// Vector-Enhancements Facility 2 (z15) + VXRS_EXT2, +} + impl Inst { /// Retrieve the ISA feature set in which the instruction is available. fn available_in_isa(&self) -> InstructionSet { @@ -1103,8 +205,16 @@ impl Inst { | Inst::Unwind { .. } => InstructionSet::Base, // These depend on the opcode - Inst::AluRRR { alu_op, .. } => alu_op.available_from(), - Inst::UnaryRR { op, .. } => op.available_from(), + Inst::AluRRR { alu_op, .. } => match alu_op { + ALUOp::AndNot32 | ALUOp::AndNot64 => InstructionSet::MIE2, + ALUOp::OrrNot32 | ALUOp::OrrNot64 => InstructionSet::MIE2, + ALUOp::XorNot32 | ALUOp::XorNot64 => InstructionSet::MIE2, + _ => InstructionSet::Base, + }, + Inst::UnaryRR { op, .. } => match op { + UnaryOp::PopcntReg => InstructionSet::MIE2, + _ => InstructionSet::Base, + }, // These are all part of VXRS_EXT2 Inst::FpuLoadRev32 { .. 
} @@ -1601,29 +711,8 @@ fn s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector) { //============================================================================= // Instructions: map_regs -fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { - fn map_use(m: &RUM, r: &mut Reg) { - if r.is_virtual() { - let new = m.get_use(r.to_virtual_reg()).unwrap().to_reg(); - *r = new; - } - } - - fn map_def(m: &RUM, r: &mut Writable) { - if r.to_reg().is_virtual() { - let new = m.get_def(r.to_reg().to_virtual_reg()).unwrap().to_reg(); - *r = Writable::from_reg(new); - } - } - - fn map_mod(m: &RUM, r: &mut Writable) { - if r.to_reg().is_virtual() { - let new = m.get_mod(r.to_reg().to_virtual_reg()).unwrap().to_reg(); - *r = Writable::from_reg(new); - } - } - - fn map_mem(m: &RUM, mem: &mut MemArg) { +pub fn s390x_map_regs(inst: &mut Inst, mapper: &RM) { + fn map_mem(m: &RM, mem: &mut MemArg) { match mem { &mut MemArg::BXD12 { ref mut base, @@ -1636,14 +725,14 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { .. } => { if *base != zero_reg() { - map_use(m, base); + m.map_use(base); } if *index != zero_reg() { - map_use(m, index); + m.map_use(index); } } &mut MemArg::Label { .. } | &mut MemArg::Symbol { .. } => {} - &mut MemArg::RegOffset { ref mut reg, .. } => map_use(m, reg), + &mut MemArg::RegOffset { ref mut reg, .. } => m.map_use(reg), &mut MemArg::InitialSPOffset { .. } | &mut MemArg::NominalSPOffset { .. } => {} }; } @@ -1655,24 +744,24 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rm, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_def(rd); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::AluRRSImm16 { ref mut rd, ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::AluRX { ref mut rd, ref mut mem, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); map_mem(mapper, mem); } &mut Inst::AluRR { @@ -1680,49 +769,49 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rm); } &mut Inst::AluRSImm16 { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::AluRSImm32 { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::AluRUImm32 { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::AluRUImm16Shifted { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::AluRUImm32Shifted { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::SMulWide { ref mut rn, ref mut rm, .. } => { - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::UMulWide { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::SDivMod32 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::SDivMod64 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::UDivMod32 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::UDivMod64 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::Flogr { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::ShiftRR { ref mut rd, @@ -1730,10 +819,10 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut shift_reg, .. 
} => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); if *shift_reg != zero_reg() { - map_use(mapper, shift_reg); + mapper.map_use(shift_reg); } } &mut Inst::UnaryRR { @@ -1741,47 +830,47 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::CmpRR { ref mut rn, ref mut rm, .. } => { - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::CmpRX { ref mut rn, ref mut mem, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); map_mem(mapper, mem); } &mut Inst::CmpRSImm16 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::CmpRSImm32 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::CmpRUImm32 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::CmpTrapRR { ref mut rn, ref mut rm, .. } => { - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::CmpTrapRSImm16 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::CmpTrapRUImm16 { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::AtomicRmw { @@ -1790,8 +879,8 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); map_mem(mapper, mem); } &mut Inst::AtomicCas32 { @@ -1800,8 +889,8 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_mod(mapper, rd); - map_use(mapper, rn); + mapper.map_mod(rd); + mapper.map_use(rn); map_mem(mapper, mem); } &mut Inst::AtomicCas64 { @@ -1810,8 +899,8 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_mod(mapper, rd); - map_use(mapper, rn); + mapper.map_mod(rd); + mapper.map_use(rn); map_mem(mapper, mem); } &mut Inst::Fence => {} @@ -1821,7 +910,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load32ZExt8 { @@ -1829,7 +918,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load32SExt8 { @@ -1837,7 +926,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load32ZExt16 { @@ -1845,7 +934,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load32SExt16 { @@ -1853,7 +942,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64 { @@ -1861,7 +950,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64ZExt8 { @@ -1869,7 +958,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64SExt8 { @@ -1877,7 +966,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. 
} => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64ZExt16 { @@ -1885,7 +974,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64SExt16 { @@ -1893,7 +982,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64ZExt32 { @@ -1901,7 +990,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::Load64SExt32 { @@ -1909,7 +998,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::LoadRev16 { @@ -1917,7 +1006,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::LoadRev32 { @@ -1925,7 +1014,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::LoadRev64 { @@ -1933,7 +1022,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } @@ -1942,7 +1031,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::Store16 { @@ -1950,7 +1039,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::Store32 { @@ -1958,7 +1047,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::Store64 { @@ -1966,7 +1055,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::StoreImm8 { ref mut mem, .. } => { @@ -1986,7 +1075,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::StoreRev32 { @@ -1994,7 +1083,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::StoreRev64 { @@ -2002,7 +1091,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::LoadMultiple64 { .. } => { @@ -2022,121 +1111,121 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rd, ref mut rm, } => { - map_def(mapper, rd); - map_use(mapper, rm); + mapper.map_def(rd); + mapper.map_use(rm); } &mut Inst::Mov32 { ref mut rd, ref mut rm, } => { - map_def(mapper, rd); - map_use(mapper, rm); + mapper.map_def(rd); + mapper.map_use(rm); } &mut Inst::Mov32Imm { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::Mov32SImm16 { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::Mov64SImm16 { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::Mov64SImm32 { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::Mov64UImm16Shifted { ref mut rd, .. 
} => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::Mov64UImm32Shifted { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::Insert64UImm16Shifted { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::Insert64UImm32Shifted { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::CMov64 { ref mut rd, ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rm); } &mut Inst::CMov32 { ref mut rd, ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rm); } &mut Inst::CMov32SImm16 { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::CMov64SImm16 { ref mut rd, .. } => { - map_mod(mapper, rd); + mapper.map_mod(rd); } &mut Inst::FpuMove32 { ref mut rd, ref mut rn, } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::FpuMove64 { ref mut rd, ref mut rn, } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::FpuCMov64 { ref mut rd, ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rm); } &mut Inst::FpuCMov32 { ref mut rd, ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rm); } &mut Inst::MovToFpr { ref mut rd, ref mut rn, } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::MovFromFpr { ref mut rd, ref mut rn, } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::FpuRR { ref mut rd, ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::FpuRRR { ref mut rd, ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rm); } &mut Inst::FpuRRRR { ref mut rd, @@ -2144,9 +1233,9 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rm, .. } => { - map_mod(mapper, rd); - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_mod(rd); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::FpuCopysign { ref mut rd, @@ -2154,30 +1243,30 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rm, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_def(rd); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::FpuCmp32 { ref mut rn, ref mut rm, } => { - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::FpuCmp64 { ref mut rn, ref mut rm, } => { - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::FpuLoad32 { ref mut rd, ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::FpuLoad64 { @@ -2185,7 +1274,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::FpuStore32 { @@ -2193,7 +1282,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::FpuStore64 { @@ -2201,7 +1290,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. 
} => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::FpuLoadRev32 { @@ -2209,7 +1298,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::FpuLoadRev64 { @@ -2217,7 +1306,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::FpuStoreRev32 { @@ -2225,7 +1314,7 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::FpuStoreRev64 { @@ -2233,38 +1322,38 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut mem, .. } => { - map_use(mapper, rd); + mapper.map_use(rd); map_mem(mapper, mem); } &mut Inst::LoadFpuConst32 { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::LoadFpuConst64 { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::FpuToInt { ref mut rd, ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::IntToFpu { ref mut rd, ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::FpuRound { ref mut rd, ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::FpuVecRRR { ref mut rd, @@ -2272,28 +1361,28 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rm, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); - map_use(mapper, rm); + mapper.map_def(rd); + mapper.map_use(rn); + mapper.map_use(rm); } &mut Inst::Extend { ref mut rd, ref mut rn, .. } => { - map_def(mapper, rd); - map_use(mapper, rn); + mapper.map_def(rd); + mapper.map_use(rn); } &mut Inst::Call { ref mut link, ref mut info, } => { - map_def(mapper, link); + mapper.map_def(link); for r in info.uses.iter_mut() { - map_use(mapper, r); + mapper.map_use(r); } for r in info.defs.iter_mut() { - map_def(mapper, r); + mapper.map_def(r); } } &mut Inst::CallInd { @@ -2301,20 +1390,20 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut info, .. } => { - map_def(mapper, link); + mapper.map_def(link); for r in info.uses.iter_mut() { - map_use(mapper, r); + mapper.map_use(r); } for r in info.defs.iter_mut() { - map_def(mapper, r); + mapper.map_def(r); } - map_use(mapper, &mut info.rn); + mapper.map_use(&mut info.rn); } &mut Inst::Ret { .. } => {} &mut Inst::EpiloguePlaceholder => {} &mut Inst::Jump { .. } => {} &mut Inst::IndirectBr { ref mut rn, .. } => { - map_use(mapper, rn); + mapper.map_use(rn); } &mut Inst::CondBr { .. } | &mut Inst::OneWayCondBr { .. } => {} &mut Inst::Debugtrap | &mut Inst::Trap { .. } | &mut Inst::TrapIf { .. } => {} @@ -2325,23 +1414,23 @@ fn s390x_map_regs(inst: &mut Inst, mapper: &RUM) { ref mut rtmp2, .. } => { - map_use(mapper, ridx); - map_def(mapper, rtmp1); - map_def(mapper, rtmp2); + mapper.map_use(ridx); + mapper.map_def(rtmp1); + mapper.map_def(rtmp2); } &mut Inst::LoadExtNameFar { ref mut rd, .. } => { - map_def(mapper, rd); + mapper.map_def(rd); } &mut Inst::LoadAddr { ref mut rd, ref mut mem, } => { - map_def(mapper, rd); + mapper.map_def(rd); map_mem(mapper, mem); } &mut Inst::VirtualSPOffsetAdj { .. } => {} &mut Inst::ValueLabelMarker { ref mut reg, .. } => { - map_use(mapper, reg); + mapper.map_use(reg); } &mut Inst::Unwind { .. 
} => {} } diff --git a/cranelift/codegen/src/isa/s390x/lower.isle b/cranelift/codegen/src/isa/s390x/lower.isle new file mode 100644 index 000000000000..ae464c5cc91b --- /dev/null +++ b/cranelift/codegen/src/isa/s390x/lower.isle @@ -0,0 +1,1780 @@ +;; s390x instruction selection and CLIF-to-MachInst lowering. + +;; The main lowering constructor term: takes a clif `Inst` and returns the +;; register(s) within which the lowered instruction's result values live. +(decl lower (Inst) ValueRegs) + + +;;;; Rules for `iconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (has_type ty (iconst (u64_from_imm64 n)))) + (value_reg (imm ty n))) + + +;;;; Rules for `bconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (has_type ty (bconst $false))) + (value_reg (imm ty 0))) +(rule (lower (has_type ty (bconst $true))) + (value_reg (imm ty 1))) + + +;;;; Rules for `f32const` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (f32const (u64_from_ieee32 x))) + (value_reg (imm $F32 x))) + + +;;;; Rules for `f64const` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (f64const (u64_from_ieee64 x))) + (value_reg (imm $F64 x))) + + +;;;; Rules for `null` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (has_type ty (null))) + (value_reg (imm ty 0))) + + +;;;; Rules for `nop` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (nop)) + (value_reg (invalid_reg))) + + +;;;; Rules for `copy` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(rule (lower (copy x)) + (value_reg (put_in_reg x))) + + +;;;; Rules for `iadd` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Add two registers. +(rule (lower (has_type (fits_in_64 ty) (iadd x y))) + (value_reg (add_reg ty (put_in_reg x) (put_in_reg y)))) + +;; Add a register and a sign-extended register. +(rule (lower (has_type (fits_in_64 ty) (iadd x (sext32_value y)))) + (value_reg (add_reg_sext32 ty (put_in_reg x) (put_in_reg y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd (sext32_value x) y))) + (value_reg (add_reg_sext32 ty (put_in_reg y) (put_in_reg x)))) + +;; Add a register and an immediate. +(rule (lower (has_type (fits_in_64 ty) (iadd x (i16_from_value y)))) + (value_reg (add_simm16 ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (iadd (i16_from_value x) y))) + (value_reg (add_simm16 ty (put_in_reg y) x))) +(rule (lower (has_type (fits_in_64 ty) (iadd x (i32_from_value y)))) + (value_reg (add_simm32 ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (iadd (i32_from_value x) y))) + (value_reg (add_simm32 ty (put_in_reg y) x))) + +;; Add a register and memory (32/64-bit types). +(rule (lower (has_type (fits_in_64 ty) (iadd x (sinkable_load_32_64 y)))) + (value_reg (add_mem ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd (sinkable_load_32_64 x) y))) + (value_reg (add_mem ty (put_in_reg y) (sink_load x)))) + +;; Add a register and memory (16-bit types). +(rule (lower (has_type (fits_in_64 ty) (iadd x (sinkable_load_16 y)))) + (value_reg (add_mem_sext16 ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd (sinkable_load_16 x) y))) + (value_reg (add_mem_sext16 ty (put_in_reg y) (sink_load x)))) + +;; Add a register and sign-extended memory. 
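
;; As the commit message notes, additional pattern-based optimizations are
;; easy to express at this layer. Purely as an illustration (not a rule in
;; this patch), folding an addition of constant zero would look roughly
;; like this:
;;
;;   (rule (lower (has_type (fits_in_64 ty) (iadd x (i64_from_value 0))))
;;         (value_reg (put_in_reg x)))
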
+(rule (lower (has_type (fits_in_64 ty) (iadd x (sinkable_sload16 y)))) + (value_reg (add_mem_sext16 ty (put_in_reg x) (sink_sload16 y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd (sinkable_sload16 x) y))) + (value_reg (add_mem_sext16 ty (put_in_reg y) (sink_sload16 x)))) +(rule (lower (has_type (fits_in_64 ty) (iadd x (sinkable_sload32 y)))) + (value_reg (add_mem_sext32 ty (put_in_reg x) (sink_sload32 y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd (sinkable_sload32 x) y))) + (value_reg (add_mem_sext32 ty (put_in_reg y) (sink_sload32 x)))) + + +;;;; Rules for `isub` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Sub two registers. +(rule (lower (has_type (fits_in_64 ty) (isub x y))) + (value_reg (sub_reg ty (put_in_reg x) (put_in_reg y)))) + +;; Sub a register and a sign-extended register. +(rule (lower (has_type (fits_in_64 ty) (isub x (sext32_value y)))) + (value_reg (sub_reg_sext32 ty (put_in_reg x) (put_in_reg y)))) + +;; Sub a register and an immediate (using add of the negated value). +(rule (lower (has_type (fits_in_64 ty) (isub x (i16_from_negated_value y)))) + (value_reg (add_simm16 ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (isub x (i32_from_negated_value y)))) + (value_reg (add_simm32 ty (put_in_reg x) y))) + +;; Sub a register and memory (32/64-bit types). +(rule (lower (has_type (fits_in_64 ty) (isub x (sinkable_load_32_64 y)))) + (value_reg (sub_mem ty (put_in_reg x) (sink_load y)))) + +;; Sub a register and memory (16-bit types). +(rule (lower (has_type (fits_in_64 ty) (isub x (sinkable_load_16 y)))) + (value_reg (sub_mem_sext16 ty (put_in_reg x) (sink_load y)))) + +;; Sub a register and sign-extended memory. +(rule (lower (has_type (fits_in_64 ty) (isub x (sinkable_sload16 y)))) + (value_reg (sub_mem_sext16 ty (put_in_reg x) (sink_sload16 y)))) +(rule (lower (has_type (fits_in_64 ty) (isub x (sinkable_sload32 y)))) + (value_reg (sub_mem_sext32 ty (put_in_reg x) (sink_sload32 y)))) + + +;;;; Rules for `iabs` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Absolute value of a register. +;; For types smaller than 32-bit, the input value must be sign-extended. +(rule (lower (has_type (fits_in_64 ty) (iabs x))) + (value_reg (abs_reg (ty_ext32 ty) (put_in_reg_sext32 x)))) + +;; Absolute value of a sign-extended register. +(rule (lower (has_type (fits_in_64 ty) (iabs (sext32_value x)))) + (value_reg (abs_reg_sext32 ty (put_in_reg x)))) + + +;;;; Rules for `iadd_ifcout` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; N.B.: the second output of `iadd_ifcout` is meant to be the `iflags` value +;; containing the carry result, but we do not support the `iflags` mechanism. +;; However, the only actual use case is where `iadd_ifcout` feeds into `trapif`, +;; which is implemented by explicitly matching on the flags producer. So we can +;; get away with just allocating a second temp so that the reg-renaming code +;; does the right thing, for now. +(decl value_regs_ifcout (Reg) ValueRegs) +(rule (value_regs_ifcout reg) + (value_regs reg (writable_reg_to_reg (temp_writable_reg $I64)))) + +;; Add two registers. +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout x y))) + (value_regs_ifcout (add_logical_reg ty (put_in_reg x) (put_in_reg y)))) + +;; Add a register and a zero-extended register. 
+(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout x (zext32_value y)))) + (value_regs_ifcout (add_logical_reg_zext32 ty (put_in_reg x) (put_in_reg y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout (zext32_value x) y))) + (value_regs_ifcout (add_logical_reg_zext32 ty (put_in_reg y) (put_in_reg x)))) + +;; Add a register and an immediate. +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout x (u32_from_value y)))) + (value_regs_ifcout (add_logical_zimm32 ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout (u32_from_value x) y))) + (value_regs_ifcout (add_logical_zimm32 ty (put_in_reg y) x))) + +;; Add a register and memory (32/64-bit types). +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout x (sinkable_load_32_64 y)))) + (value_regs_ifcout (add_logical_mem ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout (sinkable_load_32_64 x) y))) + (value_regs_ifcout (add_logical_mem ty (put_in_reg y) (sink_load x)))) + +;; Add a register and zero-extended memory. +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout x (sinkable_uload32 y)))) + (value_regs_ifcout (add_logical_mem_zext32 ty (put_in_reg x) (sink_uload32 y)))) +(rule (lower (has_type (fits_in_64 ty) (iadd_ifcout (sinkable_uload32 x) y))) + (value_regs_ifcout (add_logical_mem_zext32 ty (put_in_reg y) (sink_uload32 x)))) + + +;;;; Rules for `ineg` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Negate a register. +(rule (lower (has_type (fits_in_64 ty) (ineg x))) + (value_reg (neg_reg ty (put_in_reg x)))) + +;; Negate a sign-extended register. +(rule (lower (has_type (fits_in_64 ty) (ineg (sext32_value x)))) + (value_reg (neg_reg_sext32 ty (put_in_reg x)))) + + +;;;; Rules for `imul` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Multiply two registers. +(rule (lower (has_type (fits_in_64 ty) (imul x y))) + (value_reg (mul_reg ty (put_in_reg x) (put_in_reg y)))) + +;; Multiply a register and a sign-extended register. +(rule (lower (has_type (fits_in_64 ty) (imul x (sext32_value y)))) + (value_reg (mul_reg_sext32 ty (put_in_reg x) (put_in_reg y)))) +(rule (lower (has_type (fits_in_64 ty) (imul (sext32_value x) y))) + (value_reg (mul_reg_sext32 ty (put_in_reg y) (put_in_reg x)))) + +;; Multiply a register and an immediate. +(rule (lower (has_type (fits_in_64 ty) (imul x (i16_from_value y)))) + (value_reg (mul_simm16 ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (imul (i16_from_value x) y))) + (value_reg (mul_simm16 ty (put_in_reg y) x))) +(rule (lower (has_type (fits_in_64 ty) (imul x (i32_from_value y)))) + (value_reg (mul_simm32 ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (imul (i32_from_value x) y))) + (value_reg (mul_simm32 ty (put_in_reg y) x))) + +;; Multiply a register and memory (32/64-bit types). +(rule (lower (has_type (fits_in_64 ty) (imul x (sinkable_load_32_64 y)))) + (value_reg (mul_mem ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (imul (sinkable_load_32_64 x) y))) + (value_reg (mul_mem ty (put_in_reg y) (sink_load x)))) + +;; Multiply a register and memory (16-bit types). +(rule (lower (has_type (fits_in_64 ty) (imul x (sinkable_load_16 y)))) + (value_reg (mul_mem_sext16 ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (imul (sinkable_load_16 x) y))) + (value_reg (mul_mem_sext16 ty (put_in_reg y) (sink_load x)))) + +;; Multiply a register and sign-extended memory. 
+(rule (lower (has_type (fits_in_64 ty) (imul x (sinkable_sload16 y))))
+ (value_reg (mul_mem_sext16 ty (put_in_reg x) (sink_sload16 y))))
+(rule (lower (has_type (fits_in_64 ty) (imul (sinkable_sload16 x) y)))
+ (value_reg (mul_mem_sext16 ty (put_in_reg y) (sink_sload16 x))))
+(rule (lower (has_type (fits_in_64 ty) (imul x (sinkable_sload32 y))))
+ (value_reg (mul_mem_sext32 ty (put_in_reg x) (sink_sload32 y))))
+(rule (lower (has_type (fits_in_64 ty) (imul (sinkable_sload32 x) y)))
+ (value_reg (mul_mem_sext32 ty (put_in_reg y) (sink_sload32 x))))
+
+
+;;;; Rules for `umulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiply high part unsigned, 8-bit or 16-bit types. (Uses 32-bit multiply.)
+(rule (lower (has_type (ty_8_or_16 ty) (umulhi x y)))
+ (let ((ext_reg_x Reg (put_in_reg_zext32 x))
+ (ext_reg_y Reg (put_in_reg_zext32 y))
+ (ext_mul Reg (mul_reg $I32 ext_reg_x ext_reg_y)))
+ (value_reg (lshr_imm $I32 ext_mul (ty_bits ty)))))
+
+;; Multiply high part unsigned, 32-bit types. (Uses 64-bit multiply.)
+(rule (lower (has_type $I32 (umulhi x y)))
+ (let ((ext_reg_x Reg (put_in_reg_zext64 x))
+ (ext_reg_y Reg (put_in_reg_zext64 y))
+ (ext_mul Reg (mul_reg $I64 ext_reg_x ext_reg_y)))
+ (value_reg (lshr_imm $I64 ext_mul 32))))
+
+;; Multiply high part unsigned, 64-bit types. (Uses umul_wide.)
+(rule (lower (has_type $I64 (umulhi x y)))
+ (let ((pair RegPair (umul_wide (put_in_reg x) (put_in_reg y))))
+ (value_reg (copy_reg $I64 (regpair_hi pair)))))
+
+
+;;;; Rules for `smulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiply high part signed, 8-bit or 16-bit types. (Uses 32-bit multiply.)
+(rule (lower (has_type (ty_8_or_16 ty) (smulhi x y)))
+ (let ((ext_reg_x Reg (put_in_reg_sext32 x))
+ (ext_reg_y Reg (put_in_reg_sext32 y))
+ (ext_mul Reg (mul_reg $I32 ext_reg_x ext_reg_y)))
+ (value_reg (ashr_imm $I32 ext_mul (ty_bits ty)))))
+
+;; Multiply high part signed, 32-bit types. (Uses 64-bit multiply.)
+(rule (lower (has_type $I32 (smulhi x y)))
+ (let ((ext_reg_x Reg (put_in_reg_sext64 x))
+ (ext_reg_y Reg (put_in_reg_sext64 y))
+ (ext_mul Reg (mul_reg $I64 ext_reg_x ext_reg_y)))
+ (value_reg (ashr_imm $I64 ext_mul 32))))
+
+;; Multiply high part signed, 64-bit types. (Uses smul_wide.)
+(rule (lower (has_type $I64 (smulhi x y)))
+ (let ((pair RegPair (smul_wide (put_in_reg x) (put_in_reg y))))
+ (value_reg (copy_reg $I64 (regpair_hi pair)))))
+
+
+;;;; Rules for `udiv` and `urem` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Divide two registers. The architecture provides combined udiv / urem
+;; instructions with the following combinations of data types:
+;;
+;; - 64-bit dividend (split across a 2x32-bit register pair),
+;; 32-bit divisor (in a single input register)
+;; 32-bit quotient & remainder (in a 2x32-bit register pair)
+;;
+;; - 128-bit dividend (split across a 2x64-bit register pair),
+;; 64-bit divisor (in a single input register)
+;; 64-bit quotient & remainder (in a 2x64-bit register pair)
+;;
+;; We use the first variant for 32-bit and smaller input types,
+;; and the second variant for 64-bit input types.
+
+;; Implement `udiv`.
+(rule (lower (has_type (fits_in_64 ty) (udiv x y)))
+ (let (;; Look at the divisor to determine whether we need to generate
+ ;; an explicit division-by-zero check.
+ (DZcheck bool (zero_divisor_check_needed y))
+ ;; Load up the dividend, by loading the (possibly zero-
+ ;; extended) input into the low half of the register pair,
+ ;; and setting the high half to zero.
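+ ;; (Illustration only: for an $I8 or $I16 input, the combined 64-bit
+ ;; dividend seen by the divide instruction is just the zero-extended
+ ;; input value, so the quotient and remainder are in turn guaranteed
+ ;; to fit back into the original type.)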
+ (ext_x RegPair (put_in_regpair_lo_zext32 x
+ (imm_regpair_hi (ty_ext32 ty) 0 (uninitialized_regpair))))
+ ;; Load up the divisor, zero-extended if necessary.
+ (ext_y Reg (put_in_reg_zext32 y))
+ (ext_ty Type (ty_ext32 ty))
+ ;; Now actually perform the division-by-zero check if necessary.
+ ;; This cannot be done earlier than here, because the check
+ ;; requires an already extended divisor value.
+ (_ Reg (maybe_trap_if_zero_divisor DZcheck ext_ty ext_y))
+ ;; Emit the actual divide instruction.
+ (pair RegPair (udivmod ext_ty ext_x ext_y)))
+ ;; The quotient can be found in the low half of the result.
+ (value_reg (copy_reg ty (regpair_lo pair)))))
+
+;; Implement `urem`. Same as `udiv`, but finds the remainder in
+;; the high half of the result register pair instead.
+(rule (lower (has_type (fits_in_64 ty) (urem x y)))
+ (let ((DZcheck bool (zero_divisor_check_needed y))
+ (ext_x RegPair (put_in_regpair_lo_zext32 x
+ (imm_regpair_hi ty 0 (uninitialized_regpair))))
+ (ext_y Reg (put_in_reg_zext32 y))
+ (ext_ty Type (ty_ext32 ty))
+ (_ Reg (maybe_trap_if_zero_divisor DZcheck ext_ty ext_y))
+ (pair RegPair (udivmod ext_ty ext_x ext_y)))
+ (value_reg (copy_reg ty (regpair_hi pair)))))
+
+;; Determine whether we need to perform a divide-by-zero check.
+;;
+;; If the `avoid_div_traps` flag is false, we never need to perform
+;; that check; we can rely on the divide instruction itself to trap.
+;;
+;; If the `avoid_div_traps` flag is true, we perform the check explicitly.
+;; This still can be omitted if the divisor is a non-zero immediate.
+(decl zero_divisor_check_needed (Value) bool)
+(rule (zero_divisor_check_needed (i64_from_value (i64_nonzero _))) $false)
+(rule (zero_divisor_check_needed (value_type (allow_div_traps))) $false)
+(rule (zero_divisor_check_needed _) $true)
+
+;; Perform the divide-by-zero check if required.
+;; This is simply a compare-and-trap of the (extended) divisor against 0.
+(decl maybe_trap_if_zero_divisor (bool Type Reg) Reg)
+(rule (maybe_trap_if_zero_divisor $false _ _) (invalid_reg))
+(rule (maybe_trap_if_zero_divisor $true ext_ty reg)
+ (icmps_simm16_and_trap ext_ty reg 0
+ (intcc_as_cond (IntCC.Equal))
+ (trap_code_division_by_zero)))
+
+
+;;;; Rules for `sdiv` and `srem` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Divide two registers. The architecture provides combined sdiv / srem
+;; instructions with the following combinations of data types:
+;;
+;; - 64-bit dividend (in the low half of a 2x64-bit register pair),
+;; 32-bit divisor (in a single input register)
+;; 64-bit quotient & remainder (in a 2x64-bit register pair)
+;;
+;; - 64-bit dividend (in the low half of a 2x64-bit register pair),
+;; 64-bit divisor (in a single input register)
+;; 64-bit quotient & remainder (in a 2x64-bit register pair)
+;;
+;; We use the first variant for 32-bit and smaller input types,
+;; and the second variant for 64-bit input types.
+
+;; Implement `sdiv`.
+(rule (lower (has_type (fits_in_64 ty) (sdiv x y)))
+ (let (;; Look at the divisor to determine whether we need to generate
+ ;; explicit division-by-zero and/or integer-overflow checks.
+ (DZcheck bool (zero_divisor_check_needed y))
+ (OFcheck bool (div_overflow_check_needed y))
+ ;; Load up the dividend (sign-extended to 64-bit) into the low
+ ;; half of a register pair (the high half remains uninitialized).
+ (ext_x RegPair (put_in_regpair_lo_sext64 x (uninitialized_regpair)))
+ ;; Load up the divisor (sign-extended if necessary).
+ (ext_y Reg (put_in_reg_sext32 y))
+ (ext_ty Type (ty_ext32 ty))
+ ;; Perform division-by-zero check (same as for `udiv`).
+ (_1 Reg (maybe_trap_if_zero_divisor DZcheck ext_ty ext_y))
+ ;; Perform integer-overflow check if necessary.
+ (_2 Reg (maybe_trap_if_sdiv_overflow OFcheck ext_ty ty ext_x ext_y))
+ ;; Emit the actual divide instruction.
+ (pair RegPair (sdivmod ext_ty ext_x ext_y)))
+ ;; The quotient can be found in the low half of the result.
+ (value_reg (copy_reg ty (regpair_lo pair)))))
+
+;; Implement `srem`. Same as `sdiv`, but finds the remainder in
+;; the high half of the result register pair instead. Also, handle
+;; the integer overflow case differently, see below.
+(rule (lower (has_type (fits_in_64 ty) (srem x y)))
+ (let ((DZcheck bool (zero_divisor_check_needed y))
+ (OFcheck bool (div_overflow_check_needed y))
+ (ext_x RegPair (put_in_regpair_lo_sext64 x (uninitialized_regpair)))
+ (ext_y Reg (put_in_reg_sext32 y))
+ (ext_ty Type (ty_ext32 ty))
+ (_ Reg (maybe_trap_if_zero_divisor DZcheck ext_ty ext_y))
+ (checked_x RegPair (maybe_avoid_srem_overflow OFcheck ext_ty ext_x ext_y))
+ (pair RegPair (sdivmod ext_ty checked_x ext_y)))
+ (value_reg (copy_reg ty (regpair_hi pair)))))
+
+;; Determine whether we need to perform an integer-overflow check.
+;;
+;; We never rely on the divide instruction itself to trap; while that trap
+;; would indeed happen, we have no way of signalling two different trap
+;; conditions from the same instruction. By explicitly checking for the
+;; integer-overflow case ahead of time, any hardware trap in the divide
+;; instruction is guaranteed to indicate division-by-zero.
+;;
+;; In addition, for types smaller than 64 bits we would have to perform
+;; the check explicitly anyway, since the instruction provides a 64-bit
+;; quotient and only traps if *that* overflows.
+;;
+;; However, the only case where integer overflow can occur is if the
+;; minimum (signed) integer value is divided by -1, so if the divisor
+;; is any immediate different from -1, the check can be omitted.
+(decl div_overflow_check_needed (Value) bool)
+(rule (div_overflow_check_needed (i64_from_value (i64_not_neg1 _))) $false)
+(rule (div_overflow_check_needed _) $true)
+
+;; Perform the integer-overflow check if necessary. This implements:
+;;
+;; if dividend == INT_MIN && divisor == -1 { trap }
+;;
+;; but to avoid introducing control flow, it is actually done as:
+;;
+;; if ((dividend ^ INT_MAX) & divisor) == -1 { trap }
+;;
+;; instead, using a single conditional trap instruction.
+(decl maybe_trap_if_sdiv_overflow (bool Type Type RegPair Reg) Reg)
+(rule (maybe_trap_if_sdiv_overflow $false ext_ty _ _ _) (invalid_reg))
+(rule (maybe_trap_if_sdiv_overflow $true ext_ty ty x y)
+ (let ((int_max Reg (imm ext_ty (int_max ty)))
+ (reg Reg (and_reg ext_ty (xor_reg ext_ty int_max
+ (regpair_lo x)) y)))
+ (icmps_simm16_and_trap ext_ty reg -1
+ (intcc_as_cond (IntCC.Equal))
+ (trap_code_integer_overflow))))
+(decl int_max (Type) u64)
+(rule (int_max $I8) 0x7f)
+(rule (int_max $I16) 0x7fff)
+(rule (int_max $I32) 0x7fffffff)
+(rule (int_max $I64) 0x7fffffffffffffff)
+
+;; When performing `srem`, we do not want to trap in the
+;; integer-overflow scenario, because it is only the quotient
+;; that overflows, not the remainder.
+;;
+;; For types smaller than 64 bits, we can simply let the
+;; instruction execute, since (as above) it will never trap.
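+;; (Worked example, for illustration only: for $I32 the worst case is
+;; INT32_MIN srem -1. The instruction then computes the 64-bit quotient
+;; 2^31, which still fits into 64 bits, so no trap occurs, and the
+;; remainder found in the high half is the required result 0.)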
+;;
+;; For 64-bit inputs, we check whether the divisor is -1, and
+;; if so simply replace the dividend by zero, which will give
+;; the correct result, since any value modulo -1 is zero.
+;;
+;; (We could in fact avoid executing the divide instruction
+;; at all in this case, but that would require introducing
+;; control flow.)
+(decl maybe_avoid_srem_overflow (bool Type RegPair Reg) RegPair)
+(rule (maybe_avoid_srem_overflow $false _ x _) x)
+(rule (maybe_avoid_srem_overflow $true $I32 x _) x)
+(rule (maybe_avoid_srem_overflow $true $I64 x y)
+ (cmov_imm_regpair_lo $I64 (icmps_simm16 $I64 y -1)
+ (intcc_as_cond (IntCC.Equal)) 0 x))
+
+
+;;;; Rules for `ishl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Shift left, shift amount in register.
+(rule (lower (has_type (fits_in_64 ty) (ishl x y)))
+ (let ((masked_amt Reg (mask_amt_reg ty (put_in_reg y))))
+ (value_reg (lshl_reg ty (put_in_reg x) masked_amt))))
+
+;; Shift left, immediate shift amount.
+(rule (lower (has_type (fits_in_64 ty) (ishl x (i64_from_value y))))
+ (let ((masked_amt u8 (mask_amt_imm ty y)))
+ (value_reg (lshl_imm ty (put_in_reg x) masked_amt))))
+
+
+;;;; Rules for `ushr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Shift right logical, shift amount in register.
+;; For types smaller than 32-bit, the input value must be zero-extended.
+(rule (lower (has_type (fits_in_64 ty) (ushr x y)))
+ (let ((ext_reg Reg (put_in_reg_zext32 x))
+ (masked_amt Reg (mask_amt_reg ty (put_in_reg y))))
+ (value_reg (lshr_reg (ty_ext32 ty) ext_reg masked_amt))))
+
+;; Shift right logical, immediate shift amount.
+;; For types smaller than 32-bit, the input value must be zero-extended.
+(rule (lower (has_type (fits_in_64 ty) (ushr x (i64_from_value y))))
+ (let ((ext_reg Reg (put_in_reg_zext32 x))
+ (masked_amt u8 (mask_amt_imm ty y)))
+ (value_reg (lshr_imm (ty_ext32 ty) ext_reg masked_amt))))
+
+
+;;;; Rules for `sshr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Shift right arithmetic, shift amount in register.
+;; For types smaller than 32-bit, the input value must be sign-extended.
+(rule (lower (has_type (fits_in_64 ty) (sshr x y)))
+ (let ((ext_reg Reg (put_in_reg_sext32 x))
+ (masked_amt Reg (mask_amt_reg ty (put_in_reg y))))
+ (value_reg (ashr_reg (ty_ext32 ty) ext_reg masked_amt))))
+
+;; Shift right arithmetic, immediate shift amount.
+;; For types smaller than 32-bit, the input value must be sign-extended.
+(rule (lower (has_type (fits_in_64 ty) (sshr x (i64_from_value y))))
+ (let ((ext_reg Reg (put_in_reg_sext32 x))
+ (masked_amt u8 (mask_amt_imm ty y)))
+ (value_reg (ashr_imm (ty_ext32 ty) ext_reg masked_amt))))
+
+
+;;;; Rules for `rotl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Rotate left, shift amount in register. 32-bit or 64-bit types.
+(rule (lower (has_type (ty_32_or_64 ty) (rotl x y)))
+ (value_reg (rot_reg ty (put_in_reg x) (put_in_reg y))))
+
+;; Rotate left, immediate shift amount. 32-bit or 64-bit types.
+(rule (lower (has_type (ty_32_or_64 ty) (rotl x (i64_from_value y))))
+ (let ((masked_amt u8 (mask_amt_imm ty y)))
+ (value_reg (rot_imm ty (put_in_reg x) masked_amt))))
+
+;; Rotate left, shift amount in register. 8-bit or 16-bit types.
+;; Implemented via a pair of 32-bit shifts on the zero-extended input.
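+;; (Sketch of the identity used, with N the type width in bits and both
+;; amounts masked to N-1 as in the rules below:
+;;    rotl(x, n) == (x << (n & (N-1))) | (x >> ((-n) & (N-1)))
+;; which is why both the amount and its negation are needed.)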
+(rule (lower (has_type (ty_8_or_16 ty) (rotl x y)))
+ (let ((ext_reg Reg (put_in_reg_zext32 x))
+ (ext_ty Type (ty_ext32 ty))
+ (pos_amt Reg (put_in_reg y))
+ (neg_amt Reg (neg_reg ty pos_amt))
+ (masked_pos_amt Reg (mask_amt_reg ty pos_amt))
+ (masked_neg_amt Reg (mask_amt_reg ty neg_amt)))
+ (value_reg (or_reg ty (lshl_reg ext_ty ext_reg masked_pos_amt)
+ (lshr_reg ext_ty ext_reg masked_neg_amt)))))
+
+;; Rotate left, immediate shift amount. 8-bit or 16-bit types.
+;; Implemented via a pair of 32-bit shifts on the zero-extended input.
+(rule (lower (has_type (ty_8_or_16 ty) (rotl x (and (i64_from_value pos_amt)
+ (i64_from_negated_value neg_amt)))))
+ (let ((ext_reg Reg (put_in_reg_zext32 x))
+ (ext_ty Type (ty_ext32 ty))
+ (masked_pos_amt u8 (mask_amt_imm ty pos_amt))
+ (masked_neg_amt u8 (mask_amt_imm ty neg_amt)))
+ (value_reg (or_reg ty (lshl_imm ext_ty ext_reg masked_pos_amt)
+ (lshr_imm ext_ty ext_reg masked_neg_amt)))))
+
+
+;;;; Rules for `rotr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Rotate right, shift amount in register. 32-bit or 64-bit types.
+;; Implemented as rotate left with negated rotate amount.
+(rule (lower (has_type (ty_32_or_64 ty) (rotr x y)))
+ (let ((negated_amt Reg (neg_reg ty (put_in_reg y))))
+ (value_reg (rot_reg ty (put_in_reg x) negated_amt))))
+
+;; Rotate right, immediate shift amount. 32-bit or 64-bit types.
+;; Implemented as rotate left with negated rotate amount.
+(rule (lower (has_type (ty_32_or_64 ty) (rotr x (i64_from_negated_value y))))
+ (let ((negated_amt u8 (mask_amt_imm ty y)))
+ (value_reg (rot_imm ty (put_in_reg x) negated_amt))))
+
+;; Rotate right, shift amount in register. 8-bit or 16-bit types.
+;; Implemented as rotate left with negated rotate amount.
+(rule (lower (has_type (ty_8_or_16 ty) (rotr x y)))
+ (let ((ext_reg Reg (put_in_reg_zext32 x))
+ (ext_ty Type (ty_ext32 ty))
+ (pos_amt Reg (put_in_reg y))
+ (neg_amt Reg (neg_reg ty pos_amt))
+ (masked_pos_amt Reg (mask_amt_reg ty pos_amt))
+ (masked_neg_amt Reg (mask_amt_reg ty neg_amt)))
+ (value_reg (or_reg ty (lshl_reg ext_ty ext_reg masked_neg_amt)
+ (lshr_reg ext_ty ext_reg masked_pos_amt)))))
+
+;; Rotate right, immediate shift amount. 8-bit or 16-bit types.
+;; Implemented as rotate left with negated rotate amount.
+(rule (lower (has_type (ty_8_or_16 ty) (rotr x (and (i64_from_value pos_amt)
+ (i64_from_negated_value neg_amt)))))
+ (let ((ext_reg Reg (put_in_reg_zext32 x))
+ (ext_ty Type (ty_ext32 ty))
+ (masked_pos_amt u8 (mask_amt_imm ty pos_amt))
+ (masked_neg_amt u8 (mask_amt_imm ty neg_amt)))
+ (value_reg (or_reg ty (lshl_imm ext_ty ext_reg masked_neg_amt)
+ (lshr_imm ext_ty ext_reg masked_pos_amt)))))
+
+;;;; Rules for `ireduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Always a no-op.
+(rule (lower (ireduce x))
+ (value_reg (put_in_reg x)))
+
+
+;;;; Rules for `uextend` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; 16- or 32-bit target types.
+(rule (lower (has_type (gpr32_ty _ty) (uextend x)))
+ (value_reg (put_in_reg_zext32 x)))
+
+;; 64-bit target types.
+(rule (lower (has_type (gpr64_ty _ty) (uextend x)))
+ (value_reg (put_in_reg_zext64 x)))
+
+
+;;;; Rules for `sextend` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; 16- or 32-bit target types.
+(rule (lower (has_type (gpr32_ty _ty) (sextend x)))
+ (value_reg (put_in_reg_sext32 x)))
+
+;; 64-bit target types.
+(rule (lower (has_type (gpr64_ty _ty) (sextend x))) + (value_reg (put_in_reg_sext64 x))) + + +;;;; Rules for `bnot` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; z15 version using a single instruction (NOR). +(rule (lower (has_type (and (mie2_enabled) (fits_in_64 ty)) (bnot x))) + (let ((rx Reg (put_in_reg x))) + (value_reg (or_not_reg ty rx rx)))) + +;; z14 version using XOR with -1. +(rule (lower (has_type (and (mie2_disabled) (fits_in_64 ty)) (bnot x))) + (value_reg (not_reg ty (put_in_reg x)))) + + +;;;; Rules for `band` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; And two registers. +(rule (lower (has_type (fits_in_64 ty) (band x y))) + (value_reg (and_reg ty (put_in_reg x) (put_in_reg y)))) + +;; And a register and an immediate. +(rule (lower (has_type (fits_in_64 ty) (band x (uimm16shifted_from_inverted_value y)))) + (value_reg (and_uimm16shifted ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (band (uimm16shifted_from_inverted_value x) y))) + (value_reg (and_uimm16shifted ty (put_in_reg y) x))) +(rule (lower (has_type (fits_in_64 ty) (band x (uimm32shifted_from_inverted_value y)))) + (value_reg (and_uimm32shifted ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (band (uimm32shifted_from_inverted_value x) y))) + (value_reg (and_uimm32shifted ty (put_in_reg y) x))) + +;; And a register and memory (32/64-bit types). +(rule (lower (has_type (fits_in_64 ty) (band x (sinkable_load_32_64 y)))) + (value_reg (and_mem ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (band (sinkable_load_32_64 x) y))) + (value_reg (and_mem ty (put_in_reg y) (sink_load x)))) + + +;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Or two registers. +(rule (lower (has_type (fits_in_64 ty) (bor x y))) + (value_reg (or_reg ty (put_in_reg x) (put_in_reg y)))) + +;; Or a register and an immediate. +(rule (lower (has_type (fits_in_64 ty) (bor x (uimm16shifted_from_value y)))) + (value_reg (or_uimm16shifted ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (bor (uimm16shifted_from_value x) y))) + (value_reg (or_uimm16shifted ty (put_in_reg y) x))) +(rule (lower (has_type (fits_in_64 ty) (bor x (uimm32shifted_from_value y)))) + (value_reg (or_uimm32shifted ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (bor (uimm32shifted_from_value x) y))) + (value_reg (or_uimm32shifted ty (put_in_reg y) x))) + +;; Or a register and memory (32/64-bit types). +(rule (lower (has_type (fits_in_64 ty) (bor x (sinkable_load_32_64 y)))) + (value_reg (or_mem ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (bor (sinkable_load_32_64 x) y))) + (value_reg (or_mem ty (put_in_reg y) (sink_load x)))) + + +;;;; Rules for `bxor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Xor two registers. +(rule (lower (has_type (fits_in_64 ty) (bxor x y))) + (value_reg (xor_reg ty (put_in_reg x) (put_in_reg y)))) + +;; Xor a register and an immediate. +(rule (lower (has_type (fits_in_64 ty) (bxor x (uimm32shifted_from_value y)))) + (value_reg (xor_uimm32shifted ty (put_in_reg x) y))) +(rule (lower (has_type (fits_in_64 ty) (bxor (uimm32shifted_from_value x) y))) + (value_reg (xor_uimm32shifted ty (put_in_reg y) x))) + +;; Xor a register and memory (32/64-bit types). 
+(rule (lower (has_type (fits_in_64 ty) (bxor x (sinkable_load_32_64 y)))) + (value_reg (xor_mem ty (put_in_reg x) (sink_load y)))) +(rule (lower (has_type (fits_in_64 ty) (bxor (sinkable_load_32_64 x) y))) + (value_reg (xor_mem ty (put_in_reg y) (sink_load x)))) + + +;;;; Rules for `band_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; z15 version using a single instruction. +(rule (lower (has_type (and (mie2_enabled) (fits_in_64 ty)) (band_not x y))) + (value_reg (and_not_reg ty (put_in_reg x) (put_in_reg y)))) + +;; z14 version using XOR with -1. +(rule (lower (has_type (and (mie2_disabled) (fits_in_64 ty)) (band_not x y))) + (value_reg (not_reg ty (and_reg ty (put_in_reg x) (put_in_reg y))))) + + +;;;; Rules for `bor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; z15 version using a single instruction. +(rule (lower (has_type (and (mie2_enabled) (fits_in_64 ty)) (bor_not x y))) + (value_reg (or_not_reg ty (put_in_reg x) (put_in_reg y)))) + +;; z14 version using XOR with -1. +(rule (lower (has_type (and (mie2_disabled) (fits_in_64 ty)) (bor_not x y))) + (value_reg (not_reg ty (or_reg ty (put_in_reg x) (put_in_reg y))))) + + +;;;; Rules for `bxor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; z15 version using a single instruction. +(rule (lower (has_type (and (mie2_enabled) (fits_in_64 ty)) (bxor_not x y))) + (value_reg (xor_not_reg ty (put_in_reg x) (put_in_reg y)))) + +;; z14 version using XOR with -1. +(rule (lower (has_type (and (mie2_disabled) (fits_in_64 ty)) (bxor_not x y))) + (value_reg (not_reg ty (xor_reg ty (put_in_reg x) (put_in_reg y))))) + + +;;;; Rules for `bitselect` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; z15 version using a NAND instruction. +(rule (lower (has_type (and (mie2_enabled) (fits_in_64 ty)) (bitselect x y z))) + (let ((rx Reg (put_in_reg x)) + (if_true Reg (and_reg ty (put_in_reg y) rx)) + (if_false Reg (and_not_reg ty (put_in_reg z) rx))) + (value_reg (or_reg ty if_false if_true)))) + +;; z14 version using XOR with -1. +(rule (lower (has_type (and (mie2_disabled) (fits_in_64 ty)) (bitselect x y z))) + (let ((rx Reg (put_in_reg x)) + (if_true Reg (and_reg ty (put_in_reg y) rx)) + (if_false Reg (not_reg ty (and_reg ty (put_in_reg z) rx)))) + (value_reg (or_reg ty if_false if_true)))) + + +;;;; Rules for `breduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Always a no-op. +(rule (lower (breduce x)) + (value_reg (put_in_reg x))) + + +;;;; Rules for `bextend` and `bmask` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Use a common helper to type cast bools to either bool or integer types. +(decl cast_bool (Type Value) Reg) +(rule (lower (has_type ty (bextend x))) + (value_reg (cast_bool ty x))) +(rule (lower (has_type ty (bmask x))) + (value_reg (cast_bool ty x))) + +;; If the target has the same or a smaller size than the source, it's a no-op. +(rule (cast_bool $B1 x @ (value_type $B1)) (put_in_reg x)) +(rule (cast_bool $B1 x @ (value_type $B8)) (put_in_reg x)) +(rule (cast_bool $B8 x @ (value_type $B8)) (put_in_reg x)) +(rule (cast_bool $I8 x @ (value_type $B8)) (put_in_reg x)) +(rule (cast_bool (fits_in_16 _ty) x @ (value_type $B16)) (put_in_reg x)) +(rule (cast_bool (fits_in_32 _ty) x @ (value_type $B32)) (put_in_reg x)) +(rule (cast_bool (fits_in_64 _ty) x @ (value_type $B64)) (put_in_reg x)) + +;; Single-bit values are sign-extended via a pair of shifts. 
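+;; (For example, for a 32-bit target the rules below compute
+;; (x << 31) >> 31 using an arithmetic right shift, which replicates
+;; bit 0 across the register: 1 becomes all ones, 0 stays 0.)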
+(rule (cast_bool (gpr32_ty ty) x @ (value_type $B1)) + (ashr_imm $I32 (lshl_imm $I32 (put_in_reg x) 31) 31)) +(rule (cast_bool (gpr64_ty ty) x @ (value_type $B1)) + (ashr_imm $I64 (lshl_imm $I64 (put_in_reg x) 63) 63)) + +;; Other values are just sign-extended normally. +(rule (cast_bool (gpr32_ty _ty) x @ (value_type $B8)) + (sext32_reg $I8 (put_in_reg x))) +(rule (cast_bool (gpr32_ty _ty) x @ (value_type $B16)) + (sext32_reg $I16 (put_in_reg x))) +(rule (cast_bool (gpr64_ty _ty) x @ (value_type $B8)) + (sext64_reg $I8 (put_in_reg x))) +(rule (cast_bool (gpr64_ty _ty) x @ (value_type $B16)) + (sext64_reg $I16 (put_in_reg x))) +(rule (cast_bool (gpr64_ty _ty) x @ (value_type $B32)) + (sext64_reg $I32 (put_in_reg x))) + + +;;;; Rules for `bint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Mask with 1 to get a 0/1 result (8- or 16-bit types). +(rule (lower (has_type (fits_in_16 ty) (bint x))) + (value_reg (and_uimm16shifted ty (put_in_reg x) (uimm16shifted 1 0)))) + +;; Mask with 1 to get a 0/1 result (32-bit types). +(rule (lower (has_type (fits_in_32 ty) (bint x))) + (value_reg (and_uimm32shifted ty (put_in_reg x) (uimm32shifted 1 0)))) + +;; Mask with 1 to get a 0/1 result (64-bit types). +(rule (lower (has_type (fits_in_64 ty) (bint x))) + (value_reg (and_reg ty (put_in_reg x) (imm ty 1)))) + + +;;;; Rules for `clz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; The FLOGR hardware instruction always operates on the full 64-bit register. +;; We can zero-extend smaller types, but then we have to compensate for the +;; additional leading zero bits the instruction will actually see. +(decl clz_offset (Type Reg) Reg) +(rule (clz_offset $I8 x) (add_simm16 $I8 x -56)) +(rule (clz_offset $I16 x) (add_simm16 $I16 x -48)) +(rule (clz_offset $I32 x) (add_simm16 $I32 x -32)) +(rule (clz_offset $I64 x) (copy_reg $I64 x)) + +;; Count leading zeros, via FLOGR on an input zero-extended to 64 bits, +;; with the result compensated for the extra bits. +(rule (lower (has_type (fits_in_64 ty) (clz x))) + (let ((ext_reg Reg (put_in_reg_zext64 x)) + ;; Ask for a value of 64 in the all-zero 64-bit input case. + ;; After compensation this will match the expected semantics. + (clz RegPair (clz_reg 64 ext_reg))) + (value_reg (clz_offset ty (regpair_hi clz))))) + + +;;;; Rules for `cls` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Count leading sign-bit copies. We don't have any instruction for that, +;; so we instead count the leading zeros after inverting the input if negative, +;; i.e. computing +;; cls(x) == clz(x ^ (x >> 63)) +;; where x is the sign-extended input. +(rule (lower (has_type (fits_in_64 ty) (cls x))) + (let ((ext_reg Reg (put_in_reg_sext64 x)) + (signbit_copies Reg (ashr_imm $I64 ext_reg 63)) + (inv_reg Reg (xor_reg $I64 ext_reg signbit_copies)) + (clz RegPair (clz_reg 64 inv_reg))) + (value_reg (clz_offset ty (regpair_hi clz))))) + + +;;;; Rules for `ctz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; To count trailing zeros, we find the last bit set in the input via (x & -x), +;; count the leading zeros of that value, and subtract from 63: +;; +;; ctz(x) == 63 - clz(x & -x) +;; +;; This works for all cases except a zero input, where the above formula would +;; return -1, but we are expected to return the type size. The compensation +;; for this case is handled differently for 64-bit types vs. smaller types. 
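+;;
+;; (Worked example, for illustration only: for x = 0b0010_1000,
+;; x & -x == 0b0000_1000 isolates the lowest set bit; clz of that
+;; value as a 64-bit quantity is 60, and 63 - 60 == 3 == ctz(x).)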
+
+;; For smaller types, we simply ensure that the extended 64-bit input is
+;; never zero by setting a "guard bit" in the position corresponding to
+;; the input type size. This way the 64-bit algorithm above will handle
+;; that case correctly automatically.
+(rule (lower (has_type (gpr32_ty ty) (ctz x)))
+ (let ((rx Reg (or_uimm16shifted $I64 (put_in_reg x) (ctz_guardbit ty)))
+ (lastbit Reg (and_reg $I64 rx (neg_reg $I64 rx)))
+ (clz RegPair (clz_reg 64 lastbit)))
+ (value_reg (sub_reg ty (imm ty 63) (regpair_hi clz)))))
+
+(decl ctz_guardbit (Type) UImm16Shifted)
+(rule (ctz_guardbit $I8) (uimm16shifted 256 0))
+(rule (ctz_guardbit $I16) (uimm16shifted 1 16))
+(rule (ctz_guardbit $I32) (uimm16shifted 1 32))
+
+;; For 64-bit types, the FLOGR instruction will indicate the zero input case
+;; via its condition code. We check for that and replace the instruction
+;; result with the value -1 via a conditional move, which will then lead to
+;; the correct result after the final subtraction from 63.
+(rule (lower (has_type (gpr64_ty _ty) (ctz x)))
+ (let ((rx Reg (put_in_reg x))
+ (lastbit Reg (and_reg $I64 rx (neg_reg $I64 rx)))
+ (clz RegPair (clz_reg -1 lastbit)))
+ (value_reg (sub_reg $I64 (imm $I64 63) (regpair_hi clz)))))
+
+
+;;;; Rules for `popcnt` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Population count for 8-bit types is supported by the POPCNT instruction.
+(rule (lower (has_type $I8 (popcnt x)))
+ (value_reg (popcnt_byte (put_in_reg x))))
+
+;; On z15, the POPCNT instruction has a variant to compute a full 64-bit
+;; population count, which we also use for 16- and 32-bit types.
+(rule (lower (has_type (and (mie2_enabled) (fits_in_64 ty)) (popcnt x)))
+ (value_reg (popcnt_reg (put_in_reg_zext64 x))))
+
+;; On z14, we use the regular POPCNT, which computes the population count
+;; of each input byte separately, so we need to accumulate those partial
+;; results via a series of log2(type size in bytes) additions. We
+;; accumulate in the high byte, so that a final right shift will zero out
+;; any unrelated bits to give a clean result.
+
+(rule (lower (has_type (and (mie2_disabled) $I16) (popcnt x)))
+ (let ((cnt2 Reg (popcnt_byte (put_in_reg x)))
+ (cnt1 Reg (add_reg $I32 cnt2 (lshl_imm $I32 cnt2 8))))
+ (value_reg (lshr_imm $I32 cnt1 8))))
+
+(rule (lower (has_type (and (mie2_disabled) $I32) (popcnt x)))
+ (let ((cnt4 Reg (popcnt_byte (put_in_reg x)))
+ (cnt2 Reg (add_reg $I32 cnt4 (lshl_imm $I32 cnt4 16)))
+ (cnt1 Reg (add_reg $I32 cnt2 (lshl_imm $I32 cnt2 8))))
+ (value_reg (lshr_imm $I32 cnt1 24))))
+
+(rule (lower (has_type (and (mie2_disabled) $I64) (popcnt x)))
+ (let ((cnt8 Reg (popcnt_byte (put_in_reg x)))
+ (cnt4 Reg (add_reg $I64 cnt8 (lshl_imm $I64 cnt8 32)))
+ (cnt2 Reg (add_reg $I64 cnt4 (lshl_imm $I64 cnt4 16)))
+ (cnt1 Reg (add_reg $I64 cnt2 (lshl_imm $I64 cnt2 8))))
+ (value_reg (lshr_imm $I64 cnt1 56))))
+
+
+;;;; Rules for `fadd` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Add two registers.
+(rule (lower (has_type ty (fadd x y)))
+ (value_reg (fadd_reg ty (put_in_reg x) (put_in_reg y))))
+
+
+;;;; Rules for `fsub` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Subtract two registers.
+(rule (lower (has_type ty (fsub x y)))
+ (value_reg (fsub_reg ty (put_in_reg x) (put_in_reg y))))
+
+
+;;;; Rules for `fmul` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiply two registers.
+(rule (lower (has_type ty (fmul x y))) + (value_reg (fmul_reg ty (put_in_reg x) (put_in_reg y)))) + + +;;;; Rules for `fdiv` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Divide two registers. +(rule (lower (has_type ty (fdiv x y))) + (value_reg (fdiv_reg ty (put_in_reg x) (put_in_reg y)))) + + +;;;; Rules for `fmin` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Minimum of two registers. +(rule (lower (has_type ty (fmin x y))) + (value_reg (fmin_reg ty (put_in_reg x) (put_in_reg y)))) + + +;;;; Rules for `fmax` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Maximum of two registers. +(rule (lower (has_type ty (fmax x y))) + (value_reg (fmax_reg ty (put_in_reg x) (put_in_reg y)))) + + +;;;; Rules for `fcopysign` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Copysign of two registers. +(rule (lower (has_type ty (fcopysign x y))) + (value_reg (fpu_copysign ty (put_in_reg x) (put_in_reg y)))) + + +;;;; Rules for `fma` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Multiply-and-add of three registers. +(rule (lower (has_type ty (fma x y z))) + (value_reg (fma_reg ty (put_in_reg x) (put_in_reg y) (put_in_reg z)))) + + +;;;; Rules for `sqrt` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Square root of a register. +(rule (lower (has_type ty (sqrt x))) + (value_reg (sqrt_reg ty (put_in_reg x)))) + + +;;;; Rules for `fneg` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Negated value of a register. +(rule (lower (has_type ty (fneg x))) + (value_reg (fneg_reg ty (put_in_reg x)))) + + +;;;; Rules for `fabs` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Absolute value of a register. +(rule (lower (has_type ty (fabs x))) + (value_reg (fabs_reg ty (put_in_reg x)))) + + +;;;; Rules for `ceil` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Round value in a register towards positive infinity. +(rule (lower (has_type ty (ceil x))) + (value_reg (ceil_reg ty (put_in_reg x)))) + + +;;;; Rules for `floor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Round value in a register towards negative infinity. +(rule (lower (has_type ty (floor x))) + (value_reg (floor_reg ty (put_in_reg x)))) + + +;;;; Rules for `trunc` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Round value in a register towards zero. +(rule (lower (has_type ty (trunc x))) + (value_reg (trunc_reg ty (put_in_reg x)))) + + +;;;; Rules for `nearest` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Round value in a register towards nearest. +(rule (lower (has_type ty (nearest x))) + (value_reg (nearest_reg ty (put_in_reg x)))) + + +;;;; Rules for `fpromote` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Promote a register. +(rule (lower (has_type dst_ty (fpromote x @ (value_type src_ty)))) + (value_reg (fpromote_reg dst_ty src_ty (put_in_reg x)))) + + +;;;; Rules for `fdemote` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Demote a register. +(rule (lower (has_type dst_ty (fdemote x @ (value_type src_ty)))) + (value_reg (fdemote_reg dst_ty src_ty (put_in_reg x)))) + + +;;;; Rules for `fcvt_from_uint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Convert an unsigned integer value in a register to floating-point. 
+(rule (lower (has_type dst_ty (fcvt_from_uint x @ (value_type src_ty))))
+ (value_reg (fcvt_from_uint_reg dst_ty (ty_ext32 src_ty)
+ (put_in_reg_zext32 x))))
+
+
+;;;; Rules for `fcvt_from_sint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Convert a signed integer value in a register to floating-point.
+(rule (lower (has_type dst_ty (fcvt_from_sint x @ (value_type src_ty))))
+ (value_reg (fcvt_from_sint_reg dst_ty (ty_ext32 src_ty)
+ (put_in_reg_sext32 x))))
+
+
+;;;; Rules for `fcvt_to_uint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Convert a floating-point value in a register to an unsigned integer value.
+;; Traps if the input cannot be represented in the output type.
+;; FIXME: Add support for 8-/16-bit destination types (needs overflow check).
+(rule (lower (has_type (ty_32_or_64 dst_ty) (fcvt_to_uint x @ (value_type src_ty))))
+ (let ((src Reg (put_in_reg x))
+ ;; First, check whether the input is a NaN, and trap if so.
+ (_ Reg (trap_if (fcmp_reg src_ty src src)
+ (floatcc_as_cond (FloatCC.Unordered))
+ (trap_code_bad_conversion_to_integer)))
+ ;; Perform the conversion. If this sets CC 3, we have a
+ ;; "special case". Since we already excluded the case where
+ ;; the input was a NaN, the only other option is that the
+ ;; conversion overflowed the target type.
+ (dst Reg (trap_if (fcvt_to_uint_reg_with_flags dst_ty src_ty src)
+ (floatcc_as_cond (FloatCC.Unordered))
+ (trap_code_integer_overflow))))
+ (value_reg dst)))
+
+
+;;;; Rules for `fcvt_to_sint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Convert a floating-point value in a register to a signed integer value.
+;; Traps if the input cannot be represented in the output type.
+;; FIXME: Add support for 8-/16-bit destination types (needs overflow check).
+(rule (lower (has_type (ty_32_or_64 dst_ty) (fcvt_to_sint x @ (value_type src_ty))))
+ (let ((src Reg (put_in_reg x))
+ ;; First, check whether the input is a NaN, and trap if so.
+ (_ Reg (trap_if (fcmp_reg src_ty src src)
+ (floatcc_as_cond (FloatCC.Unordered))
+ (trap_code_bad_conversion_to_integer)))
+ ;; Perform the conversion. If this sets CC 3, we have a
+ ;; "special case". Since we already excluded the case where
+ ;; the input was a NaN, the only other option is that the
+ ;; conversion overflowed the target type.
+ (dst Reg (trap_if (fcvt_to_sint_reg_with_flags dst_ty src_ty src)
+ (floatcc_as_cond (FloatCC.Unordered))
+ (trap_code_integer_overflow))))
+ (value_reg dst)))
+
+
+;;;; Rules for `fcvt_to_uint_sat` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Convert a floating-point value in a register to an unsigned integer value.
+;; FIXME: Add support for 8-/16-bit destination types (needs overflow check).
+(rule (lower (has_type (ty_32_or_64 dst_ty) (fcvt_to_uint_sat x @ (value_type src_ty))))
+ (let ((src Reg (put_in_reg x))
+ (dst Reg (fcvt_to_uint_reg dst_ty src_ty src))
+ ;; In most special cases, the Z instruction already yields the
+ ;; result expected by Cranelift semantics. The only exception
+ ;; is the case where the input was a NaN. We explicitly check
+ ;; for that and force the output to 0 in that case.
+ (sat Reg (with_flags_1 (fcmp_reg src_ty src src)
+ (cmov_imm dst_ty
+ (floatcc_as_cond (FloatCC.Unordered)) 0 dst))))
+ (value_reg sat)))
+
+
+;;;; Rules for `fcvt_to_sint_sat` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Convert a floating-point value in a register to a signed integer value.
+;; FIXME: Add support for 8-/16-bit destination types (needs overflow check).
+(rule (lower (has_type (ty_32_or_64 dst_ty) (fcvt_to_sint_sat x @ (value_type src_ty))))
+ (let ((src Reg (put_in_reg x))
+ (dst Reg (fcvt_to_sint_reg dst_ty src_ty src))
+ ;; In most special cases, the Z instruction already yields the
+ ;; result expected by Cranelift semantics. The only exception
+ ;; is the case where the input was a NaN. We explicitly check
+ ;; for that and force the output to 0 in that case.
+ (sat Reg (with_flags_1 (fcmp_reg src_ty src src)
+ (cmov_imm dst_ty
+ (floatcc_as_cond (FloatCC.Unordered)) 0 dst))))
+ (value_reg sat)))
+
+
+;;;; Rules for `bitcast` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Reinterpret a 64-bit integer value as floating-point.
+(rule (lower (has_type $F64 (bitcast x @ (value_type $I64))))
+ (value_reg (mov_to_fpr (put_in_reg x))))
+
+;; Reinterpret a 64-bit floating-point value as integer.
+(rule (lower (has_type $I64 (bitcast x @ (value_type $F64))))
+ (value_reg (mov_from_fpr (put_in_reg x))))
+
+;; Reinterpret a 32-bit integer value as floating-point (via $I64).
+;; Note that a 32-bit float is located in the high bits of the GPR.
+(rule (lower (has_type $F32 (bitcast x @ (value_type $I32))))
+ (value_reg (mov_to_fpr (lshl_imm $I64 (put_in_reg x) 32))))
+
+;; Reinterpret a 32-bit floating-point value as integer (via $I64).
+;; Note that a 32-bit float is located in the high bits of the GPR.
+(rule (lower (has_type $I32 (bitcast x @ (value_type $F32))))
+ (value_reg (lshr_imm $I64 (mov_from_fpr (put_in_reg x)) 32)))
+
+
+;;;; Rules for `stack_addr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Load the address of a stack slot.
+(rule (lower (has_type ty (stack_addr stack_slot offset)))
+ (value_reg (stack_addr_impl ty stack_slot offset)))
+
+
+;;;; Rules for `func_addr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Load the address of a function, target reachable via PC-relative instruction.
+(rule (lower (and (func_addr _)
+ (call_target_data name (reloc_distance_near))))
+ (value_reg (load_addr (memarg_symbol name 0 (memflags_trusted)))))
+
+;; Load the address of a function, general case.
+(rule (lower (and (func_addr _)
+ (call_target_data name _)))
+ (value_reg (load_ext_name_far name 0)))
+
+
+;;;; Rules for `symbol_value` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Load the address of a symbol, target reachable via PC-relative instruction.
+(rule (lower (and (symbol_value _)
+ (symbol_value_data name (reloc_distance_near)
+ (memarg_symbol_offset offset))))
+ (value_reg (load_addr (memarg_symbol name offset (memflags_trusted)))))
+
+;; Load the address of a symbol, general case.
+(rule (lower (and (symbol_value _)
+ (symbol_value_data name _ offset)))
+ (value_reg (load_ext_name_far name offset)))
+
+
+;;;; Rules for `load` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Load 8-bit integers.
+(rule (lower (has_type $I8 (load flags addr offset)))
+ (value_reg (zext32_mem $I8 (lower_address flags addr offset))))
+
+;; Load 16-bit big-endian integers.
+(rule (lower (has_type $I16 (load flags @ (bigendian) addr offset)))
+ (value_reg (zext32_mem $I16 (lower_address flags addr offset))))
+
+;; Load 16-bit little-endian integers.
+(rule (lower (has_type $I16 (load flags @ (littleendian) addr offset)))
+ (value_reg (loadrev16 (lower_address flags addr offset))))
+
+;; Load 32-bit big-endian integers.
+(rule (lower (has_type $I32 (load flags @ (bigendian) addr offset))) + (value_reg (load32 (lower_address flags addr offset)))) + +;; Load 32-bit little-endian integers. +(rule (lower (has_type $I32 (load flags @ (littleendian) addr offset))) + (value_reg (loadrev32 (lower_address flags addr offset)))) + +;; Load 64-bit big-endian integers. +(rule (lower (has_type $I64 (load flags @ (bigendian) addr offset))) + (value_reg (load64 (lower_address flags addr offset)))) + +;; Load 64-bit little-endian integers. +(rule (lower (has_type $I64 (load flags @ (littleendian) addr offset))) + (value_reg (loadrev64 (lower_address flags addr offset)))) + +;; Load 64-bit big-endian references. +(rule (lower (has_type $R64 (load flags @ (bigendian) addr offset))) + (value_reg (load64 (lower_address flags addr offset)))) + +;; Load 64-bit little-endian references. +(rule (lower (has_type $R64 (load flags @ (littleendian) addr offset))) + (value_reg (loadrev64 (lower_address flags addr offset)))) + +;; Load 32-bit big-endian floating-point values. +(rule (lower (has_type $F32 (load flags @ (bigendian) addr offset))) + (value_reg (fpu_load32 (lower_address flags addr offset)))) + +;; Load 32-bit little-endian floating-point values (z15 instruction). +(rule (lower (has_type (and (vxrs_ext2_enabled) $F32) + (load flags @ (littleendian) addr offset))) + (value_reg (fpu_loadrev32 (lower_address flags addr offset)))) + +;; Load 32-bit little-endian floating-point values (via GPR on z14). +(rule (lower (has_type (and (vxrs_ext2_disabled) $F32) + (load flags @ (littleendian) addr offset))) + (let ((gpr Reg (loadrev32 (lower_address flags addr offset)))) + (value_reg (mov_to_fpr (lshl_imm $I64 gpr 32))))) + +;; Load 64-bit big-endian floating-point values. +(rule (lower (has_type $F64 (load flags @ (bigendian) addr offset))) + (value_reg (fpu_load64 (lower_address flags addr offset)))) + +;; Load 64-bit little-endian floating-point values (z15 instruction). +(rule (lower (has_type (and (vxrs_ext2_enabled) $F64) + (load flags @ (littleendian) addr offset))) + (value_reg (fpu_loadrev64 (lower_address flags addr offset)))) + +;; Load 64-bit little-endian floating-point values (via GPR on z14). +(rule (lower (has_type (and (vxrs_ext2_disabled) $F64) + (load flags @ (littleendian) addr offset))) + (let ((gpr Reg (loadrev64 (lower_address flags addr offset)))) + (value_reg (mov_to_fpr gpr)))) + + +;;;; Rules for `uload8` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 16- or 32-bit target types. +(rule (lower (has_type (gpr32_ty _ty) (uload8 flags addr offset))) + (value_reg (zext32_mem $I8 (lower_address flags addr offset)))) + +;; 64-bit target types. +(rule (lower (has_type (gpr64_ty _ty) (uload8 flags addr offset))) + (value_reg (zext64_mem $I8 (lower_address flags addr offset)))) + + +;;;; Rules for `sload8` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 16- or 32-bit target types. +(rule (lower (has_type (gpr32_ty _ty) (sload8 flags addr offset))) + (value_reg (sext32_mem $I8 (lower_address flags addr offset)))) + +;; 64-bit target types. +(rule (lower (has_type (gpr64_ty _ty) (sload8 flags addr offset))) + (value_reg (sext64_mem $I8 (lower_address flags addr offset)))) + + +;;;; Rules for `uload16` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 32-bit target type, big-endian source value. 
+(rule (lower (has_type (gpr32_ty _ty) + (uload16 flags @ (bigendian) addr offset))) + (value_reg (zext32_mem $I16 (lower_address flags addr offset)))) + +;; 32-bit target type, little-endian source value (via explicit extension). +(rule (lower (has_type (gpr32_ty _ty) + (uload16 flags @ (littleendian) addr offset))) + (let ((reg16 Reg (loadrev16 (lower_address flags addr offset)))) + (value_reg (zext32_reg $I16 reg16)))) + +;; 64-bit target type, big-endian source value. +(rule (lower (has_type (gpr64_ty _ty) + (uload16 flags @ (bigendian) addr offset))) + (value_reg (zext64_mem $I16 (lower_address flags addr offset)))) + +;; 64-bit target type, little-endian source value (via explicit extension). +(rule (lower (has_type (gpr64_ty _ty) + (uload16 flags @ (littleendian) addr offset))) + (let ((reg16 Reg (loadrev16 (lower_address flags addr offset)))) + (value_reg (zext64_reg $I16 reg16)))) + + +;;;; Rules for `sload16` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 32-bit target type, big-endian source value. +(rule (lower (has_type (gpr32_ty _ty) + (sload16 flags @ (bigendian) addr offset))) + (value_reg (sext32_mem $I16 (lower_address flags addr offset)))) + +;; 32-bit target type, little-endian source value (via explicit extension). +(rule (lower (has_type (gpr32_ty _ty) + (sload16 flags @ (littleendian) addr offset))) + (let ((reg16 Reg (loadrev16 (lower_address flags addr offset)))) + (value_reg (sext32_reg $I16 reg16)))) + +;; 64-bit target type, big-endian source value. +(rule (lower (has_type (gpr64_ty _ty) + (sload16 flags @ (bigendian) addr offset))) + (value_reg (sext64_mem $I16 (lower_address flags addr offset)))) + +;; 64-bit target type, little-endian source value (via explicit extension). +(rule (lower (has_type (gpr64_ty _ty) + (sload16 flags @ (littleendian) addr offset))) + (let ((reg16 Reg (loadrev16 (lower_address flags addr offset)))) + (value_reg (sext64_reg $I16 reg16)))) + + +;;;; Rules for `uload32` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 64-bit target type, big-endian source value. +(rule (lower (has_type (gpr64_ty _ty) + (uload32 flags @ (bigendian) addr offset))) + (value_reg (zext64_mem $I32 (lower_address flags addr offset)))) + +;; 64-bit target type, little-endian source value (via explicit extension). +(rule (lower (has_type (gpr64_ty _ty) + (uload32 flags @ (littleendian) addr offset))) + (let ((reg32 Reg (loadrev32 (lower_address flags addr offset)))) + (value_reg (zext64_reg $I32 reg32)))) + + +;;;; Rules for `sload32` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 64-bit target type, big-endian source value. +(rule (lower (has_type (gpr64_ty _ty) + (sload32 flags @ (bigendian) addr offset))) + (value_reg (sext64_mem $I32 (lower_address flags addr offset)))) + +;; 64-bit target type, little-endian source value (via explicit extension). +(rule (lower (has_type (gpr64_ty _ty) + (sload32 flags @ (littleendian) addr offset))) + (let ((reg32 Reg (loadrev32 (lower_address flags addr offset)))) + (value_reg (sext64_reg $I32 reg32)))) + + +;;;; Rules for `store` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; The actual store logic for integer types is identical for the `store`, +;; `istoreNN`, and `atomic_store` instructions, so we share common helpers. + +;; Store 8-bit integer type, main lowering entry point. +(rule (lower (store flags val @ (value_type $I8) addr offset)) + (value_regs_none (istore8_impl flags val addr offset))) + +;; Store 16-bit integer type, main lowering entry point. 
+(rule (lower (store flags val @ (value_type $I16) addr offset)) + (value_regs_none (istore16_impl flags val addr offset))) + +;; Store 32-bit integer type, main lowering entry point. +(rule (lower (store flags val @ (value_type $I32) addr offset)) + (value_regs_none (istore32_impl flags val addr offset))) + +;; Store 64-bit integer type, main lowering entry point. +(rule (lower (store flags val @ (value_type $I64) addr offset)) + (value_regs_none (istore64_impl flags val addr offset))) + +;; Store 64-bit reference type, main lowering entry point. +(rule (lower (store flags val @ (value_type $R64) addr offset)) + (value_regs_none (istore64_impl flags val addr offset))) + +;; Store 32-bit big-endian floating-point type. +(rule (lower (store flags @ (bigendian) + val @ (value_type $F32) addr offset)) + (value_regs_none (fpu_store32 (put_in_reg val) + (lower_address flags addr offset)))) + +;; Store 32-bit little-endian floating-point type (z15 instruction). +(rule (lower (store flags @ (littleendian) + val @ (value_type (and $F32 (vxrs_ext2_enabled))) addr offset)) + (value_regs_none (fpu_storerev32 (put_in_reg val) + (lower_address flags addr offset)))) + +;; Store 32-bit little-endian floating-point type (via GPR on z14). +(rule (lower (store flags @ (littleendian) + val @ (value_type (and $F32 (vxrs_ext2_disabled))) addr offset)) + (let ((gpr Reg (lshr_imm $I64 (mov_from_fpr (put_in_reg val)) 32))) + (value_regs_none (storerev32 gpr (lower_address flags addr offset))))) + +;; Store 64-bit big-endian floating-point type. +(rule (lower (store flags @ (bigendian) + val @ (value_type $F64) addr offset)) + (value_regs_none (fpu_store64 (put_in_reg val) + (lower_address flags addr offset)))) + +;; Store 64-bit little-endian floating-point type (z15 instruction). +(rule (lower (store flags @ (littleendian) + val @ (value_type (and $F64 (vxrs_ext2_enabled))) addr offset)) + (value_regs_none (fpu_storerev64 (put_in_reg val) + (lower_address flags addr offset)))) + +;; Store 64-bit little-endian floating-point type (via GPR on z14). +(rule (lower (store flags @ (littleendian) + val @ (value_type (and $F64 (vxrs_ext2_disabled))) addr offset)) + (let ((gpr Reg (mov_from_fpr (put_in_reg val)))) + (value_regs_none (storerev64 gpr (lower_address flags addr offset))))) + + +;;;; Rules for 8-bit integer stores ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Main `istore8` lowering entry point, dispatching to the helper. +(rule (lower (istore8 flags val addr offset)) + (value_regs_none (istore8_impl flags val addr offset))) + +;; Helper to store 8-bit integer types. +(decl istore8_impl (MemFlags Value Value Offset32) SideEffectNoResult) + +;; Store 8-bit integer types, register input. +(rule (istore8_impl flags val addr offset) + (store8 (put_in_reg val) (lower_address flags addr offset))) + +;; Store 8-bit integer types, immediate input. +(rule (istore8_impl flags (u8_from_value imm) addr offset) + (store8_imm imm (lower_address flags addr offset))) + + +;;;; Rules for 16-bit integer stores ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Main `istore16` lowering entry point, dispatching to the helper. +(rule (lower (istore16 flags val addr offset)) + (value_regs_none (istore16_impl flags val addr offset))) + +;; Helper to store 16-bit integer types. +(decl istore16_impl (MemFlags Value Value Offset32) SideEffectNoResult) + +;; Store 16-bit big-endian integer types, register input. 
+(rule (istore16_impl flags @ (bigendian) val addr offset) + (store16 (put_in_reg val) (lower_address flags addr offset))) + +;; Store 16-bit little-endian integer types, register input. +(rule (istore16_impl flags @ (littleendian) val addr offset) + (storerev16 (put_in_reg val) (lower_address flags addr offset))) + +;; Store 16-bit big-endian integer types, immediate input. +(rule (istore16_impl flags @ (bigendian) (i16_from_value imm) addr offset) + (store16_imm imm (lower_address flags addr offset))) + +;; Store 16-bit little-endian integer types, immediate input. +(rule (istore16_impl flags @ (littleendian) (i16_from_swapped_value imm) addr offset) + (store16_imm imm (lower_address flags addr offset))) + + +;;;; Rules for 32-bit integer stores ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Main `istore32` lowering entry point, dispatching to the helper. +(rule (lower (istore32 flags val addr offset)) + (value_regs_none (istore32_impl flags val addr offset))) + +;; Helper to store 32-bit integer types. +(decl istore32_impl (MemFlags Value Value Offset32) SideEffectNoResult) + +;; Store 32-bit big-endian integer types, register input. +(rule (istore32_impl flags @ (bigendian) val addr offset) + (store32 (put_in_reg val) (lower_address flags addr offset))) + +;; Store 32-bit big-endian integer types, immediate input. +(rule (istore32_impl flags @ (bigendian) (i16_from_value imm) addr offset) + (store32_simm16 imm (lower_address flags addr offset))) + +;; Store 32-bit little-endian integer types. +(rule (istore32_impl flags @ (littleendian) val addr offset) + (storerev32 (put_in_reg val) (lower_address flags addr offset))) + + +;;;; Rules for 64-bit integer stores ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Helper to store 64-bit integer types. +(decl istore64_impl (MemFlags Value Value Offset32) SideEffectNoResult) + +;; Store 64-bit big-endian integer types, register input. +(rule (istore64_impl flags @ (bigendian) val addr offset) + (store64 (put_in_reg val) (lower_address flags addr offset))) + +;; Store 64-bit big-endian integer types, immediate input. +(rule (istore64_impl flags @ (bigendian) (i16_from_value imm) addr offset) + (store64_simm16 imm (lower_address flags addr offset))) + +;; Store 64-bit little-endian integer types. +(rule (istore64_impl flags @ (littleendian) val addr offset) + (storerev64 (put_in_reg val) (lower_address flags addr offset))) + + +;;;; Rules for `atomic_rmw` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Atomic AND for 32/64-bit big-endian types, using a single instruction. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (bigendian) (AtomicRmwOp.And) addr src))) + (value_reg (atomic_rmw_and ty (put_in_reg src) + (lower_address flags addr (zero_offset))))) + +;; Atomic OR for 32/64-bit big-endian types, using a single instruction. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Or) addr src))) + (value_reg (atomic_rmw_or ty (put_in_reg src) + (lower_address flags addr (zero_offset))))) + +;; Atomic XOR for 32/64-bit big-endian types, using a single instruction. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Xor) addr src))) + (value_reg (atomic_rmw_xor ty (put_in_reg src) + (lower_address flags addr (zero_offset))))) + +;; Atomic ADD for 32/64-bit big-endian types, using a single instruction. 
+(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Add) addr src))) + (value_reg (atomic_rmw_add ty (put_in_reg src) + (lower_address flags addr (zero_offset))))) + +;; Atomic SUB for 32/64-bit big-endian types, using atomic ADD with negated input. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Sub) addr src))) + (value_reg (atomic_rmw_add ty (neg_reg ty (put_in_reg src)) + (lower_address flags addr (zero_offset))))) + + +;;;; Rules for `atomic_cas` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; 32-bit big-endian atomic compare-and-swap instruction. +(rule (lower (has_type $I32 (atomic_cas flags @ (bigendian) addr old new))) + (value_reg (atomic_cas32 (put_in_reg old) (put_in_reg new) + (lower_address flags addr (zero_offset))))) + +;; 64-bit big-endian atomic compare-and-swap instruction. +(rule (lower (has_type $I64 (atomic_cas flags @ (bigendian) addr old new))) + (value_reg (atomic_cas64 (put_in_reg old) (put_in_reg new) + (lower_address flags addr (zero_offset))))) + + +;;;; Rules for `atomic_load` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Atomic loads can be implemented via regular loads on this platform. + +;; 8-bit atomic load. +(rule (lower (has_type $I8 (atomic_load flags addr))) + (value_reg (zext32_mem $I8 (lower_address flags addr (zero_offset))))) + +;; 16-bit big-endian atomic load. +(rule (lower (has_type $I16 (atomic_load flags @ (bigendian) addr))) + (value_reg (zext32_mem $I16 (lower_address flags addr (zero_offset))))) + +;; 16-bit little-endian atomic load. +(rule (lower (has_type $I16 (atomic_load flags @ (littleendian) addr))) + (value_reg (loadrev16 (lower_address flags addr (zero_offset))))) + +;; 32-bit big-endian atomic load. +(rule (lower (has_type $I32 (atomic_load flags @ (bigendian) addr))) + (value_reg (load32 (lower_address flags addr (zero_offset))))) + +;; 32-bit little-endian atomic load. +(rule (lower (has_type $I32 (atomic_load flags @ (littleendian) addr))) + (value_reg (loadrev32 (lower_address flags addr (zero_offset))))) + +;; 64-bit big-endian atomic load. +(rule (lower (has_type $I64 (atomic_load flags @ (bigendian) addr))) + (value_reg (load64 (lower_address flags addr (zero_offset))))) + +;; 64-bit little-endian atomic load. +(rule (lower (has_type $I64 (atomic_load flags @ (littleendian) addr))) + (value_reg (loadrev64 (lower_address flags addr (zero_offset))))) + + +;;;; Rules for `atomic_store` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Atomic stores can be implemented via regular stores followed by a fence. +(decl atomic_store_impl (SideEffectNoResult) ValueRegs) +(rule (atomic_store_impl store) + (let ((_ ValueRegs (value_regs_none store))) + (value_regs_none (fence_impl)))) + +;; 8-bit atomic store. +(rule (lower (atomic_store flags val @ (value_type $I8) addr)) + (atomic_store_impl (istore8_impl flags val addr (zero_offset)))) + +;; 16-bit atomic store. +(rule (lower (atomic_store flags val @ (value_type $I16) addr)) + (atomic_store_impl (istore16_impl flags val addr (zero_offset)))) + +;; 32-bit atomic store. +(rule (lower (atomic_store flags val @ (value_type $I32) addr)) + (atomic_store_impl (istore32_impl flags val addr (zero_offset)))) + +;; 64-bit atomic store. 
+(rule (lower (atomic_store flags val @ (value_type $I64) addr))
+      (atomic_store_impl (istore64_impl flags val addr (zero_offset))))
+
+
+;;;; Rules for `fence` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Fence to ensure sequential consistency.
+(rule (lower (fence))
+      (value_regs_none (fence_impl)))
+
+
+;;;; Rules for `icmp` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; We want to optimize the typical use of `icmp` (generating an integer 0/1
+;; result) followed by some user, like a `select` or a conditional branch.
+;; Instead of first generating the integer result and later testing it again,
+;; we want to sink the comparison to be performed at the site of use.
+;;
+;; To enable this, we provide generic helpers that return a `ProducesBool`
+;; encapsulating the comparison in question, which can be used by all the
+;; above scenarios.
+;;
+;; N.B. There are specific considerations when sinking a memory load into a
+;; comparison. When emitting an `icmp` directly, this can of course be done
+;; as usual. However, when we use the `ProducesBool` elsewhere, we need to
+;; consider *three* instructions: the load, the `icmp`, and the final user
+;; (e.g. a conditional branch). The only way to safely sink the load would
+;; be to sink it directly into the final user, which is only possible if there
+;; is no *other* user of the `icmp` result. This is not currently being
+;; verified by the `SinkableInst` logic, so to be safe we do not perform this
+;; optimization at all.
+;;
+;; The generic `icmp_val` helper therefore has a flag indicating whether
+;; it is being invoked in a context where it is safe to sink memory loads
+;; (e.g. when directly emitting an `icmp`), or whether it is not (e.g. when
+;; sinking the `icmp` result into a conditional branch or select).
+
+;; Main `icmp` entry point. Generate a `ProducesBool` capturing the
+;; integer comparison and immediately lower it to a 0/1 integer result.
+;; In this case, it is safe to sink memory loads.
+(rule (lower (has_type ty (icmp int_cc x y)))
+      (value_reg (lower_bool ty (icmp_val $true int_cc x y))))
+
+
+;; Return a `ProducesBool` to implement any integer comparison.
+;; The first argument is a flag to indicate whether it is safe to sink
+;; memory loads as discussed above.
+(decl icmp_val (bool IntCC Value Value) ProducesBool)
+
+;; Dispatch for signed comparisons.
+(rule (icmp_val allow_mem int_cc @ (signed) x y)
+      (bool (icmps_val allow_mem x y) (intcc_as_cond int_cc)))
+;; Dispatch for unsigned comparisons.
+(rule (icmp_val allow_mem int_cc @ (unsigned) x y)
+      (bool (icmpu_val allow_mem x y) (intcc_as_cond int_cc)))
+
+
+;; Return a `ProducesFlags` to implement signed integer comparisons.
+(decl icmps_val (bool Value Value) ProducesFlags)
+
+;; Compare (signed) two registers.
+(rule (icmps_val _ x @ (value_type (fits_in_64 ty)) y)
+      (icmps_reg (ty_ext32 ty) (put_in_reg_sext32 x) (put_in_reg_sext32 y)))
+
+;; Compare (signed) a register and a sign-extended register.
+(rule (icmps_val _ x @ (value_type (fits_in_64 ty)) (sext32_value y))
+      (icmps_reg_sext32 ty (put_in_reg x) (put_in_reg y)))
+
+;; Compare (signed) a register and an immediate.
+(rule (icmps_val _ x @ (value_type (fits_in_64 ty)) (i16_from_value y))
+      (icmps_simm16 (ty_ext32 ty) (put_in_reg_sext32 x) y))
+(rule (icmps_val _ x @ (value_type (fits_in_64 ty)) (i32_from_value y))
+      (icmps_simm32 (ty_ext32 ty) (put_in_reg_sext32 x) y))
+
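+;; For illustration only (hypothetical CLIF value numbers; this is not an
+;; additional rule of this patch): when an `icmp` is lowered directly (the
+;; `$true` case above) and its right-hand operand is a big-endian load with
+;; no other user, e.g.
+;;
+;;   v2 = load.i64 v1
+;;   v3 = icmp sgt v0, v2
+;;
+;; the rules below sink the load into the comparison via `sink_load`, so a
+;; single compare-from-memory instruction (e.g. CG for a 64-bit signed
+;; compare) is emitted instead of a separate load followed by a
+;; register-register compare.
+
+;; Compare (signed) a register and memory (32/64-bit types).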
+(rule (icmps_val $true x @ (value_type (fits_in_64 ty)) (sinkable_load_32_64 y))
+      (icmps_mem ty (put_in_reg x) (sink_load y)))
+
+;; Compare (signed) a register and memory (16-bit types).
+(rule (icmps_val $true x @ (value_type (fits_in_64 ty)) (sinkable_load_16 y))
+      (icmps_mem_sext16 (ty_ext32 ty) (put_in_reg_sext32 x) (sink_load y)))
+
+;; Compare (signed) a register and sign-extended memory.
+(rule (icmps_val $true x @ (value_type (fits_in_64 ty)) (sinkable_sload16 y))
+      (icmps_mem_sext16 ty (put_in_reg x) (sink_sload16 y)))
+(rule (icmps_val $true x @ (value_type (fits_in_64 ty)) (sinkable_sload32 y))
+      (icmps_mem_sext32 ty (put_in_reg x) (sink_sload32 y)))
+
+
+;; Return a `ProducesFlags` to implement unsigned integer comparisons.
+(decl icmpu_val (bool Value Value) ProducesFlags)
+
+;; Compare (unsigned) two registers.
+(rule (icmpu_val _ x @ (value_type (fits_in_64 ty)) y)
+      (icmpu_reg (ty_ext32 ty) (put_in_reg_zext32 x) (put_in_reg_zext32 y)))
+
+;; Compare (unsigned) a register and a zero-extended register.
+(rule (icmpu_val _ x @ (value_type (fits_in_64 ty)) (zext32_value y))
+      (icmpu_reg_zext32 ty (put_in_reg x) (put_in_reg y)))
+
+;; Compare (unsigned) a register and an immediate.
+(rule (icmpu_val _ x @ (value_type (fits_in_64 ty)) (u32_from_value y))
+      (icmpu_uimm32 (ty_ext32 ty) (put_in_reg_zext32 x) y))
+
+;; Compare (unsigned) a register and memory (32/64-bit types).
+(rule (icmpu_val $true x @ (value_type (fits_in_64 ty)) (sinkable_load_32_64 y))
+      (icmpu_mem ty (put_in_reg x) (sink_load y)))
+
+;; Compare (unsigned) a register and memory (16-bit types).
+;; Note that the ISA only provides instructions with a PC-relative memory
+;; address here, so we need to check whether the sinkable load matches this.
+(rule (icmpu_val $true x @ (value_type (fits_in_64 ty))
+                 (sinkable_load_16 (load_sym y)))
+      (icmpu_mem_zext16 (ty_ext32 ty) (put_in_reg_zext32 x) (sink_load y)))
+
+;; Compare (unsigned) a register and zero-extended memory.
+;; Note that the ISA only provides instructions with a PC-relative memory
+;; address here, so we need to check whether the sinkable load matches this.
+(rule (icmpu_val $true x @ (value_type (fits_in_64 ty))
+                 (sinkable_uload16 (uload16_sym y)))
+      (icmpu_mem_zext16 ty (put_in_reg x) (sink_uload16 y)))
+(rule (icmpu_val $true x @ (value_type (fits_in_64 ty)) (sinkable_uload32 y))
+      (icmpu_mem_zext32 ty (put_in_reg x) (sink_uload32 y)))
+
+
+;;;; Rules for `fcmp` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Main `fcmp` entry point. Generate a `ProducesBool` capturing the
+;; floating-point comparison and immediately lower it to a 0/1 integer result.
+(rule (lower (has_type ty (fcmp float_cc x y)))
+      (value_reg (lower_bool ty (fcmp_val float_cc x y))))
+
+;; Return a `ProducesBool` to implement any floating-point comparison.
+(decl fcmp_val (FloatCC Value Value) ProducesBool)
+(rule (fcmp_val float_cc x @ (value_type ty) y)
+      (bool (fcmp_reg ty (put_in_reg x) (put_in_reg y))
+            (floatcc_as_cond float_cc)))
+
+
+;;;; Rules for `is_null` and `is_invalid` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Null references are represented by the constant value 0.
+(rule (lower (has_type $B1 (is_null x @ (value_type $R64))))
+      (value_reg (lower_bool $B1 (bool (icmps_simm16 $I64 (put_in_reg x) 0)
+                                       (intcc_as_cond (IntCC.Equal))))))
+
+
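+;; Illustrative sketch only (not an additional rule of this patch): the
+;; shape shared by `is_null`, `is_invalid` and the comparison lowerings
+;; above is
+;;
+;;   (lower_bool ty (bool <flags-producer> <cond>))
+;;
+;; where the flags producer (here `icmps_simm16`) sets the condition code,
+;; the `Cond` (here `(intcc_as_cond (IntCC.Equal))`) selects when the result
+;; is considered true, and `lower_bool` materializes the boolean in the
+;; representation used by this backend (0/1 for `$B1`, 0/-1 for wider
+;; boolean types).
+
+;; Invalid references are represented by the constant value -1.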
+(rule (lower (has_type $B1 (is_invalid x @ (value_type $R64)))) + (value_reg (lower_bool $B1 (bool (icmps_simm16 $I64 (put_in_reg x) -1) + (intcc_as_cond (IntCC.Equal)))))) + + +;;;; Rules for `select` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Return a `ProducesBool` to capture the fact that the input value is nonzero. +;; In the common case where that input is the result of an `icmp` or `fcmp` +;; instruction (possibly via an intermediate `bint`), directly use that compare. +;; Note that it is not safe to sink memory loads here, see the `icmp` comment. +(decl value_nonzero (Value) ProducesBool) +(rule (value_nonzero (def_inst (bint val))) (value_nonzero val)) +(rule (value_nonzero (def_inst (icmp int_cc x y))) (icmp_val $false int_cc x y)) +(rule (value_nonzero (def_inst (fcmp float_cc x y))) (fcmp_val float_cc x y)) +(rule (value_nonzero val @ (value_type (gpr32_ty ty))) + (bool (icmps_simm16 $I32 (put_in_reg_sext32 val) 0) + (intcc_as_cond (IntCC.NotEqual)))) +(rule (value_nonzero val @ (value_type (gpr64_ty ty))) + (bool (icmps_simm16 $I64 (put_in_reg val) 0) + (intcc_as_cond (IntCC.NotEqual)))) + +;; Main `select` entry point. Lower the `value_nonzero` result. +(rule (lower (has_type ty (select val_cond val_true val_false))) + (value_reg (select_bool_reg ty (value_nonzero val_cond) + (put_in_reg val_true) (put_in_reg val_false)))) + + +;;;; Rules for `selectif_spectre_guard` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; We do not support the `iflags` mechanism on our platform. However, common +;; code will unconditionally emit certain patterns using `iflags` which we +;; need to handle somehow. Note that only those specific patterns are +;; recognized by the code below, other uses will fail to lower. + +(rule (lower (has_type ty (selectif_spectre_guard int_cc + (def_inst (ifcmp x y)) val_true val_false))) + (value_reg (select_bool_reg ty (icmp_val $false int_cc x y) + (put_in_reg val_true) (put_in_reg val_false)))) + diff --git a/cranelift/codegen/src/isa/s390x/lower.rs b/cranelift/codegen/src/isa/s390x/lower.rs index f4aa7c805d7c..866583922a30 100644 --- a/cranelift/codegen/src/isa/s390x/lower.rs +++ b/cranelift/codegen/src/isa/s390x/lower.rs @@ -1,8 +1,8 @@ //! Lowering rules for S390x. -use crate::ir::condcodes::{FloatCC, IntCC}; +use crate::ir::condcodes::IntCC; use crate::ir::Inst as IRInst; -use crate::ir::{types, Endianness, InstructionData, MemFlags, Opcode, TrapCode, Type}; +use crate::ir::{types, Endianness, MemFlags, Opcode, Type}; use crate::isa::s390x::abi::*; use crate::isa::s390x::inst::*; use crate::isa::s390x::settings as s390x_settings; @@ -17,30 +17,11 @@ use core::convert::TryFrom; use regalloc::{Reg, Writable}; use smallvec::SmallVec; +pub mod isle; + //============================================================================= // Helpers for instruction lowering. 
-fn ty_is_int(ty: Type) -> bool { - match ty { - types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true, - types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true, - types::F32 | types::F64 => false, - types::IFLAGS | types::FFLAGS => panic!("Unexpected flags type"), - _ => panic!("ty_is_int() on unknown type: {:?}", ty), - } -} - -fn ty_is_float(ty: Type) -> bool { - !ty_is_int(ty) -} - -fn is_valid_atomic_transaction_ty(ty: Type) -> bool { - match ty { - types::I8 | types::I16 | types::I32 | types::I64 => true, - _ => false, - } -} - fn choose_32_64(ty: Type, op32: T, op64: T) -> T { let bits = ty_bits(ty); if bits <= 32 { @@ -71,22 +52,6 @@ fn input_matches_sconst>(ctx: &mut C, input: InsnInput) -> } } -/// Return false if instruction input cannot have the value Imm, true otherwise. -fn input_maybe_imm>(ctx: &mut C, input: InsnInput, imm: u64) -> bool { - if let Some(c) = input_matches_const(ctx, input) { - let ty = ctx.input_ty(input.insn, input.input); - let from_bits = ty_bits(ty) as u8; - let mask = if from_bits < 64 { - (1u64 << ty_bits(ty)) - 1 - } else { - 0xffff_ffff_ffff_ffff - }; - c & mask == imm & mask - } else { - true - } -} - /// Lower an instruction input to a 16-bit signed constant, if possible. fn input_matches_simm16>(ctx: &mut C, input: InsnInput) -> Option { if let Some(imm_value) = input_matches_sconst(ctx, input) { @@ -117,80 +82,6 @@ fn input_matches_uimm32>(ctx: &mut C, input: InsnInput) -> None } -/// Lower a negated instruction input to a 16-bit signed constant, if possible. -fn negated_input_matches_simm16>( - ctx: &mut C, - input: InsnInput, -) -> Option { - if let Some(imm_value) = input_matches_sconst(ctx, input) { - if let Ok(imm) = i16::try_from(-imm_value) { - return Some(imm); - } - } - None -} - -/// Lower a negated instruction input to a 32-bit signed constant, if possible. -fn negated_input_matches_simm32>( - ctx: &mut C, - input: InsnInput, -) -> Option { - if let Some(imm_value) = input_matches_sconst(ctx, input) { - if let Ok(imm) = i32::try_from(-imm_value) { - return Some(imm); - } - } - None -} - -/// Lower an instruction input to a 16-bit shifted constant, if possible. -fn input_matches_uimm16shifted>( - ctx: &mut C, - input: InsnInput, -) -> Option { - if let Some(imm_value) = input_matches_const(ctx, input) { - return UImm16Shifted::maybe_from_u64(imm_value); - } - None -} - -/// Lower an instruction input to a 32-bit shifted constant, if possible. -fn input_matches_uimm32shifted>( - ctx: &mut C, - input: InsnInput, -) -> Option { - if let Some(imm_value) = input_matches_const(ctx, input) { - return UImm32Shifted::maybe_from_u64(imm_value); - } - None -} - -/// Lower an instruction input to a 16-bit inverted shifted constant, if possible. -fn input_matches_uimm16shifted_inv>( - ctx: &mut C, - input: InsnInput, -) -> Option { - if let Some(imm_value) = input_matches_const(ctx, input) { - if let Some(imm) = UImm16Shifted::maybe_from_u64(!imm_value) { - return Some(imm.negate_bits()); - } - } - None -} - -/// Lower an instruction input to a 32-bit inverted shifted constant, if possible. -fn input_matches_uimm32shifted_inv>( - ctx: &mut C, - input: InsnInput, -) -> Option { - if let Some(imm_value) = input_matches_const(ctx, input) { - if let Some(imm) = UImm32Shifted::maybe_from_u64(!imm_value) { - return Some(imm.negate_bits()); - } - } - None -} - /// Checks for an instance of `op` feeding the given input. 
fn input_matches_insn>( c: &mut C, @@ -545,84 +436,6 @@ fn lower_constant_u64>(ctx: &mut C, rd: Writable, val } } -fn lower_constant_u32>(ctx: &mut C, rd: Writable, value: u32) { - for inst in Inst::load_constant32(rd, value) { - ctx.emit(inst); - } -} - -fn lower_constant_f32>(ctx: &mut C, rd: Writable, value: f32) { - ctx.emit(Inst::load_fp_constant32(rd, value)); -} - -fn lower_constant_f64>(ctx: &mut C, rd: Writable, value: f64) { - ctx.emit(Inst::load_fp_constant64(rd, value)); -} - -//============================================================================ -// Lowering: miscellaneous helpers. - -/// Emit code to invert the value of type ty in register rd. -fn lower_bnot>(ctx: &mut C, ty: Type, rd: Writable) { - let alu_op = choose_32_64(ty, ALUOp::Xor32, ALUOp::Xor64); - ctx.emit(Inst::AluRUImm32Shifted { - alu_op, - rd, - imm: UImm32Shifted::maybe_from_u64(0xffff_ffff).unwrap(), - }); - if ty_bits(ty) > 32 { - ctx.emit(Inst::AluRUImm32Shifted { - alu_op, - rd, - imm: UImm32Shifted::maybe_from_u64(0xffff_ffff_0000_0000).unwrap(), - }); - } -} - -/// Emit code to bitcast between integer and floating-point values. -fn lower_bitcast>( - ctx: &mut C, - rd: Writable, - output_ty: Type, - rn: Reg, - input_ty: Type, -) { - match (input_ty, output_ty) { - (types::I64, types::F64) => { - ctx.emit(Inst::MovToFpr { rd, rn }); - } - (types::F64, types::I64) => { - ctx.emit(Inst::MovFromFpr { rd, rn }); - } - (types::I32, types::F32) => { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::LShL64, - rd: tmp, - rn, - shift_imm: 32, - shift_reg: zero_reg(), - }); - ctx.emit(Inst::MovToFpr { - rd, - rn: tmp.to_reg(), - }); - } - (types::F32, types::I32) => { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::MovFromFpr { rd: tmp, rn }); - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::LShR64, - rd, - rn: tmp.to_reg(), - shift_imm: 32, - shift_reg: zero_reg(), - }); - } - _ => unreachable!("invalid bitcast from {:?} to {:?}", input_ty, output_ty), - } -} - //============================================================================= // Lowering: comparisons @@ -792,24 +605,6 @@ fn lower_boolean_to_flags>(ctx: &mut C, input: InsnInput) } } -fn lower_flags_to_bool_result>( - ctx: &mut C, - cond: Cond, - rd: Writable, - ty: Type, -) { - if ty_bits(ty) == 1 { - lower_constant_u32(ctx, rd, 0); - ctx.emit(Inst::CMov32SImm16 { rd, cond, imm: 1 }); - } else if ty_bits(ty) < 64 { - lower_constant_u32(ctx, rd, 0); - ctx.emit(Inst::CMov32SImm16 { rd, cond, imm: -1 }); - } else { - lower_constant_u64(ctx, rd, 0); - ctx.emit(Inst::CMov64SImm16 { rd, cond, imm: -1 }); - } -} - //============================================================================ // Lowering: main entry point for lowering a instruction @@ -832,1759 +627,156 @@ fn lower_insn_to_regs>( None }; - match op { - Opcode::Nop => { - // Nothing. - } - - Opcode::Copy | Opcode::Ireduce | Opcode::Breduce => { - // Smaller ints / bools have the high bits undefined, so any reduce - // operation is simply a copy. 
- let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let ty = ctx.input_ty(insn, 0); - ctx.emit(Inst::gen_move(rd, rn, ty)); - } + if let Ok(()) = super::lower::isle::lower(ctx, flags, isa_flags, &outputs, insn) { + return Ok(()); + } - Opcode::Iconst | Opcode::Bconst | Opcode::Null => { - let value = ctx.get_constant(insn).unwrap(); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let ty = ty.unwrap(); - if ty.bits() <= 32 { - lower_constant_u32(ctx, rd, value as u32); - } else { - lower_constant_u64(ctx, rd, value); - } - } - Opcode::F32const => { - let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - lower_constant_f32(ctx, rd, value); - } - Opcode::F64const => { - let value = f64::from_bits(ctx.get_constant(insn).unwrap()); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - lower_constant_f64(ctx, rd, value); - } + let implemented_in_isle = || { + unreachable!( + "implemented in ISLE: inst = `{}`, type = `{:?}`", + ctx.dfg().display_inst(insn), + ty + ); + }; - Opcode::Iadd => { - let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::Add32, ALUOp::Add64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = input_matches_simm16(ctx, inputs[1]) { - ctx.emit(Inst::AluRRSImm16 { - alu_op, - rd, - rn, - imm, - }); - } else if let Some(imm) = input_matches_simm32(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRSImm32 { alu_op, rd, imm }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else if let Some(mem) = input_matches_sext16_mem(ctx, inputs[1]) { - let alu_op = choose_32_64(ty, ALUOp::Add32Ext16, ALUOp::Add64Ext16); - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else if let Some(mem) = input_matches_sext32_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { - alu_op: ALUOp::Add64Ext32, - rd, - mem, - }); - } else if let Some(rm) = input_matches_sext32_reg(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRR { - alu_op: ALUOp::Add64Ext32, - rd, - rm, - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } - Opcode::Isub => { - let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64); - let neg_op = choose_32_64(ty, ALUOp::Add32, ALUOp::Add64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = negated_input_matches_simm16(ctx, inputs[1]) { - ctx.emit(Inst::AluRRSImm16 { - alu_op: neg_op, - rd, - rn, - imm, - }); - } else if let Some(imm) = negated_input_matches_simm32(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRSImm32 { - alu_op: neg_op, - rd, - imm, - }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else if let Some(mem) = input_matches_sext16_mem(ctx, inputs[1]) { - let alu_op = choose_32_64(ty, ALUOp::Sub32Ext16, ALUOp::Sub64Ext16); - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { 
alu_op, rd, mem }); - } else if let Some(mem) = input_matches_sext32_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { - alu_op: ALUOp::Sub64Ext32, - rd, - mem, - }); - } else if let Some(rm) = input_matches_sext32_reg(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRR { - alu_op: ALUOp::Sub64Ext32, - rd, - rm, - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } - Opcode::IaddIfcout => { - let ty = ty.unwrap(); - assert!(ty == types::I32 || ty == types::I64); - // Emit an ADD LOGICAL instruction, which sets the condition code - // to indicate an (unsigned) carry bit. - let alu_op = choose_32_64(ty, ALUOp::AddLogical32, ALUOp::AddLogical64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = input_matches_uimm32(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm32 { alu_op, rd, imm }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else if let Some(mem) = input_matches_uext32_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { - alu_op: ALUOp::AddLogical64Ext32, - rd, - mem, - }); - } else if let Some(rm) = input_matches_uext32_reg(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRR { - alu_op: ALUOp::AddLogical64Ext32, - rd, - rm, - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } + match op { + Opcode::Nop + | Opcode::Copy + | Opcode::Iconst + | Opcode::Bconst + | Opcode::F32const + | Opcode::F64const + | Opcode::Null + | Opcode::Iadd + | Opcode::IaddIfcout + | Opcode::Isub + | Opcode::Iabs + | Opcode::Ineg + | Opcode::Imul + | Opcode::Umulhi + | Opcode::Smulhi + | Opcode::Udiv + | Opcode::Urem + | Opcode::Sdiv + | Opcode::Srem + | Opcode::Ishl + | Opcode::Ushr + | Opcode::Sshr + | Opcode::Rotr + | Opcode::Rotl + | Opcode::Ireduce + | Opcode::Uextend + | Opcode::Sextend + | Opcode::Bnot + | Opcode::Band + | Opcode::Bor + | Opcode::Bxor + | Opcode::BandNot + | Opcode::BorNot + | Opcode::BxorNot + | Opcode::Bitselect + | Opcode::Breduce + | Opcode::Bextend + | Opcode::Bmask + | Opcode::Bint + | Opcode::Clz + | Opcode::Cls + | Opcode::Ctz + | Opcode::Popcnt + | Opcode::Fadd + | Opcode::Fsub + | Opcode::Fmul + | Opcode::Fdiv + | Opcode::Fmin + | Opcode::Fmax + | Opcode::Sqrt + | Opcode::Fneg + | Opcode::Fabs + | Opcode::Fpromote + | Opcode::Fdemote + | Opcode::Ceil + | Opcode::Floor + | Opcode::Trunc + | Opcode::Nearest + | Opcode::Fma + | Opcode::Fcopysign + | Opcode::FcvtFromUint + | Opcode::FcvtFromSint + | Opcode::FcvtToUint + | Opcode::FcvtToSint + | Opcode::FcvtToUintSat + | Opcode::FcvtToSintSat + | Opcode::Bitcast + | Opcode::Load + | Opcode::Uload8 + | Opcode::Sload8 + | Opcode::Uload16 + | Opcode::Sload16 + | Opcode::Uload32 + | Opcode::Sload32 + | Opcode::Store + | Opcode::Istore8 + | Opcode::Istore16 + | Opcode::Istore32 + | Opcode::AtomicRmw + | Opcode::AtomicCas + | Opcode::AtomicLoad + | Opcode::AtomicStore + | Opcode::Fence + | Opcode::Icmp + | Opcode::Fcmp + | Opcode::IsNull + | Opcode::IsInvalid + | Opcode::Select + | Opcode::SelectifSpectreGuard + | Opcode::StackAddr + | Opcode::FuncAddr + | Opcode::SymbolValue => 
implemented_in_isle(), Opcode::UaddSat | Opcode::SaddSat => unimplemented!(), Opcode::UsubSat | Opcode::SsubSat => unimplemented!(), - Opcode::Iabs => { - let ty = ty.unwrap(); - let op = choose_32_64(ty, UnaryOp::Abs32, UnaryOp::Abs64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - if let Some(rn) = input_matches_sext32_reg(ctx, inputs[0]) { - ctx.emit(Inst::UnaryRR { - op: UnaryOp::Abs64Ext32, - rd, - rn, - }); - } else { - let narrow_mode = if ty.bits() < 32 { - NarrowValueMode::SignExtend32 - } else { - NarrowValueMode::None - }; - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - ctx.emit(Inst::UnaryRR { op, rd, rn }); - } - } - Opcode::Ineg => { - let ty = ty.unwrap(); - let op = choose_32_64(ty, UnaryOp::Neg32, UnaryOp::Neg64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - if let Some(rn) = input_matches_sext32_reg(ctx, inputs[0]) { - ctx.emit(Inst::UnaryRR { - op: UnaryOp::Neg64Ext32, - rd, - rn, - }); - } else { - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(Inst::UnaryRR { op, rd, rn }); - } - } + Opcode::Bitrev => unimplemented!(), - Opcode::Imul => { - let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::Mul32, ALUOp::Mul64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = input_matches_simm16(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRSImm16 { alu_op, rd, imm }); - } else if let Some(imm) = input_matches_simm32(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRSImm32 { alu_op, rd, imm }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else if let Some(mem) = input_matches_sext16_mem(ctx, inputs[1]) { - let alu_op = choose_32_64(ty, ALUOp::Mul32Ext16, ALUOp::Mul64Ext16); - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else if let Some(mem) = input_matches_sext32_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { - alu_op: ALUOp::Mul64Ext32, - rd, - mem, - }); - } else if let Some(rm) = input_matches_sext32_reg(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRR { - alu_op: ALUOp::Mul64Ext32, - rd, - rm, - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } + Opcode::FcvtLowFromSint => unimplemented!("FcvtLowFromSint"), - Opcode::Umulhi | Opcode::Smulhi => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let is_signed = op == Opcode::Smulhi; - let input_ty = ctx.input_ty(insn, 0); - assert!(ctx.input_ty(insn, 1) == input_ty); - assert!(ctx.output_ty(insn, 0) == input_ty); - - match input_ty { - types::I64 => { - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - - if is_signed { - ctx.emit(Inst::SMulWide { rn, rm }); - ctx.emit(Inst::gen_move(rd, gpr(0), input_ty)); - } else { - ctx.emit(Inst::gen_move(writable_gpr(1), rm, input_ty)); - ctx.emit(Inst::UMulWide { rn }); - ctx.emit(Inst::gen_move(rd, gpr(0), input_ty)); - } - } - types::I32 => { - let narrow_mode = if is_signed { - NarrowValueMode::SignExtend64 - } else { - NarrowValueMode::ZeroExtend64 - }; - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - let rm = 
put_input_in_reg(ctx, inputs[1], narrow_mode); - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Mul64, - rd, - rn, - rm, - }); - let shift_op = if is_signed { - ShiftOp::AShR64 - } else { - ShiftOp::LShR64 - }; - ctx.emit(Inst::ShiftRR { - shift_op, - rd, - rn: rd.to_reg(), - shift_imm: 32, - shift_reg: zero_reg(), - }); - } - types::I16 | types::I8 => { - let narrow_mode = if is_signed { - NarrowValueMode::SignExtend32 - } else { - NarrowValueMode::ZeroExtend32 - }; - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - let rm = put_input_in_reg(ctx, inputs[1], narrow_mode); - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Mul32, - rd, - rn, - rm, - }); - let shift_op = if is_signed { - ShiftOp::AShR32 - } else { - ShiftOp::LShR32 - }; - let shift_amt = match input_ty { - types::I16 => 16, - types::I8 => 8, - _ => unreachable!(), - }; - ctx.emit(Inst::ShiftRR { - shift_op, - rd, - rn: rd.to_reg(), - shift_imm: shift_amt, - shift_reg: zero_reg(), - }); - } - _ => { - panic!("Unsupported argument type for umulhi/smulhi: {}", input_ty); - } - } + Opcode::StackLoad | Opcode::StackStore => { + panic!("Direct stack memory access not supported; should not be used by Wasm"); } - Opcode::Udiv | Opcode::Urem => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let ty = ty.unwrap(); - - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if ty_bits(ty) <= 32 { - lower_constant_u32(ctx, writable_gpr(0), 0); - if ty_bits(ty) < 32 { - ctx.emit(Inst::Extend { - rd: writable_gpr(1), - rn, - signed: false, - from_bits: ty_bits(ty) as u8, - to_bits: 32, - }); - } else { - ctx.emit(Inst::mov32(writable_gpr(1), rn)); - } - } else { - lower_constant_u64(ctx, writable_gpr(0), 0); - ctx.emit(Inst::mov64(writable_gpr(1), rn)); - } - - let narrow_mode = if ty.bits() < 32 { - NarrowValueMode::ZeroExtend32 - } else { - NarrowValueMode::None - }; - let rm = put_input_in_reg(ctx, inputs[1], narrow_mode); - - if input_maybe_imm(ctx, inputs[1], 0) && flags.avoid_div_traps() { - ctx.emit(Inst::CmpTrapRSImm16 { - op: choose_32_64(ty, CmpOp::CmpS32, CmpOp::CmpS64), - rn: rm, - imm: 0, - cond: Cond::from_intcc(IntCC::Equal), - trap_code: TrapCode::IntegerDivisionByZero, - }); - } - - if ty_bits(ty) <= 32 { - ctx.emit(Inst::UDivMod32 { rn: rm }); - } else { - ctx.emit(Inst::UDivMod64 { rn: rm }); - } + Opcode::ConstAddr => unimplemented!(), - if op == Opcode::Udiv { - ctx.emit(Inst::gen_move(rd, gpr(1), ty)); - } else { - ctx.emit(Inst::gen_move(rd, gpr(0), ty)); - } + Opcode::HeapAddr => { + panic!("heap_addr should have been removed by legalization!"); } - Opcode::Sdiv | Opcode::Srem => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let ty = ty.unwrap(); - - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if ty_bits(ty) < 64 { - ctx.emit(Inst::Extend { - rd: writable_gpr(1), - rn, - signed: true, - from_bits: ty_bits(ty) as u8, - to_bits: 64, - }); - } else { - ctx.emit(Inst::mov64(writable_gpr(1), rn)); - } - - let narrow_mode = if ty.bits() < 32 { - NarrowValueMode::SignExtend32 - } else { - NarrowValueMode::None - }; - let rm = put_input_in_reg(ctx, inputs[1], narrow_mode); - - if input_maybe_imm(ctx, inputs[1], 0) && flags.avoid_div_traps() { - ctx.emit(Inst::CmpTrapRSImm16 { - op: choose_32_64(ty, CmpOp::CmpS32, CmpOp::CmpS64), - rn: rm, - imm: 0, - cond: Cond::from_intcc(IntCC::Equal), - trap_code: TrapCode::IntegerDivisionByZero, - }); - } - - if input_maybe_imm(ctx, inputs[1], 0xffff_ffff_ffff_ffff) { - if op == Opcode::Sdiv { - let tmp = 
ctx.alloc_tmp(ty).only_reg().unwrap(); - if ty_bits(ty) <= 32 { - lower_constant_u32(ctx, tmp, (1 << (ty_bits(ty) - 1)) - 1); - } else { - lower_constant_u64(ctx, tmp, (1 << (ty_bits(ty) - 1)) - 1); - } - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::Xor32, ALUOp::Xor64), - rd: tmp, - rn: tmp.to_reg(), - rm: gpr(1), - }); - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::And32, ALUOp::And64), - rd: tmp, - rn: tmp.to_reg(), - rm, - }); - ctx.emit(Inst::CmpTrapRSImm16 { - op: choose_32_64(ty, CmpOp::CmpS32, CmpOp::CmpS64), - rn: tmp.to_reg(), - imm: -1, - cond: Cond::from_intcc(IntCC::Equal), - trap_code: TrapCode::IntegerOverflow, - }); - } else { - if ty_bits(ty) > 32 { - ctx.emit(Inst::CmpRSImm16 { - op: CmpOp::CmpS64, - rn: rm, - imm: -1, - }); - ctx.emit(Inst::CMov64SImm16 { - rd: writable_gpr(1), - cond: Cond::from_intcc(IntCC::Equal), - imm: 0, - }); - } - } - } - - if ty_bits(ty) <= 32 { - ctx.emit(Inst::SDivMod32 { rn: rm }); - } else { - ctx.emit(Inst::SDivMod64 { rn: rm }); - } - - if op == Opcode::Sdiv { - ctx.emit(Inst::gen_move(rd, gpr(1), ty)); - } else { - ctx.emit(Inst::gen_move(rd, gpr(0), ty)); - } + Opcode::TableAddr => { + panic!("table_addr should have been removed by legalization!"); } - Opcode::Uextend | Opcode::Sextend => { - let ty = ty.unwrap(); - let to_bits = ty_bits(ty) as u8; - let to_bits = std::cmp::max(32, to_bits); - let narrow_mode = match (op, to_bits) { - (Opcode::Uextend, 32) => NarrowValueMode::ZeroExtend32, - (Opcode::Uextend, 64) => NarrowValueMode::ZeroExtend64, - (Opcode::Sextend, 32) => NarrowValueMode::SignExtend32, - (Opcode::Sextend, 64) => NarrowValueMode::SignExtend64, - _ => unreachable!(), - }; - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::gen_move(rd, rn, ty)); + Opcode::GlobalValue => { + panic!("global_value should have been removed by legalization!"); } - Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => { - let ty = ty.unwrap(); - let size = ty_bits(ty); - let narrow_mode = match (op, size) { - (Opcode::Ishl, _) => NarrowValueMode::None, - (Opcode::Ushr, 64) => NarrowValueMode::ZeroExtend64, - (Opcode::Ushr, _) => NarrowValueMode::ZeroExtend32, - (Opcode::Sshr, 64) => NarrowValueMode::SignExtend64, - (Opcode::Sshr, _) => NarrowValueMode::SignExtend32, - _ => unreachable!(), - }; - let shift_op = match op { - Opcode::Ishl => choose_32_64(ty, ShiftOp::LShL32, ShiftOp::LShL64), - Opcode::Ushr => choose_32_64(ty, ShiftOp::LShR32, ShiftOp::LShR64), - Opcode::Sshr => choose_32_64(ty, ShiftOp::AShR32, ShiftOp::AShR64), - _ => unreachable!(), - }; - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - if let Some(imm) = input_matches_const(ctx, inputs[1]) { - let shift_imm = (imm & (size as u64 - 1)) as u8; - let shift_reg = zero_reg(); - ctx.emit(Inst::ShiftRR { - shift_op, - rd, - rn, - shift_imm, - shift_reg, - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let shift_imm = 0; - let shift_reg = if size < 64 { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::gen_move(tmp, rm, types::I64)); - ctx.emit(Inst::AluRUImm16Shifted { - alu_op: ALUOp::And64, - rd: tmp, - imm: UImm16Shifted::maybe_from_u64(size as u64 - 1).unwrap(), - }); - tmp.to_reg() - } else { - rm - }; - ctx.emit(Inst::ShiftRR { - shift_op, - rd, - rn, - shift_imm, - shift_reg, - }); - } + Opcode::TlsValue => { + 
unimplemented!("Thread-local storage support not implemented!"); } - Opcode::Rotr | Opcode::Rotl => { - // s390x doesn't have a right-rotate instruction, but a right rotation of K places is - // effectively a left rotation of N - K places, if N is the integer's bit size. We - // implement right rotations with this trick. - // - // For a 32-bit or 64-bit rotate-left, we can use the ROR instruction directly. - // - // For a < 32-bit rotate-left, we synthesize this as: - // - // rotr rd, rn, rm - // - // => - // - // zero-extend rn, <32-or-64> - // and tmp_masked_rm, rm, - // sub tmp1, tmp_masked_rm, - // sub tmp1, zero, tmp1 ; neg - // lsr tmp2, rn, tmp_masked_rm - // lsl rd, rn, tmp1 - // orr rd, rd, tmp2 - // - // For a constant amount, we can instead do: - // - // zero-extend rn, <32-or-64> - // lsr tmp2, rn, # - // lsl rd, rn, - // orr rd, rd, tmp2 - - let is_rotr = op == Opcode::Rotr; - - let ty = ty.unwrap(); - let ty_bits_size = ty_bits(ty) as u64; - - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg( - ctx, - inputs[0], - if ty_bits_size <= 32 { - NarrowValueMode::ZeroExtend32 - } else { - NarrowValueMode::ZeroExtend64 - }, - ); - - if ty_bits_size == 32 || ty_bits_size == 64 { - let shift_op = choose_32_64(ty, ShiftOp::RotL32, ShiftOp::RotL64); - if let Some(imm) = input_matches_const(ctx, inputs[1]) { - let shiftcount = imm & (ty_bits_size - 1); - let shiftcount = if is_rotr { - ty_bits_size - shiftcount - } else { - shiftcount - }; - ctx.emit(Inst::ShiftRR { - shift_op, - rd, - rn, - shift_imm: shiftcount as u8, - shift_reg: zero_reg(), - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rm = if is_rotr { - // Really ty_bits_size - rn, but the upper bits of the result are - // ignored (because of the implicit masking done by the instruction), - // so this is equivalent to negating the input. 
- let op = choose_32_64(ty, UnaryOp::Neg32, UnaryOp::Neg64); - let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); - ctx.emit(Inst::UnaryRR { - op, - rd: tmp, - rn: rm, - }); - tmp.to_reg() - } else { - rm - }; - ctx.emit(Inst::ShiftRR { - shift_op, - rd, - rn, - shift_imm: 0, - shift_reg: rm, - }); - } - } else { - debug_assert!(ty_bits_size < 32); - - if let Some(imm) = input_matches_const(ctx, inputs[1]) { - let rot_count = imm & (ty_bits_size - 1); - let (lshl_count, lshr_count) = if is_rotr { - (ty_bits_size - rot_count, rot_count) - } else { - (rot_count, ty_bits_size - rot_count) - }; + Opcode::GetPinnedReg | Opcode::SetPinnedReg => { + unimplemented!("Pinned register support not implemented!"); + } - let tmp1 = ctx.alloc_tmp(types::I32).only_reg().unwrap(); - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::LShL32, - rd: tmp1, - rn, - shift_imm: lshl_count as u8, - shift_reg: zero_reg(), - }); - - let tmp2 = ctx.alloc_tmp(types::I32).only_reg().unwrap(); - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::LShR32, - rd: tmp2, - rn, - shift_imm: lshr_count as u8, - shift_reg: zero_reg(), - }); - - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Orr32, - rd, - rn: tmp1.to_reg(), - rm: tmp2.to_reg(), - }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let tmp1 = ctx.alloc_tmp(types::I32).only_reg().unwrap(); - let tmp2 = ctx.alloc_tmp(types::I32).only_reg().unwrap(); - - ctx.emit(Inst::mov32(tmp1, rm)); - ctx.emit(Inst::UnaryRR { - op: UnaryOp::Neg32, - rd: tmp2, - rn: rm, - }); - - ctx.emit(Inst::AluRUImm16Shifted { - alu_op: ALUOp::And32, - rd: tmp1, - imm: UImm16Shifted::maybe_from_u64(ty_bits_size - 1).unwrap(), - }); - ctx.emit(Inst::AluRUImm16Shifted { - alu_op: ALUOp::And32, - rd: tmp2, - imm: UImm16Shifted::maybe_from_u64(ty_bits_size - 1).unwrap(), - }); - - let (lshr, lshl) = if is_rotr { (tmp2, tmp1) } else { (tmp1, tmp2) }; - - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::LShL32, - rd: lshl, - rn, - shift_imm: 0, - shift_reg: lshl.to_reg(), - }); - - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::LShR32, - rd: lshr, - rn, - shift_imm: 0, - shift_reg: lshr.to_reg(), - }); - - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Orr32, - rd, - rn: lshl.to_reg(), - rm: lshr.to_reg(), - }); - } - } + Opcode::Trap | Opcode::ResumableTrap => { + let trap_code = ctx.data(insn).trap_code().unwrap(); + ctx.emit_safepoint(Inst::Trap { trap_code }) } - Opcode::Bnot => { - let ty = ty.unwrap(); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if isa_flags.has_mie2() { - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::OrrNot32, ALUOp::OrrNot64), - rd, - rn, - rm: rn, - }); - } else { - ctx.emit(Inst::gen_move(rd, rn, ty)); - lower_bnot(ctx, ty, rd); - } - } - - Opcode::Band => { - let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::And32, ALUOp::And64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = input_matches_uimm16shifted_inv(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm16Shifted { alu_op, rd, imm }); - } else if let Some(imm) = input_matches_uimm32shifted_inv(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm32Shifted { alu_op, rd, imm }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else { - 
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } - - Opcode::Bor => { - let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::Orr32, ALUOp::Orr64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = input_matches_uimm16shifted(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm16Shifted { alu_op, rd, imm }); - } else if let Some(imm) = input_matches_uimm32shifted(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm32Shifted { alu_op, rd, imm }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } - - Opcode::Bxor => { - let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::Xor32, ALUOp::Xor64); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if let Some(imm) = input_matches_uimm32shifted(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm32Shifted { alu_op, rd, imm }); - } else if let Some(mem) = input_matches_mem(ctx, inputs[1]) { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRX { alu_op, rd, mem }); - } else { - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - } - } - - Opcode::BandNot | Opcode::BorNot | Opcode::BxorNot => { - let ty = ty.unwrap(); - let alu_op = match (op, isa_flags.has_mie2()) { - (Opcode::BandNot, true) => choose_32_64(ty, ALUOp::AndNot32, ALUOp::AndNot64), - (Opcode::BorNot, true) => choose_32_64(ty, ALUOp::OrrNot32, ALUOp::OrrNot64), - (Opcode::BxorNot, true) => choose_32_64(ty, ALUOp::XorNot32, ALUOp::XorNot64), - (Opcode::BandNot, false) => choose_32_64(ty, ALUOp::And32, ALUOp::And64), - (Opcode::BorNot, false) => choose_32_64(ty, ALUOp::Orr32, ALUOp::Orr64), - (Opcode::BxorNot, false) => choose_32_64(ty, ALUOp::Xor32, ALUOp::Xor64), - _ => unreachable!(), - }; - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm }); - if !isa_flags.has_mie2() { - lower_bnot(ctx, ty, rd); - } - } - - Opcode::Bitselect => { - let ty = ty.unwrap(); - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::And32, ALUOp::And64), - rd: tmp, - rn, - rm: rcond, - }); - if isa_flags.has_mie2() { - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::AndNot32, ALUOp::AndNot64), - rd, - rn: rm, - rm: rcond, - }); - } else { - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::And32, ALUOp::And64), - rd, - rn: rm, - rm: rcond, - }); - lower_bnot(ctx, ty, rd); - } - ctx.emit(Inst::AluRRR { - alu_op: choose_32_64(ty, ALUOp::Orr32, ALUOp::Orr64), - rd, - rn: rd.to_reg(), - rm: tmp.to_reg(), - }); 
- } - - Opcode::Bextend | Opcode::Bmask => { - // Bextend and Bmask both simply sign-extend. This works for: - // - Bextend, because booleans are stored as 0 / -1, so we - // sign-extend the -1 to a -1 in the wider width. - // - Bmask, because the resulting integer mask value must be - // all-ones (-1) if the argument is true. - // - // For a sign-extension from a 1-bit value (Case 1 below), we need - // to do things a bit specially, because the ISA does not have a - // 1-to-N-bit sign extension instruction. For 8-bit or wider - // sources (Case 2 below), we do a sign extension normally. - - let from_ty = ctx.input_ty(insn, 0); - let to_ty = ctx.output_ty(insn, 0); - let from_bits = ty_bits(from_ty); - let to_bits = ty_bits(to_ty); - - assert!( - from_bits <= 64 && to_bits <= 64, - "Vector Bextend not supported yet" - ); - - if from_bits >= to_bits { - // Just a move. - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let ty = ctx.input_ty(insn, 0); - ctx.emit(Inst::gen_move(rd, rn, ty)); - } else if from_bits == 1 { - assert!(to_bits >= 8); - // Case 1: 1-bit to N-bit extension: use a shift-left / - // shift-right sequence to create a 0 / -1 result. - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let shl_op = choose_32_64(to_ty, ShiftOp::LShL32, ShiftOp::LShL64); - let shr_op = choose_32_64(to_ty, ShiftOp::AShR32, ShiftOp::AShR64); - let count = if to_bits > 32 { 63 } else { 31 }; - ctx.emit(Inst::ShiftRR { - shift_op: shl_op, - rd, - rn, - shift_imm: count, - shift_reg: zero_reg(), - }); - ctx.emit(Inst::ShiftRR { - shift_op: shr_op, - rd, - rn: rd.to_reg(), - shift_imm: count, - shift_reg: zero_reg(), - }); - } else { - // Case 2: 8-or-more-bit to N-bit extension: just sign-extend. A - // `true` (all ones, or `-1`) will be extended to -1 with the - // larger width. - assert!(from_bits >= 8); - let narrow_mode = if to_bits == 64 { - NarrowValueMode::SignExtend64 - } else { - assert!(to_bits <= 32); - NarrowValueMode::SignExtend32 - }; - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::gen_move(rd, rn, to_ty)); - } - } - - Opcode::Bint => { - // Booleans are stored as all-zeroes (0) or all-ones (-1). We AND - // out the LSB to give a 0 / 1-valued integer result. 
- let ty = ty.unwrap(); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if ty_bits(ty) <= 16 { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm16Shifted { - alu_op: ALUOp::And32, - rd, - imm: UImm16Shifted::maybe_from_u64(1).unwrap(), - }); - } else if ty_bits(ty) <= 32 { - ctx.emit(Inst::gen_move(rd, rn, ty)); - ctx.emit(Inst::AluRUImm32Shifted { - alu_op: ALUOp::And32, - rd, - imm: UImm32Shifted::maybe_from_u64(1).unwrap(), - }); - } else { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - lower_constant_u64(ctx, tmp, 1); - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::And64, - rd, - rn, - rm: tmp.to_reg(), - }); - } - } - - Opcode::Clz => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let ty = ty.unwrap(); - let ty_bits_size = ty_bits(ty); - - let rn = if ty_bits_size < 64 { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::Extend { - rd: tmp, - rn, - signed: false, - from_bits: ty_bits_size as u8, - to_bits: 64, - }); - tmp.to_reg() - } else { - rn - }; - - ctx.emit(Inst::Flogr { rn }); - ctx.emit(Inst::gen_move(rd, gpr(0), ty)); - - if ty_bits_size < 64 { - ctx.emit(Inst::AluRSImm16 { - alu_op: ALUOp::Add32, - rd, - imm: -(64 - ty_bits_size as i16), - }); - } - } - - Opcode::Cls => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let ty = ty.unwrap(); - let ty_bits_size = ty_bits(ty); - - let rn = if ty_bits_size < 64 { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::Extend { - rd: tmp, - rn, - signed: true, - from_bits: ty_bits_size as u8, - to_bits: 64, - }); - tmp.to_reg() - } else { - rn - }; - - // tmp = rn ^ ((signed)rn >> 63) - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::ShiftRR { - shift_op: ShiftOp::AShR64, - rd: tmp, - rn, - shift_imm: 63, - shift_reg: zero_reg(), - }); - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Xor64, - rd: tmp, - rn: tmp.to_reg(), - rm: rn, - }); - - ctx.emit(Inst::Flogr { rn: tmp.to_reg() }); - ctx.emit(Inst::gen_move(rd, gpr(0), ty)); - - if ty_bits_size < 64 { - ctx.emit(Inst::AluRSImm16 { - alu_op: ALUOp::Add32, - rd, - imm: -(64 - ty_bits_size as i16), - }); - } - } - - Opcode::Ctz => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let ty = ty.unwrap(); - let ty_bits_size = ty_bits(ty); - - let rn = if ty_bits_size < 64 { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::gen_move(tmp, rn, ty)); - ctx.emit(Inst::AluRUImm16Shifted { - alu_op: ALUOp::Orr64, - rd: tmp, - imm: UImm16Shifted::maybe_from_u64(1u64 << ty_bits_size).unwrap(), - }); - tmp.to_reg() - } else { - rn - }; - - // tmp = rn & -rn - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::UnaryRR { - op: UnaryOp::Neg64, - rd: tmp, - rn, - }); - ctx.emit(Inst::AluRRR { - alu_op: ALUOp::And64, - rd: tmp, - rn: tmp.to_reg(), - rm: rn, - }); - - ctx.emit(Inst::Flogr { rn: tmp.to_reg() }); - if ty_bits_size == 64 { - ctx.emit(Inst::CMov64SImm16 { - rd: writable_gpr(0), - cond: Cond::from_intcc(IntCC::Equal), - imm: -1, - }); - } - - if ty_bits_size <= 32 { - lower_constant_u32(ctx, rd, 63); - } else { - lower_constant_u64(ctx, rd, 63); - } - let alu_op = choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64); - 
ctx.emit(Inst::AluRRR { - alu_op, - rd, - rn: rd.to_reg(), - rm: gpr(0), - }); - } - - Opcode::Bitrev => unimplemented!(), - - Opcode::Popcnt => { - let ty = ty.unwrap(); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - if ty_bits(ty) <= 8 { - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(Inst::UnaryRR { - op: UnaryOp::PopcntByte, - rd, - rn, - }); - } else if isa_flags.has_mie2() { - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64); - ctx.emit(Inst::UnaryRR { - op: UnaryOp::PopcntReg, - rd, - rn, - }); - } else { - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(Inst::UnaryRR { - op: UnaryOp::PopcntByte, - rd, - rn, - }); - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - let mut shift = ty_bits(ty) as u8; - while shift > 8 { - shift = shift / 2; - ctx.emit(Inst::ShiftRR { - shift_op: choose_32_64(ty, ShiftOp::LShL32, ShiftOp::LShL64), - rd: tmp, - rn: rd.to_reg(), - shift_imm: shift, - shift_reg: zero_reg(), - }); - ctx.emit(Inst::AluRR { - alu_op: choose_32_64(ty, ALUOp::Add32, ALUOp::Add64), - rd, - rm: tmp.to_reg(), - }); - } - let shift = ty_bits(ty) as u8 - 8; - ctx.emit(Inst::ShiftRR { - shift_op: choose_32_64(ty, ShiftOp::LShR32, ShiftOp::LShR64), - rd, - rn: rd.to_reg(), - shift_imm: shift, - shift_reg: zero_reg(), - }); - } - } - - Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => { - let bits = ty_bits(ctx.output_ty(insn, 0)); - let fpu_op = match (op, bits) { - (Opcode::Fadd, 32) => FPUOp2::Add32, - (Opcode::Fadd, 64) => FPUOp2::Add64, - (Opcode::Fsub, 32) => FPUOp2::Sub32, - (Opcode::Fsub, 64) => FPUOp2::Sub64, - (Opcode::Fmul, 32) => FPUOp2::Mul32, - (Opcode::Fmul, 64) => FPUOp2::Mul64, - (Opcode::Fdiv, 32) => FPUOp2::Div32, - (Opcode::Fdiv, 64) => FPUOp2::Div64, - _ => panic!("Unknown op/bits combination"), - }; - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::mov64(rd, rn)); - ctx.emit(Inst::FpuRRR { fpu_op, rd, rm }); - } - - Opcode::Fmin | Opcode::Fmax => { - let bits = ty_bits(ctx.output_ty(insn, 0)); - let fpu_op = match (op, bits) { - (Opcode::Fmin, 32) => FPUOp2::Min32, - (Opcode::Fmin, 64) => FPUOp2::Min64, - (Opcode::Fmax, 32) => FPUOp2::Max32, - (Opcode::Fmax, 64) => FPUOp2::Max64, - _ => panic!("Unknown op/bits combination"), - }; - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::FpuVecRRR { fpu_op, rd, rn, rm }); - } - - Opcode::Sqrt | Opcode::Fneg | Opcode::Fabs | Opcode::Fpromote | Opcode::Fdemote => { - let bits = ty_bits(ctx.output_ty(insn, 0)); - let fpu_op = match (op, bits) { - (Opcode::Sqrt, 32) => FPUOp1::Sqrt32, - (Opcode::Sqrt, 64) => FPUOp1::Sqrt64, - (Opcode::Fneg, 32) => FPUOp1::Neg32, - (Opcode::Fneg, 64) => FPUOp1::Neg64, - (Opcode::Fabs, 32) => FPUOp1::Abs32, - (Opcode::Fabs, 64) => FPUOp1::Abs64, - (Opcode::Fpromote, 32) => panic!("Cannot promote to 32 bits"), - (Opcode::Fpromote, 64) => FPUOp1::Cvt32To64, - (Opcode::Fdemote, 32) => FPUOp1::Cvt64To32, - (Opcode::Fdemote, 64) => panic!("Cannot demote to 64 bits"), - _ => panic!("Unknown op/bits combination"), - }; - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - 
ctx.emit(Inst::FpuRR { fpu_op, rd, rn }); - } - - Opcode::Ceil | Opcode::Floor | Opcode::Trunc | Opcode::Nearest => { - let bits = ty_bits(ctx.output_ty(insn, 0)); - let op = match (op, bits) { - (Opcode::Ceil, 32) => FpuRoundMode::Plus32, - (Opcode::Ceil, 64) => FpuRoundMode::Plus64, - (Opcode::Floor, 32) => FpuRoundMode::Minus32, - (Opcode::Floor, 64) => FpuRoundMode::Minus64, - (Opcode::Trunc, 32) => FpuRoundMode::Zero32, - (Opcode::Trunc, 64) => FpuRoundMode::Zero64, - (Opcode::Nearest, 32) => FpuRoundMode::Nearest32, - (Opcode::Nearest, 64) => FpuRoundMode::Nearest64, - _ => panic!("Unknown op/bits combination"), - }; - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::FpuRound { op, rd, rn }); - } - - Opcode::Fma => { - let bits = ty_bits(ctx.output_ty(insn, 0)); - let fpu_op = match bits { - 32 => FPUOp3::MAdd32, - 64 => FPUOp3::MAdd64, - _ => panic!("Unknown op size"), - }; - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let ra = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::mov64(rd, ra)); - ctx.emit(Inst::FpuRRRR { fpu_op, rd, rn, rm }); - } - - Opcode::Fcopysign => { - let ty = ctx.output_ty(insn, 0); - let bits = ty_bits(ty) as u8; - assert!(bits == 32 || bits == 64); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - - ctx.emit(Inst::FpuCopysign { rd, rn, rm }); - } - - Opcode::FcvtFromUint | Opcode::FcvtFromSint => { - let in_bits = ty_bits(ctx.input_ty(insn, 0)); - let out_bits = ty_bits(ctx.output_ty(insn, 0)); - let signed = op == Opcode::FcvtFromSint; - let op = match (signed, in_bits, out_bits) { - (false, 32, 32) => IntToFpuOp::U32ToF32, - (true, 32, 32) => IntToFpuOp::I32ToF32, - (false, 32, 64) => IntToFpuOp::U32ToF64, - (true, 32, 64) => IntToFpuOp::I32ToF64, - (false, 64, 32) => IntToFpuOp::U64ToF32, - (true, 64, 32) => IntToFpuOp::I64ToF32, - (false, 64, 64) => IntToFpuOp::U64ToF64, - (true, 64, 64) => IntToFpuOp::I64ToF64, - _ => panic!("Unknown input/output-bits combination"), - }; - let narrow_mode = match (signed, in_bits) { - (false, 32) => NarrowValueMode::ZeroExtend32, - (true, 32) => NarrowValueMode::SignExtend32, - (false, 64) => NarrowValueMode::ZeroExtend64, - (true, 64) => NarrowValueMode::SignExtend64, - _ => panic!("Unknown input size"), - }; - let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - ctx.emit(Inst::IntToFpu { op, rd, rn }); - } - - Opcode::FcvtToUint | Opcode::FcvtToSint => { - let in_bits = ty_bits(ctx.input_ty(insn, 0)); - let out_bits = ty_bits(ctx.output_ty(insn, 0)); - let signed = op == Opcode::FcvtToSint; - let op = match (signed, in_bits, out_bits) { - (false, 32, 32) => FpuToIntOp::F32ToU32, - (true, 32, 32) => FpuToIntOp::F32ToI32, - (false, 32, 64) => FpuToIntOp::F32ToU64, - (true, 32, 64) => FpuToIntOp::F32ToI64, - (false, 64, 32) => FpuToIntOp::F64ToU32, - (true, 64, 32) => FpuToIntOp::F64ToI32, - (false, 64, 64) => FpuToIntOp::F64ToU64, - (true, 64, 64) => FpuToIntOp::F64ToI64, - _ => panic!("Unknown input/output-bits combination"), - }; - - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rd = 
get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - -            // First, check whether the input is a NaN and trap if so. -            if in_bits == 32 { -                ctx.emit(Inst::FpuCmp32 { rn, rm: rn }); -            } else { -                ctx.emit(Inst::FpuCmp64 { rn, rm: rn }); -            } -            ctx.emit(Inst::TrapIf { -                trap_code: TrapCode::BadConversionToInteger, -                cond: Cond::from_floatcc(FloatCC::Unordered), -            }); - -            // Perform the conversion. If this sets CC 3, we have a -            // "special case". Since we already excluded the case where -            // the input was a NaN, the only other option is that the -            // conversion overflowed the target type. -            ctx.emit(Inst::FpuToInt { op, rd, rn }); -            ctx.emit(Inst::TrapIf { -                trap_code: TrapCode::IntegerOverflow, -                cond: Cond::from_floatcc(FloatCC::Unordered), -            }); -        } - -        Opcode::FcvtToUintSat | Opcode::FcvtToSintSat => { -            let in_bits = ty_bits(ctx.input_ty(insn, 0)); -            let out_bits = ty_bits(ctx.output_ty(insn, 0)); -            let signed = op == Opcode::FcvtToSintSat; -            let op = match (signed, in_bits, out_bits) { -                (false, 32, 32) => FpuToIntOp::F32ToU32, -                (true, 32, 32) => FpuToIntOp::F32ToI32, -                (false, 32, 64) => FpuToIntOp::F32ToU64, -                (true, 32, 64) => FpuToIntOp::F32ToI64, -                (false, 64, 32) => FpuToIntOp::F64ToU32, -                (true, 64, 32) => FpuToIntOp::F64ToI32, -                (false, 64, 64) => FpuToIntOp::F64ToU64, -                (true, 64, 64) => FpuToIntOp::F64ToI64, -                _ => panic!("Unknown input/output-bits combination"), -            }; - -            let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); -            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - -            // Perform the conversion. -            ctx.emit(Inst::FpuToInt { op, rd, rn }); - -            // In most special cases, the Z instruction already yields the -            // result expected by Cranelift semantics. The only exception -            // is the case where the input was a NaN. We explicitly check -            // for that and force the output to 0 in that case.
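As a plain-Rust sketch of the conversion semantics handled in this block (illustrative only; none of the function names below are backend or Cranelift APIs): the trapping forms reject NaN and out-of-range inputs, while the saturating forms clamp the result and force NaN to 0, which happens to match what Rust's own float-to-int `as` casts do.

// Standalone sketch, assuming f64 -> i32; names are made up for illustration.
fn fcvt_to_sint_i32(x: f64) -> Result<i32, &'static str> {
    if x.is_nan() {
        // Corresponds to the FpuCmp + TrapIf(BadConversionToInteger) pair above.
        return Err("trap: bad conversion to integer");
    }
    // The truncated value must fit the target type, otherwise the conversion traps.
    let t = x.trunc();
    if t < i32::MIN as f64 || t > i32::MAX as f64 {
        // Corresponds to the second TrapIf (IntegerOverflow) after FpuToInt.
        return Err("trap: integer overflow");
    }
    Ok(t as i32)
}

fn fcvt_to_sint_sat_i32(x: f64) -> i32 {
    // Rust's `as` cast saturates and maps NaN to 0, matching the
    // CMov*SImm16 { imm: 0 } fix-up emitted for the saturating variant.
    x as i32
}

fn main() {
    assert_eq!(fcvt_to_sint_sat_i32(f64::NAN), 0);
    assert_eq!(fcvt_to_sint_sat_i32(1e99), i32::MAX);
    assert!(fcvt_to_sint_i32(f64::NAN).is_err());
    assert_eq!(fcvt_to_sint_i32(-3.7), Ok(-3));
}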
- if in_bits == 32 { - ctx.emit(Inst::FpuCmp32 { rn, rm: rn }); - } else { - ctx.emit(Inst::FpuCmp64 { rn, rm: rn }); - } - let cond = Cond::from_floatcc(FloatCC::Unordered); - if out_bits <= 32 { - ctx.emit(Inst::CMov32SImm16 { rd, cond, imm: 0 }); - } else { - ctx.emit(Inst::CMov64SImm16 { rd, cond, imm: 0 }); - } - } - - Opcode::FcvtLowFromSint => unimplemented!("FcvtLowFromSint"), - - Opcode::Bitcast => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let input_ty = ctx.input_ty(insn, 0); - let output_ty = ctx.output_ty(insn, 0); - lower_bitcast(ctx, rd, output_ty, rn, input_ty); - } - - Opcode::Load - | Opcode::Uload8 - | Opcode::Sload8 - | Opcode::Uload16 - | Opcode::Sload16 - | Opcode::Uload32 - | Opcode::Sload32 - | Opcode::LoadComplex - | Opcode::Uload8Complex - | Opcode::Sload8Complex - | Opcode::Uload16Complex - | Opcode::Sload16Complex - | Opcode::Uload32Complex - | Opcode::Sload32Complex => { - let off = ctx.data(insn).load_store_offset().unwrap(); - let flags = ctx.memflags(insn).unwrap(); - let endianness = flags.endianness(Endianness::Big); - let elem_ty = ctx.output_ty(insn, 0); - let is_float = ty_is_float(elem_ty); - let to_bits = ty_bits(elem_ty); - let from_bits = match op { - Opcode::Load | Opcode::LoadComplex => to_bits, - Opcode::Sload8 | Opcode::Uload8 | Opcode::Sload8Complex | Opcode::Uload8Complex => { - 8 - } - Opcode::Sload16 - | Opcode::Uload16 - | Opcode::Sload16Complex - | Opcode::Uload16Complex => 16, - Opcode::Sload32 - | Opcode::Uload32 - | Opcode::Sload32Complex - | Opcode::Uload32Complex => 32, - _ => unreachable!(), - }; - let ext_bits = if to_bits < 32 { 32 } else { to_bits }; - let sign_extend = match op { - Opcode::Sload8 - | Opcode::Sload8Complex - | Opcode::Sload16 - | Opcode::Sload16Complex - | Opcode::Sload32 - | Opcode::Sload32Complex => true, - _ => false, - }; - - let mem = lower_address(ctx, &inputs[..], off, flags); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - - if endianness == Endianness::Big { - ctx.emit(match (ext_bits, from_bits, sign_extend, is_float) { - (32, 32, _, true) => Inst::FpuLoad32 { rd, mem }, - (64, 64, _, true) => Inst::FpuLoad64 { rd, mem }, - (32, 32, _, false) => Inst::Load32 { rd, mem }, - (64, 64, _, false) => Inst::Load64 { rd, mem }, - (32, 8, false, _) => Inst::Load32ZExt8 { rd, mem }, - (32, 8, true, _) => Inst::Load32SExt8 { rd, mem }, - (32, 16, false, _) => Inst::Load32ZExt16 { rd, mem }, - (32, 16, true, _) => Inst::Load32SExt16 { rd, mem }, - (64, 8, false, _) => Inst::Load64ZExt8 { rd, mem }, - (64, 8, true, _) => Inst::Load64SExt8 { rd, mem }, - (64, 16, false, _) => Inst::Load64ZExt16 { rd, mem }, - (64, 16, true, _) => Inst::Load64SExt16 { rd, mem }, - (64, 32, false, _) => Inst::Load64ZExt32 { rd, mem }, - (64, 32, true, _) => Inst::Load64SExt32 { rd, mem }, - _ => panic!("Unsupported size in load"), - }); - } else if !is_float { - ctx.emit(match (ext_bits, from_bits, sign_extend) { - (_, 16, _) => Inst::LoadRev16 { rd, mem }, - (_, 32, _) => Inst::LoadRev32 { rd, mem }, - (_, 64, _) => Inst::LoadRev64 { rd, mem }, - (32, 8, false) => Inst::Load32ZExt8 { rd, mem }, - (32, 8, true) => Inst::Load32SExt8 { rd, mem }, - (64, 8, false) => Inst::Load64ZExt8 { rd, mem }, - (64, 8, true) => Inst::Load64SExt8 { rd, mem }, - _ => panic!("Unsupported size in load"), - }); - if to_bits > from_bits && from_bits > 8 { - ctx.emit(Inst::Extend { - rd, - rn: rd.to_reg(), - signed: sign_extend, - from_bits: 
from_bits as u8, - to_bits: to_bits as u8, - }); - } - } else if isa_flags.has_vxrs_ext2() { - ctx.emit(match from_bits { - 32 => Inst::FpuLoadRev32 { rd, mem }, - 64 => Inst::FpuLoadRev64 { rd, mem }, - _ => panic!("Unsupported size in load"), - }); - } else { - match from_bits { - 32 => { - let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap(); - ctx.emit(Inst::LoadRev32 { rd: tmp, mem }); - lower_bitcast(ctx, rd, elem_ty, tmp.to_reg(), types::I32); - } - 64 => { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - ctx.emit(Inst::LoadRev64 { rd: tmp, mem }); - lower_bitcast(ctx, rd, elem_ty, tmp.to_reg(), types::I64); - } - _ => panic!("Unsupported size in load"), - } - } - } - - Opcode::Store - | Opcode::Istore8 - | Opcode::Istore16 - | Opcode::Istore32 - | Opcode::StoreComplex - | Opcode::Istore8Complex - | Opcode::Istore16Complex - | Opcode::Istore32Complex => { - let off = ctx.data(insn).load_store_offset().unwrap(); - let flags = ctx.memflags(insn).unwrap(); - let endianness = flags.endianness(Endianness::Big); - let elem_ty = match op { - Opcode::Istore8 | Opcode::Istore8Complex => types::I8, - Opcode::Istore16 | Opcode::Istore16Complex => types::I16, - Opcode::Istore32 | Opcode::Istore32Complex => types::I32, - Opcode::Store | Opcode::StoreComplex => ctx.input_ty(insn, 0), - _ => unreachable!(), - }; - - let mem = lower_address(ctx, &inputs[1..], off, flags); - - if ty_is_float(elem_ty) { - let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - if endianness == Endianness::Big { - ctx.emit(match ty_bits(elem_ty) { - 32 => Inst::FpuStore32 { rd, mem }, - 64 => Inst::FpuStore64 { rd, mem }, - _ => panic!("Unsupported size in store"), - }); - } else if isa_flags.has_vxrs_ext2() { - ctx.emit(match ty_bits(elem_ty) { - 32 => Inst::FpuStoreRev32 { rd, mem }, - 64 => Inst::FpuStoreRev64 { rd, mem }, - _ => panic!("Unsupported size in store"), - }); - } else { - match ty_bits(elem_ty) { - 32 => { - let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap(); - lower_bitcast(ctx, tmp, types::I32, rd, elem_ty); - ctx.emit(Inst::StoreRev32 { - rd: tmp.to_reg(), - mem, - }); - } - 64 => { - let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); - lower_bitcast(ctx, tmp, types::I64, rd, elem_ty); - ctx.emit(Inst::StoreRev64 { - rd: tmp.to_reg(), - mem, - }); - } - _ => panic!("Unsupported size in load"), - } - } - } else if ty_bits(elem_ty) <= 16 { - if let Some(imm) = input_matches_const(ctx, inputs[0]) { - ctx.emit(match (endianness, ty_bits(elem_ty)) { - (_, 1) | (_, 8) => Inst::StoreImm8 { - imm: imm as u8, - mem, - }, - (Endianness::Big, 16) => Inst::StoreImm16 { - imm: imm as i16, - mem, - }, - (Endianness::Little, 16) => Inst::StoreImm16 { - imm: (imm as i16).swap_bytes(), - mem, - }, - _ => panic!("Unsupported size in store"), - }); - } else { - let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(match (endianness, ty_bits(elem_ty)) { - (_, 1) | (_, 8) => Inst::Store8 { rd, mem }, - (Endianness::Big, 16) => Inst::Store16 { rd, mem }, - (Endianness::Little, 16) => Inst::StoreRev16 { rd, mem }, - _ => panic!("Unsupported size in store"), - }); - } - } else if endianness == Endianness::Big { - if let Some(imm) = input_matches_simm16(ctx, inputs[0]) { - ctx.emit(match ty_bits(elem_ty) { - 32 => Inst::StoreImm32SExt16 { imm, mem }, - 64 => Inst::StoreImm64SExt16 { imm, mem }, - _ => panic!("Unsupported size in store"), - }); - } else { - let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(match ty_bits(elem_ty) { - 32 => 
Inst::Store32 { rd, mem }, - 64 => Inst::Store64 { rd, mem }, - _ => panic!("Unsupported size in store"), - }); - } - } else { - let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(match ty_bits(elem_ty) { - 32 => Inst::StoreRev32 { rd, mem }, - 64 => Inst::StoreRev64 { rd, mem }, - _ => panic!("Unsupported size in store"), - }); - } - } - - Opcode::StackLoad | Opcode::StackStore => { - panic!("Direct stack memory access not supported; should not be used by Wasm"); - } - - Opcode::StackAddr => { - let (stack_slot, offset) = match *ctx.data(insn) { - InstructionData::StackLoad { - opcode: Opcode::StackAddr, - stack_slot, - offset, - } => (stack_slot, offset), - _ => unreachable!(), - }; - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let offset: i32 = offset.into(); - let inst = ctx - .abi() - .stackslot_addr(stack_slot, u32::try_from(offset).unwrap(), rd); - ctx.emit(inst); - } - - Opcode::ConstAddr => unimplemented!(), - - Opcode::FuncAddr => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let (extname, dist) = ctx.call_target(insn).unwrap(); - let extname = extname.clone(); - if dist == RelocDistance::Near { - ctx.emit(Inst::LoadAddr { - rd, - mem: MemArg::Symbol { - name: Box::new(extname), - offset: 0, - flags: MemFlags::trusted(), - }, - }); - } else { - ctx.emit(Inst::LoadExtNameFar { - rd, - name: Box::new(extname), - offset: 0, - }); - } - } - - Opcode::SymbolValue => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let (extname, dist, offset) = ctx.symbol_value(insn).unwrap(); - let extname = extname.clone(); - if dist == RelocDistance::Near && (offset & 1) == 0 && i32::try_from(offset).is_ok() { - ctx.emit(Inst::LoadAddr { - rd, - mem: MemArg::Symbol { - name: Box::new(extname), - offset: i32::try_from(offset).unwrap(), - flags: MemFlags::trusted(), - }, - }); - } else { - ctx.emit(Inst::LoadExtNameFar { - rd, - name: Box::new(extname), - offset, - }); - } - } - - Opcode::HeapAddr => { - panic!("heap_addr should have been removed by legalization!"); - } - - Opcode::TableAddr => { - panic!("table_addr should have been removed by legalization!"); - } - - Opcode::GlobalValue => { - panic!("global_value should have been removed by legalization!"); - } - - Opcode::TlsValue => { - unimplemented!("Thread-local storage support not implemented!"); - } - - Opcode::GetPinnedReg | Opcode::SetPinnedReg => { - unimplemented!("Pinned register support not implemented!"); - } - - Opcode::Icmp => { - let condcode = ctx.data(insn).cond_code().unwrap(); - let cond = Cond::from_intcc(condcode); - let is_signed = condcode_is_signed(condcode); - lower_icmp_to_flags(ctx, insn, is_signed, true); - - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let ty = ctx.output_ty(insn, 0); - lower_flags_to_bool_result(ctx, cond, rd, ty); - } - - Opcode::Fcmp => { - let condcode = ctx.data(insn).fp_cond_code().unwrap(); - let cond = Cond::from_floatcc(condcode); - lower_fcmp_to_flags(ctx, insn); - - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let ty = ctx.output_ty(insn, 0); - lower_flags_to_bool_result(ctx, cond, rd, ty); - } - - Opcode::IsNull | Opcode::IsInvalid => { - // Null references are represented by the constant value 0; invalid - // references are represented by the constant value -1. 
- let cond = Cond::from_intcc(IntCC::Equal); - let imm = match op { - Opcode::IsNull => 0, - Opcode::IsInvalid => -1, - _ => unreachable!(), - }; - let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - ctx.emit(Inst::CmpRSImm16 { - op: CmpOp::CmpS64, - rn, - imm, - }); - - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let ty = ctx.output_ty(insn, 0); - lower_flags_to_bool_result(ctx, cond, rd, ty); - } - - Opcode::Select => { - let ty = ctx.output_ty(insn, 0); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); - let cond = lower_boolean_to_flags(ctx, inputs[0]); - ctx.emit(Inst::gen_move(rd, rm, ty)); - if ty_is_float(ty) { - if ty_bits(ty) < 64 { - ctx.emit(Inst::FpuCMov32 { rd, cond, rm: rn }); - } else { - ctx.emit(Inst::FpuCMov64 { rd, cond, rm: rn }); - } - } else { - if ty_bits(ty) < 64 { - ctx.emit(Inst::CMov32 { rd, cond, rm: rn }); - } else { - ctx.emit(Inst::CMov64 { rd, cond, rm: rn }); - } - } - } - - Opcode::SelectifSpectreGuard => { - let ty = ctx.output_ty(insn, 0); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); - let condcode = ctx.data(insn).cond_code().unwrap(); - let cond = Cond::from_intcc(condcode); - let is_signed = condcode_is_signed(condcode); - - // Verification ensures that the input is always a single-def ifcmp. - let cmp_insn = ctx - .get_input_as_source_or_const(inputs[0].insn, inputs[0].input) - .inst - .unwrap() - .0; - debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp); - lower_icmp_to_flags(ctx, cmp_insn, is_signed, true); - - ctx.emit(Inst::gen_move(rd, rm, ty)); - if ty_is_float(ty) { - if ty_bits(ty) < 64 { - ctx.emit(Inst::FpuCMov32 { rd, cond, rm: rn }); - } else { - ctx.emit(Inst::FpuCMov64 { rd, cond, rm: rn }); - } - } else { - if ty_bits(ty) < 64 { - ctx.emit(Inst::CMov32 { rd, cond, rm: rn }); - } else { - ctx.emit(Inst::CMov64 { rd, cond, rm: rn }); - } - } - } - - Opcode::Trap | Opcode::ResumableTrap => { - let trap_code = ctx.data(insn).trap_code().unwrap(); - ctx.emit_safepoint(Inst::Trap { trap_code }) - } - - Opcode::Trapz | Opcode::Trapnz | Opcode::ResumableTrapnz => { - let cond = lower_boolean_to_flags(ctx, inputs[0]); - let negated = op == Opcode::Trapz; - let cond = if negated { cond.invert() } else { cond }; - let trap_code = ctx.data(insn).trap_code().unwrap(); - ctx.emit_safepoint(Inst::TrapIf { trap_code, cond }); + Opcode::Trapz | Opcode::Trapnz | Opcode::ResumableTrapnz => { + let cond = lower_boolean_to_flags(ctx, inputs[0]); + let negated = op == Opcode::Trapz; + let cond = if negated { cond.invert() } else { cond }; + let trap_code = ctx.data(insn).trap_code().unwrap(); + ctx.emit_safepoint(Inst::TrapIf { trap_code, cond }); } Opcode::Trapif => { @@ -2681,161 +873,6 @@ fn lower_insn_to_regs>( // N.B.: the Ret itself is generated by the ABI. 
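The Select and SelectifSpectreGuard lowerings removed above both follow the same move-then-conditionally-overwrite pattern: a gen_move of the "false" operand into the destination, followed by a CMov (or FpuCMov) of the "true" operand. A minimal plain-Rust sketch of that dataflow, with made-up names:

// Sketch of the dataflow set up by gen_move + CMov32/CMov64 in the lowering above.
fn select_via_cmov(cond: bool, if_true: u64, if_false: u64) -> u64 {
    let mut dst = if_false; // Inst::gen_move(rd, rm, ty)
    if cond {
        dst = if_true; // Inst::CMov64 { rd, cond, rm: rn }
    }
    dst
}

fn main() {
    assert_eq!(select_via_cmov(true, 1, 2), 1);
    assert_eq!(select_via_cmov(false, 1, 2), 2);
}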
} - Opcode::AtomicRmw => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let flags = ctx.memflags(insn).unwrap(); - let endianness = flags.endianness(Endianness::Big); - let ty = ty.unwrap(); - assert!(is_valid_atomic_transaction_ty(ty)); - if endianness == Endianness::Little { - unimplemented!("Little-endian atomic operations not implemented"); - } - if ty_bits(ty) < 32 { - unimplemented!("Sub-word atomic operations not implemented"); - } - let op = inst_common::AtomicRmwOp::from(ctx.data(insn).atomic_rmw_op().unwrap()); - let (alu_op, rn) = match op { - AtomicRmwOp::And => (choose_32_64(ty, ALUOp::And32, ALUOp::And64), rn), - AtomicRmwOp::Or => (choose_32_64(ty, ALUOp::Orr32, ALUOp::Orr64), rn), - AtomicRmwOp::Xor => (choose_32_64(ty, ALUOp::Xor32, ALUOp::Xor64), rn), - AtomicRmwOp::Add => (choose_32_64(ty, ALUOp::Add32, ALUOp::Add64), rn), - AtomicRmwOp::Sub => { - let tmp_ty = choose_32_64(ty, types::I32, types::I64); - let tmp = ctx.alloc_tmp(tmp_ty).only_reg().unwrap(); - let neg_op = choose_32_64(ty, UnaryOp::Neg32, UnaryOp::Neg64); - ctx.emit(Inst::UnaryRR { - op: neg_op, - rd: tmp, - rn, - }); - (choose_32_64(ty, ALUOp::Add32, ALUOp::Add64), tmp.to_reg()) - } - _ => unimplemented!("AtomicRmw operation type {:?} not implemented", op), - }; - let mem = MemArg::reg(addr, flags); - ctx.emit(Inst::AtomicRmw { - alu_op, - rd, - rn, - mem, - }); - } - Opcode::AtomicCas => { - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - let addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); - let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None); - let rn = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); - let flags = ctx.memflags(insn).unwrap(); - let endianness = flags.endianness(Endianness::Big); - let ty = ty.unwrap(); - assert!(is_valid_atomic_transaction_ty(ty)); - if endianness == Endianness::Little { - unimplemented!("Little-endian atomic operations not implemented"); - } - if ty_bits(ty) < 32 { - unimplemented!("Sub-word atomic operations not implemented"); - } - let mem = MemArg::reg(addr, flags); - ctx.emit(Inst::gen_move(rd, rm, ty)); - if ty_bits(ty) == 32 { - ctx.emit(Inst::AtomicCas32 { rd, rn, mem }); - } else { - ctx.emit(Inst::AtomicCas64 { rd, rn, mem }); - } - } - Opcode::AtomicLoad => { - let flags = ctx.memflags(insn).unwrap(); - let endianness = flags.endianness(Endianness::Big); - let ty = ty.unwrap(); - assert!(is_valid_atomic_transaction_ty(ty)); - - let mem = lower_address(ctx, &inputs[..], 0, flags); - let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); - - if endianness == Endianness::Big { - ctx.emit(match ty_bits(ty) { - 8 => Inst::Load32ZExt8 { rd, mem }, - 16 => Inst::Load32ZExt16 { rd, mem }, - 32 => Inst::Load32 { rd, mem }, - 64 => Inst::Load64 { rd, mem }, - _ => panic!("Unsupported size in load"), - }); - } else { - ctx.emit(match ty_bits(ty) { - 8 => Inst::Load32ZExt8 { rd, mem }, - 16 => Inst::LoadRev16 { rd, mem }, - 32 => Inst::LoadRev32 { rd, mem }, - 64 => Inst::LoadRev64 { rd, mem }, - _ => panic!("Unsupported size in load"), - }); - } - } - Opcode::AtomicStore => { - let flags = ctx.memflags(insn).unwrap(); - let endianness = flags.endianness(Endianness::Big); - let ty = ctx.input_ty(insn, 0); - assert!(is_valid_atomic_transaction_ty(ty)); - - let mem = lower_address(ctx, &inputs[1..], 0, flags); - - if ty_bits(ty) <= 16 { - if let Some(imm) = 
input_matches_const(ctx, inputs[0]) { -                ctx.emit(match (endianness, ty_bits(ty)) { -                    (_, 8) => Inst::StoreImm8 { -                        imm: imm as u8, -                        mem, -                    }, -                    (Endianness::Big, 16) => Inst::StoreImm16 { -                        imm: imm as i16, -                        mem, -                    }, -                    (Endianness::Little, 16) => Inst::StoreImm16 { -                        imm: (imm as i16).swap_bytes(), -                        mem, -                    }, -                    _ => panic!("Unsupported size in store"), -                }); -            } else { -                let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); -                ctx.emit(match (endianness, ty_bits(ty)) { -                    (_, 8) => Inst::Store8 { rd, mem }, -                    (Endianness::Big, 16) => Inst::Store16 { rd, mem }, -                    (Endianness::Little, 16) => Inst::StoreRev16 { rd, mem }, -                    _ => panic!("Unsupported size in store"), -                }); -            } -        } else if endianness == Endianness::Big { -            if let Some(imm) = input_matches_simm16(ctx, inputs[0]) { -                ctx.emit(match ty_bits(ty) { -                    32 => Inst::StoreImm32SExt16 { imm, mem }, -                    64 => Inst::StoreImm64SExt16 { imm, mem }, -                    _ => panic!("Unsupported size in store"), -                }); -            } else { -                let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); -                ctx.emit(match ty_bits(ty) { -                    32 => Inst::Store32 { rd, mem }, -                    64 => Inst::Store64 { rd, mem }, -                    _ => panic!("Unsupported size in store"), -                }); -            } -        } else { -            let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); -            ctx.emit(match ty_bits(ty) { -                32 => Inst::StoreRev32 { rd, mem }, -                64 => Inst::StoreRev64 { rd, mem }, -                _ => panic!("Unsupported size in store"), -            }); -        } - -        ctx.emit(Inst::Fence); -    } -    Opcode::Fence => { -        ctx.emit(Inst::Fence); -    } -     Opcode::RawBitcast     | Opcode::Splat     | Opcode::Swizzle @@ -2891,6 +928,20 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(             panic!("Unused opcode should not be encountered.");         }  +        Opcode::LoadComplex +        | Opcode::Uload8Complex +        | Opcode::Sload8Complex +        | Opcode::Uload16Complex +        | Opcode::Sload16Complex +        | Opcode::Uload32Complex +        | Opcode::Sload32Complex +        | Opcode::StoreComplex +        | Opcode::Istore8Complex +        | Opcode::Istore16Complex +        | Opcode::Istore32Complex => { +            panic!("Load/store complex opcode should not be encountered."); +        } +     Opcode::Ifcmp     | Opcode::Ffcmp     | Opcode::Trapff diff --git a/cranelift/codegen/src/isa/s390x/lower/isle.rs b/cranelift/codegen/src/isa/s390x/lower/isle.rs new file mode 100644 index 000000000000..d4dcc0b5556e --- /dev/null +++ b/cranelift/codegen/src/isa/s390x/lower/isle.rs @@ -0,0 +1,475 @@ +//! ISLE integration glue code for s390x lowering. + +// Pull in the ISLE generated code. +pub mod generated_code; + +// Types that the generated ISLE code uses via `use super::*`. +use super::{ +    BranchTarget, CallIndInfo, CallInfo, Cond, Inst as MInst, JTSequenceInfo, MachLabel, MemArg, +    MemFlags, Opcode, Reg, UImm16Shifted, UImm32Shifted, +}; +use crate::isa::s390x::settings::Flags as IsaFlags; +use crate::machinst::isle::*; +use crate::settings::Flags; +use crate::{ +    ir::{ +        condcodes::*, immediates::*, types::*, AtomicRmwOp, Endianness, ExternalName, Inst, +        InstructionData, StackSlot, TrapCode, Value, ValueLabel, ValueList, +    }, +    isa::s390x::inst::s390x_map_regs, +    isa::unwind::UnwindInst, +    machinst::{InsnOutput, LowerCtx, RelocDistance}, +}; +use std::boxed::Box; +use std::convert::TryFrom; +use std::vec::Vec; + +type BoxCallInfo = Box<CallInfo>; +type BoxCallIndInfo = Box<CallIndInfo>; +type VecMachLabel = Vec<MachLabel>; +type BoxJTSequenceInfo = Box<JTSequenceInfo>; +type BoxExternalName = Box<ExternalName>; + +/// The main entry point for lowering with ISLE.
+pub(crate) fn lower( + lower_ctx: &mut C, + flags: &Flags, + isa_flags: &IsaFlags, + outputs: &[InsnOutput], + inst: Inst, +) -> Result<(), ()> +where + C: LowerCtx, +{ + lower_common( + lower_ctx, + flags, + isa_flags, + outputs, + inst, + |cx, insn| generated_code::constructor_lower(cx, insn), + s390x_map_regs, + ) +} + +impl generated_code::Context for IsleContext<'_, C, Flags, IsaFlags, 6> +where + C: LowerCtx, +{ + isle_prelude_methods!(); + + #[inline] + fn allow_div_traps(&mut self, _: Type) -> Option<()> { + if !self.flags.avoid_div_traps() { + Some(()) + } else { + None + } + } + + #[inline] + fn mie2_enabled(&mut self, _: Type) -> Option<()> { + if self.isa_flags.has_mie2() { + Some(()) + } else { + None + } + } + + #[inline] + fn mie2_disabled(&mut self, _: Type) -> Option<()> { + if !self.isa_flags.has_mie2() { + Some(()) + } else { + None + } + } + + #[inline] + fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> { + if self.isa_flags.has_vxrs_ext2() { + Some(()) + } else { + None + } + } + + #[inline] + fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> { + if !self.isa_flags.has_vxrs_ext2() { + Some(()) + } else { + None + } + } + + #[inline] + fn symbol_value_data(&mut self, inst: Inst) -> Option<(BoxExternalName, RelocDistance, i64)> { + let (name, dist, offset) = self.lower_ctx.symbol_value(inst)?; + Some((Box::new(name.clone()), dist, offset)) + } + + #[inline] + fn call_target_data(&mut self, inst: Inst) -> Option<(BoxExternalName, RelocDistance)> { + let (name, dist) = self.lower_ctx.call_target(inst)?; + Some((Box::new(name.clone()), dist)) + } + + #[inline] + fn writable_gpr(&mut self, regno: u8) -> WritableReg { + super::writable_gpr(regno) + } + + #[inline] + fn zero_reg(&mut self) -> Reg { + super::zero_reg() + } + + #[inline] + fn gpr32_ty(&mut self, ty: Type) -> Option { + match ty { + I8 | I16 | I32 | B1 | B8 | B16 | B32 => Some(ty), + _ => None, + } + } + + #[inline] + fn gpr64_ty(&mut self, ty: Type) -> Option { + match ty { + I64 | B64 | R64 => Some(ty), + _ => None, + } + } + + #[inline] + fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted { + UImm32Shifted::maybe_with_shift(n, shift).unwrap() + } + + #[inline] + fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted { + UImm16Shifted::maybe_with_shift(n, shift).unwrap() + } + + #[inline] + fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option { + if val != cmp { + Some(val) + } else { + None + } + } + + #[inline] + fn u8_as_u16(&mut self, n: u8) -> u16 { + n as u16 + } + + #[inline] + fn u64_as_u32(&mut self, n: u64) -> u32 { + n as u32 + } + + #[inline] + fn u64_as_i16(&mut self, n: u64) -> i16 { + n as i16 + } + + #[inline] + fn u64_nonzero_hipart(&mut self, n: u64) -> Option { + let part = n & 0xffff_ffff_0000_0000; + if part != 0 { + Some(part) + } else { + None + } + } + + #[inline] + fn u64_nonzero_lopart(&mut self, n: u64) -> Option { + let part = n & 0x0000_0000_ffff_ffff; + if part != 0 { + Some(part) + } else { + None + } + } + + #[inline] + fn i32_from_u64(&mut self, n: u64) -> Option { + if let Ok(imm) = i32::try_from(n as i64) { + Some(imm) + } else { + None + } + } + + #[inline] + fn i16_from_u64(&mut self, n: u64) -> Option { + if let Ok(imm) = i16::try_from(n as i64) { + Some(imm) + } else { + None + } + } + + #[inline] + fn uimm32shifted_from_u64(&mut self, n: u64) -> Option { + UImm32Shifted::maybe_from_u64(n) + } + + #[inline] + fn uimm16shifted_from_u64(&mut self, n: u64) -> Option { + UImm16Shifted::maybe_from_u64(n) + } + + #[inline] + fn 
u64_from_value(&mut self, val: Value) -> Option { + let inst = self.lower_ctx.dfg().value_def(val).inst()?; + let constant = self.lower_ctx.get_constant(inst)?; + Some(constant) + } + + #[inline] + fn u32_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_value(val)?; + let imm = u32::try_from(constant).ok()?; + Some(imm) + } + + #[inline] + fn u8_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_value(val)?; + let imm = u8::try_from(constant).ok()?; + Some(imm) + } + + #[inline] + fn u64_from_signed_value(&mut self, val: Value) -> Option { + let inst = self.lower_ctx.dfg().value_def(val).inst()?; + let constant = self.lower_ctx.get_constant(inst)?; + let ty = self.lower_ctx.output_ty(inst, 0); + Some(super::sign_extend_to_u64(constant, self.ty_bits(ty))) + } + + #[inline] + fn i64_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? as i64; + Some(constant) + } + + #[inline] + fn i32_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? as i64; + let imm = i32::try_from(constant).ok()?; + Some(imm) + } + + #[inline] + fn i16_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? as i64; + let imm = i16::try_from(constant).ok()?; + Some(imm) + } + + #[inline] + fn i16_from_swapped_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? as i64; + let imm = i16::try_from(constant).ok()?; + Some(imm.swap_bytes()) + } + + #[inline] + fn i64_from_negated_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? as i64; + let imm = -constant; + Some(imm) + } + + #[inline] + fn i32_from_negated_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? as i64; + let imm = i32::try_from(-constant).ok()?; + Some(imm) + } + + #[inline] + fn i16_from_negated_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_signed_value(val)? 
as i64; + let imm = i16::try_from(-constant).ok()?; + Some(imm) + } + + #[inline] + fn uimm16shifted_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_value(val)?; + UImm16Shifted::maybe_from_u64(constant) + } + + #[inline] + fn uimm32shifted_from_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_value(val)?; + UImm32Shifted::maybe_from_u64(constant) + } + + #[inline] + fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_value(val)?; + let imm = UImm16Shifted::maybe_from_u64(!constant)?; + Some(imm.negate_bits()) + } + + #[inline] + fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option { + let constant = self.u64_from_value(val)?; + let imm = UImm32Shifted::maybe_from_u64(!constant)?; + Some(imm.negate_bits()) + } + + #[inline] + fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 { + let mask = self.ty_bits(ty) - 1; + (amt as u8) & mask + } + + #[inline] + fn mask_as_cond(&mut self, mask: u8) -> Cond { + Cond::from_mask(mask) + } + + #[inline] + fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond { + Cond::from_intcc(*cc) + } + + #[inline] + fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond { + Cond::from_floatcc(*cc) + } + + #[inline] + fn invert_cond(&mut self, cond: &Cond) -> Cond { + Cond::invert(*cond) + } + + #[inline] + fn signed(&mut self, cc: &IntCC) -> Option<()> { + if super::condcode_is_signed(*cc) { + Some(()) + } else { + None + } + } + + #[inline] + fn unsigned(&mut self, cc: &IntCC) -> Option<()> { + if !super::condcode_is_signed(*cc) { + Some(()) + } else { + None + } + } + + #[inline] + fn reloc_distance_near(&mut self, dist: &RelocDistance) -> Option<()> { + if *dist == RelocDistance::Near { + Some(()) + } else { + None + } + } + + #[inline] + fn zero_offset(&mut self) -> Offset32 { + Offset32::new(0) + } + + #[inline] + fn i64_from_offset(&mut self, off: Offset32) -> i64 { + i64::from(off) + } + + #[inline] + fn littleendian(&mut self, flags: MemFlags) -> Option<()> { + let endianness = flags.endianness(Endianness::Big); + if endianness == Endianness::Little { + Some(()) + } else { + None + } + } + + #[inline] + fn bigendian(&mut self, flags: MemFlags) -> Option<()> { + let endianness = flags.endianness(Endianness::Big); + if endianness == Endianness::Big { + Some(()) + } else { + None + } + } + + #[inline] + fn memflags_trusted(&mut self) -> MemFlags { + MemFlags::trusted() + } + + #[inline] + fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, flags: MemFlags) -> MemArg { + MemArg::reg_plus_reg(x, y, flags) + } + + #[inline] + fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, flags: MemFlags) -> MemArg { + MemArg::reg_plus_off(reg, off, flags) + } + + #[inline] + fn memarg_symbol(&mut self, name: BoxExternalName, offset: i32, flags: MemFlags) -> MemArg { + MemArg::Symbol { + name, + offset, + flags, + } + } + + #[inline] + fn memarg_symbol_offset_sum(&mut self, off1: i64, off2: i64) -> Option { + let off = i32::try_from(off1 + off2).ok()?; + if off & 1 == 0 { + Some(off) + } else { + None + } + } + + #[inline] + fn abi_stackslot_addr( + &mut self, + dst: WritableReg, + stack_slot: StackSlot, + offset: Offset32, + ) -> MInst { + let offset = u32::try_from(i32::from(offset)).unwrap(); + self.lower_ctx.abi().stackslot_addr(stack_slot, offset, dst) + } + + #[inline] + fn sinkable_inst(&mut self, val: Value) -> Option { + let input = self.lower_ctx.get_value_as_source_or_const(val); + if let Some((inst, 0)) = input.inst { + return Some(inst); + } + None + } 
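Most of the helper methods above follow a single convention: returning `None` means "this pattern does not apply", so a generated rule can chain predicates and partial extractors with `?` and simply fall through to the next rule when any check fails. A small standalone sketch of that convention (the context type and helpers below are made up, not the real `Context` trait):

// Sketch of the Option-as-predicate idiom used by the glue code above.
struct Ctx {
    has_mie2: bool,
}

impl Ctx {
    // Predicate: Some(()) when the feature is available, None otherwise.
    fn mie2_enabled(&self) -> Option<()> {
        if self.has_mie2 {
            Some(())
        } else {
            None
        }
    }
    // Partial extractor: only matches values that fit in an i16.
    fn i16_from_u64(&self, n: u64) -> Option<i16> {
        if n <= i16::MAX as u64 {
            Some(n as i16)
        } else {
            None
        }
    }
}

// Hand-written stand-in for one generated rule: it only "fires" (returns Some)
// when every predicate and extractor succeeds.
fn try_rule(ctx: &Ctx, imm: u64) -> Option<i16> {
    ctx.mie2_enabled()?;
    let imm = ctx.i16_from_u64(imm)?;
    Some(imm)
}

fn main() {
    assert_eq!(try_rule(&Ctx { has_mie2: true }, 42), Some(42));
    assert_eq!(try_rule(&Ctx { has_mie2: false }, 42), None);
    assert_eq!(try_rule(&Ctx { has_mie2: true }, 1 << 40), None);
}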
+ + #[inline] + fn sink_inst(&mut self, inst: Inst) -> Unit { + self.lower_ctx.sink_inst(inst); + } + + #[inline] + fn emit(&mut self, inst: &MInst) -> Unit { + self.emitted_insts.push(inst.clone()); + } +} diff --git a/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest new file mode 100644 index 000000000000..202397176c41 --- /dev/null +++ b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest @@ -0,0 +1,4 @@ +src/clif.isle f176ef3bba99365 +src/prelude.isle 51d2aef2566c1c96 +src/isa/s390x/inst.isle 63cf833b5cfd727d +src/isa/s390x/lower.isle a0e21a567040bc33 diff --git a/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs new file mode 100644 index 000000000000..1103e01d860b --- /dev/null +++ b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs @@ -0,0 +1,11438 @@ +// GENERATED BY ISLE. DO NOT EDIT! +// +// Generated automatically from the instruction-selection DSL code in: +// - src/clif.isle +// - src/prelude.isle +// - src/isa/s390x/inst.isle +// - src/isa/s390x/lower.isle + +#![allow(dead_code, unreachable_code, unreachable_patterns)] +#![allow(unused_imports, unused_variables, non_snake_case)] +#![allow(irrefutable_let_patterns)] + +use super::*; // Pulls in all external types. + +/// Context during lowering: an implementation of this trait +/// must be provided with all external constructors and extractors. +/// A mutable borrow is passed along through all lowering logic. +pub trait Context { + fn unpack_value_array_2(&mut self, arg0: &ValueArray2) -> (Value, Value); + fn pack_value_array_2(&mut self, arg0: Value, arg1: Value) -> ValueArray2; + fn unpack_value_array_3(&mut self, arg0: &ValueArray3) -> (Value, Value, Value); + fn pack_value_array_3(&mut self, arg0: Value, arg1: Value, arg2: Value) -> ValueArray3; + fn u32_add(&mut self, arg0: u32, arg1: u32) -> u32; + fn u8_and(&mut self, arg0: u8, arg1: u8) -> u8; + fn value_reg(&mut self, arg0: Reg) -> ValueRegs; + fn value_regs(&mut self, arg0: Reg, arg1: Reg) -> ValueRegs; + fn value_regs_invalid(&mut self) -> ValueRegs; + fn temp_writable_reg(&mut self, arg0: Type) -> WritableReg; + fn invalid_reg(&mut self) -> Reg; + fn put_in_reg(&mut self, arg0: Value) -> Reg; + fn put_in_regs(&mut self, arg0: Value) -> ValueRegs; + fn value_regs_get(&mut self, arg0: ValueRegs, arg1: usize) -> Reg; + fn u8_as_u64(&mut self, arg0: u8) -> u64; + fn u16_as_u64(&mut self, arg0: u16) -> u64; + fn u32_as_u64(&mut self, arg0: u32) -> u64; + fn ty_bits(&mut self, arg0: Type) -> u8; + fn ty_bits_u16(&mut self, arg0: Type) -> u16; + fn ty_bytes(&mut self, arg0: Type) -> u16; + fn lane_type(&mut self, arg0: Type) -> Type; + fn fits_in_16(&mut self, arg0: Type) -> Option; + fn fits_in_32(&mut self, arg0: Type) -> Option; + fn fits_in_64(&mut self, arg0: Type) -> Option; + fn ty_32_or_64(&mut self, arg0: Type) -> Option; + fn ty_8_or_16(&mut self, arg0: Type) -> Option; + fn vec128(&mut self, arg0: Type) -> Option; + fn not_i64x2(&mut self, arg0: Type) -> Option<()>; + fn value_list_slice(&mut self, arg0: ValueList) -> ValueSlice; + fn unwrap_head_value_list_1(&mut self, arg0: ValueList) -> (Value, ValueSlice); + fn unwrap_head_value_list_2(&mut self, arg0: ValueList) -> (Value, Value, ValueSlice); + fn writable_reg_to_reg(&mut self, arg0: WritableReg) -> Reg; + fn u8_from_uimm8(&mut self, arg0: Uimm8) -> u8; + fn u64_from_imm64(&mut self, arg0: Imm64) -> u64; + fn 
nonzero_u64_from_imm64(&mut self, arg0: Imm64) -> Option; + fn u64_from_ieee32(&mut self, arg0: Ieee32) -> u64; + fn u64_from_ieee64(&mut self, arg0: Ieee64) -> u64; + fn inst_results(&mut self, arg0: Inst) -> ValueSlice; + fn first_result(&mut self, arg0: Inst) -> Option; + fn inst_data(&mut self, arg0: Inst) -> InstructionData; + fn value_type(&mut self, arg0: Value) -> Type; + fn multi_lane(&mut self, arg0: Type) -> Option<(u8, u16)>; + fn def_inst(&mut self, arg0: Value) -> Option; + fn trap_code_division_by_zero(&mut self) -> TrapCode; + fn trap_code_integer_overflow(&mut self) -> TrapCode; + fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode; + fn avoid_div_traps(&mut self, arg0: Type) -> Option<()>; + fn mie2_enabled(&mut self, arg0: Type) -> Option<()>; + fn mie2_disabled(&mut self, arg0: Type) -> Option<()>; + fn vxrs_ext2_enabled(&mut self, arg0: Type) -> Option<()>; + fn vxrs_ext2_disabled(&mut self, arg0: Type) -> Option<()>; + fn allow_div_traps(&mut self, arg0: Type) -> Option<()>; + fn symbol_value_data(&mut self, arg0: Inst) -> Option<(BoxExternalName, RelocDistance, i64)>; + fn call_target_data(&mut self, arg0: Inst) -> Option<(BoxExternalName, RelocDistance)>; + fn writable_gpr(&mut self, arg0: u8) -> WritableReg; + fn zero_reg(&mut self) -> Reg; + fn gpr32_ty(&mut self, arg0: Type) -> Option; + fn gpr64_ty(&mut self, arg0: Type) -> Option; + fn uimm32shifted(&mut self, arg0: u32, arg1: u8) -> UImm32Shifted; + fn uimm16shifted(&mut self, arg0: u16, arg1: u8) -> UImm16Shifted; + fn i64_nonequal(&mut self, arg0: i64, arg1: i64) -> Option; + fn u8_as_u16(&mut self, arg0: u8) -> u16; + fn u64_as_u32(&mut self, arg0: u64) -> u32; + fn u64_as_i16(&mut self, arg0: u64) -> i16; + fn u64_nonzero_hipart(&mut self, arg0: u64) -> Option; + fn u64_nonzero_lopart(&mut self, arg0: u64) -> Option; + fn i32_from_u64(&mut self, arg0: u64) -> Option; + fn i16_from_u64(&mut self, arg0: u64) -> Option; + fn uimm32shifted_from_u64(&mut self, arg0: u64) -> Option; + fn uimm16shifted_from_u64(&mut self, arg0: u64) -> Option; + fn u64_from_value(&mut self, arg0: Value) -> Option; + fn u32_from_value(&mut self, arg0: Value) -> Option; + fn u8_from_value(&mut self, arg0: Value) -> Option; + fn u64_from_signed_value(&mut self, arg0: Value) -> Option; + fn i64_from_value(&mut self, arg0: Value) -> Option; + fn i32_from_value(&mut self, arg0: Value) -> Option; + fn i16_from_value(&mut self, arg0: Value) -> Option; + fn i16_from_swapped_value(&mut self, arg0: Value) -> Option; + fn i64_from_negated_value(&mut self, arg0: Value) -> Option; + fn i32_from_negated_value(&mut self, arg0: Value) -> Option; + fn i16_from_negated_value(&mut self, arg0: Value) -> Option; + fn uimm16shifted_from_value(&mut self, arg0: Value) -> Option; + fn uimm32shifted_from_value(&mut self, arg0: Value) -> Option; + fn uimm16shifted_from_inverted_value(&mut self, arg0: Value) -> Option; + fn uimm32shifted_from_inverted_value(&mut self, arg0: Value) -> Option; + fn mask_amt_imm(&mut self, arg0: Type, arg1: i64) -> u8; + fn mask_as_cond(&mut self, arg0: u8) -> Cond; + fn intcc_as_cond(&mut self, arg0: &IntCC) -> Cond; + fn floatcc_as_cond(&mut self, arg0: &FloatCC) -> Cond; + fn invert_cond(&mut self, arg0: &Cond) -> Cond; + fn signed(&mut self, arg0: &IntCC) -> Option<()>; + fn unsigned(&mut self, arg0: &IntCC) -> Option<()>; + fn reloc_distance_near(&mut self, arg0: &RelocDistance) -> Option<()>; + fn zero_offset(&mut self) -> Offset32; + fn i64_from_offset(&mut self, arg0: Offset32) -> i64; + fn littleendian(&mut 
self, arg0: MemFlags) -> Option<()>; + fn bigendian(&mut self, arg0: MemFlags) -> Option<()>; + fn memflags_trusted(&mut self) -> MemFlags; + fn memarg_reg_plus_reg(&mut self, arg0: Reg, arg1: Reg, arg2: MemFlags) -> MemArg; + fn memarg_reg_plus_off(&mut self, arg0: Reg, arg1: i64, arg2: MemFlags) -> MemArg; + fn memarg_symbol(&mut self, arg0: BoxExternalName, arg1: i32, arg2: MemFlags) -> MemArg; + fn memarg_symbol_offset_sum(&mut self, arg0: i64, arg1: i64) -> Option; + fn abi_stackslot_addr(&mut self, arg0: WritableReg, arg1: StackSlot, arg2: Offset32) -> MInst; + fn sinkable_inst(&mut self, arg0: Value) -> Option; + fn sink_inst(&mut self, arg0: Inst) -> Unit; + fn emit(&mut self, arg0: &MInst) -> Unit; +} + +/// Internal type SideEffectNoResult: defined at src/prelude.isle line 282. +#[derive(Clone, Debug)] +pub enum SideEffectNoResult { + Inst { inst: MInst }, +} + +/// Internal type ProducesFlags: defined at src/prelude.isle line 295. +#[derive(Clone, Debug)] +pub enum ProducesFlags { + ProducesFlags { inst: MInst, result: Reg }, +} + +/// Internal type ConsumesFlags: defined at src/prelude.isle line 298. +#[derive(Clone, Debug)] +pub enum ConsumesFlags { + ConsumesFlags { inst: MInst, result: Reg }, +} + +/// Internal type MInst: defined at src/isa/s390x/inst.isle line 2. +#[derive(Clone, Debug)] +pub enum MInst { + Nop0, + Nop2, + AluRRR { + alu_op: ALUOp, + rd: WritableReg, + rn: Reg, + rm: Reg, + }, + AluRRSImm16 { + alu_op: ALUOp, + rd: WritableReg, + rn: Reg, + imm: i16, + }, + AluRR { + alu_op: ALUOp, + rd: WritableReg, + rm: Reg, + }, + AluRX { + alu_op: ALUOp, + rd: WritableReg, + mem: MemArg, + }, + AluRSImm16 { + alu_op: ALUOp, + rd: WritableReg, + imm: i16, + }, + AluRSImm32 { + alu_op: ALUOp, + rd: WritableReg, + imm: i32, + }, + AluRUImm32 { + alu_op: ALUOp, + rd: WritableReg, + imm: u32, + }, + AluRUImm16Shifted { + alu_op: ALUOp, + rd: WritableReg, + imm: UImm16Shifted, + }, + AluRUImm32Shifted { + alu_op: ALUOp, + rd: WritableReg, + imm: UImm32Shifted, + }, + SMulWide { + rn: Reg, + rm: Reg, + }, + UMulWide { + rn: Reg, + }, + SDivMod32 { + rn: Reg, + }, + SDivMod64 { + rn: Reg, + }, + UDivMod32 { + rn: Reg, + }, + UDivMod64 { + rn: Reg, + }, + Flogr { + rn: Reg, + }, + ShiftRR { + shift_op: ShiftOp, + rd: WritableReg, + rn: Reg, + shift_imm: u8, + shift_reg: Reg, + }, + UnaryRR { + op: UnaryOp, + rd: WritableReg, + rn: Reg, + }, + CmpRR { + op: CmpOp, + rn: Reg, + rm: Reg, + }, + CmpRX { + op: CmpOp, + rn: Reg, + mem: MemArg, + }, + CmpRSImm16 { + op: CmpOp, + rn: Reg, + imm: i16, + }, + CmpRSImm32 { + op: CmpOp, + rn: Reg, + imm: i32, + }, + CmpRUImm32 { + op: CmpOp, + rn: Reg, + imm: u32, + }, + CmpTrapRR { + op: CmpOp, + rn: Reg, + rm: Reg, + cond: Cond, + trap_code: TrapCode, + }, + CmpTrapRSImm16 { + op: CmpOp, + rn: Reg, + imm: i16, + cond: Cond, + trap_code: TrapCode, + }, + CmpTrapRUImm16 { + op: CmpOp, + rn: Reg, + imm: u16, + cond: Cond, + trap_code: TrapCode, + }, + AtomicRmw { + alu_op: ALUOp, + rd: WritableReg, + rn: Reg, + mem: MemArg, + }, + AtomicCas32 { + rd: WritableReg, + rn: Reg, + mem: MemArg, + }, + AtomicCas64 { + rd: WritableReg, + rn: Reg, + mem: MemArg, + }, + Fence, + Load32 { + rd: WritableReg, + mem: MemArg, + }, + Load32ZExt8 { + rd: WritableReg, + mem: MemArg, + }, + Load32SExt8 { + rd: WritableReg, + mem: MemArg, + }, + Load32ZExt16 { + rd: WritableReg, + mem: MemArg, + }, + Load32SExt16 { + rd: WritableReg, + mem: MemArg, + }, + Load64 { + rd: WritableReg, + mem: MemArg, + }, + Load64ZExt8 { + rd: WritableReg, + mem: MemArg, + }, + 
Load64SExt8 { + rd: WritableReg, + mem: MemArg, + }, + Load64ZExt16 { + rd: WritableReg, + mem: MemArg, + }, + Load64SExt16 { + rd: WritableReg, + mem: MemArg, + }, + Load64ZExt32 { + rd: WritableReg, + mem: MemArg, + }, + Load64SExt32 { + rd: WritableReg, + mem: MemArg, + }, + LoadRev16 { + rd: WritableReg, + mem: MemArg, + }, + LoadRev32 { + rd: WritableReg, + mem: MemArg, + }, + LoadRev64 { + rd: WritableReg, + mem: MemArg, + }, + Store8 { + rd: Reg, + mem: MemArg, + }, + Store16 { + rd: Reg, + mem: MemArg, + }, + Store32 { + rd: Reg, + mem: MemArg, + }, + Store64 { + rd: Reg, + mem: MemArg, + }, + StoreImm8 { + imm: u8, + mem: MemArg, + }, + StoreImm16 { + imm: i16, + mem: MemArg, + }, + StoreImm32SExt16 { + imm: i16, + mem: MemArg, + }, + StoreImm64SExt16 { + imm: i16, + mem: MemArg, + }, + StoreRev16 { + rd: Reg, + mem: MemArg, + }, + StoreRev32 { + rd: Reg, + mem: MemArg, + }, + StoreRev64 { + rd: Reg, + mem: MemArg, + }, + LoadMultiple64 { + rt: WritableReg, + rt2: WritableReg, + mem: MemArg, + }, + StoreMultiple64 { + rt: Reg, + rt2: Reg, + mem: MemArg, + }, + Mov32 { + rd: WritableReg, + rm: Reg, + }, + Mov64 { + rd: WritableReg, + rm: Reg, + }, + Mov32Imm { + rd: WritableReg, + imm: u32, + }, + Mov32SImm16 { + rd: WritableReg, + imm: i16, + }, + Mov64SImm16 { + rd: WritableReg, + imm: i16, + }, + Mov64SImm32 { + rd: WritableReg, + imm: i32, + }, + Mov64UImm16Shifted { + rd: WritableReg, + imm: UImm16Shifted, + }, + Mov64UImm32Shifted { + rd: WritableReg, + imm: UImm32Shifted, + }, + Insert64UImm16Shifted { + rd: WritableReg, + imm: UImm16Shifted, + }, + Insert64UImm32Shifted { + rd: WritableReg, + imm: UImm32Shifted, + }, + Extend { + rd: WritableReg, + rn: Reg, + signed: bool, + from_bits: u8, + to_bits: u8, + }, + CMov32 { + rd: WritableReg, + cond: Cond, + rm: Reg, + }, + CMov64 { + rd: WritableReg, + cond: Cond, + rm: Reg, + }, + CMov32SImm16 { + rd: WritableReg, + cond: Cond, + imm: i16, + }, + CMov64SImm16 { + rd: WritableReg, + cond: Cond, + imm: i16, + }, + FpuMove32 { + rd: WritableReg, + rn: Reg, + }, + FpuMove64 { + rd: WritableReg, + rn: Reg, + }, + FpuCMov32 { + rd: WritableReg, + cond: Cond, + rm: Reg, + }, + FpuCMov64 { + rd: WritableReg, + cond: Cond, + rm: Reg, + }, + MovToFpr { + rd: WritableReg, + rn: Reg, + }, + MovFromFpr { + rd: WritableReg, + rn: Reg, + }, + FpuRR { + fpu_op: FPUOp1, + rd: WritableReg, + rn: Reg, + }, + FpuRRR { + fpu_op: FPUOp2, + rd: WritableReg, + rm: Reg, + }, + FpuRRRR { + fpu_op: FPUOp3, + rd: WritableReg, + rn: Reg, + rm: Reg, + }, + FpuCopysign { + rd: WritableReg, + rn: Reg, + rm: Reg, + }, + FpuCmp32 { + rn: Reg, + rm: Reg, + }, + FpuCmp64 { + rn: Reg, + rm: Reg, + }, + FpuLoad32 { + rd: WritableReg, + mem: MemArg, + }, + FpuStore32 { + rd: Reg, + mem: MemArg, + }, + FpuLoad64 { + rd: WritableReg, + mem: MemArg, + }, + FpuStore64 { + rd: Reg, + mem: MemArg, + }, + FpuLoadRev32 { + rd: WritableReg, + mem: MemArg, + }, + FpuStoreRev32 { + rd: Reg, + mem: MemArg, + }, + FpuLoadRev64 { + rd: WritableReg, + mem: MemArg, + }, + FpuStoreRev64 { + rd: Reg, + mem: MemArg, + }, + LoadFpuConst32 { + rd: WritableReg, + const_data: u32, + }, + LoadFpuConst64 { + rd: WritableReg, + const_data: u64, + }, + FpuToInt { + op: FpuToIntOp, + rd: WritableReg, + rn: Reg, + }, + IntToFpu { + op: IntToFpuOp, + rd: WritableReg, + rn: Reg, + }, + FpuRound { + op: FpuRoundMode, + rd: WritableReg, + rn: Reg, + }, + FpuVecRRR { + fpu_op: FPUOp2, + rd: WritableReg, + rn: Reg, + rm: Reg, + }, + Call { + link: WritableReg, + info: BoxCallInfo, + }, + CallInd { 
+ link: WritableReg, + info: BoxCallIndInfo, + }, + Ret { + link: Reg, + }, + EpiloguePlaceholder, + Jump { + dest: BranchTarget, + }, + CondBr { + taken: BranchTarget, + not_taken: BranchTarget, + cond: Cond, + }, + TrapIf { + cond: Cond, + trap_code: TrapCode, + }, + OneWayCondBr { + target: BranchTarget, + cond: Cond, + }, + IndirectBr { + rn: Reg, + targets: VecMachLabel, + }, + Debugtrap, + Trap { + trap_code: TrapCode, + }, + JTSequence { + info: BoxJTSequenceInfo, + ridx: Reg, + rtmp1: WritableReg, + rtmp2: WritableReg, + }, + LoadExtNameFar { + rd: WritableReg, + name: BoxExternalName, + offset: i64, + }, + LoadAddr { + rd: WritableReg, + mem: MemArg, + }, + VirtualSPOffsetAdj { + offset: i64, + }, + ValueLabelMarker { + reg: Reg, + label: ValueLabel, + }, + Unwind { + inst: UnwindInst, + }, +} + +/// Internal type ALUOp: defined at src/isa/s390x/inst.isle line 691. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum ALUOp { + Add32, + Add32Ext16, + Add64, + Add64Ext16, + Add64Ext32, + AddLogical32, + AddLogical64, + AddLogical64Ext32, + Sub32, + Sub32Ext16, + Sub64, + Sub64Ext16, + Sub64Ext32, + SubLogical32, + SubLogical64, + SubLogical64Ext32, + Mul32, + Mul32Ext16, + Mul64, + Mul64Ext16, + Mul64Ext32, + And32, + And64, + Orr32, + Orr64, + Xor32, + Xor64, + AndNot32, + AndNot64, + OrrNot32, + OrrNot64, + XorNot32, + XorNot64, +} + +/// Internal type UnaryOp: defined at src/isa/s390x/inst.isle line 732. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum UnaryOp { + Abs32, + Abs64, + Abs64Ext32, + Neg32, + Neg64, + Neg64Ext32, + PopcntByte, + PopcntReg, +} + +/// Internal type ShiftOp: defined at src/isa/s390x/inst.isle line 745. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum ShiftOp { + RotL32, + RotL64, + LShL32, + LShL64, + LShR32, + LShR64, + AShR32, + AShR64, +} + +/// Internal type CmpOp: defined at src/isa/s390x/inst.isle line 758. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum CmpOp { + CmpS32, + CmpS32Ext16, + CmpS64, + CmpS64Ext16, + CmpS64Ext32, + CmpL32, + CmpL32Ext16, + CmpL64, + CmpL64Ext16, + CmpL64Ext32, +} + +/// Internal type FPUOp1: defined at src/isa/s390x/inst.isle line 773. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum FPUOp1 { + Abs32, + Abs64, + Neg32, + Neg64, + NegAbs32, + NegAbs64, + Sqrt32, + Sqrt64, + Cvt32To64, + Cvt64To32, +} + +/// Internal type FPUOp2: defined at src/isa/s390x/inst.isle line 788. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum FPUOp2 { + Add32, + Add64, + Sub32, + Sub64, + Mul32, + Mul64, + Div32, + Div64, + Max32, + Max64, + Min32, + Min64, +} + +/// Internal type FPUOp3: defined at src/isa/s390x/inst.isle line 805. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum FPUOp3 { + MAdd32, + MAdd64, + MSub32, + MSub64, +} + +/// Internal type FpuToIntOp: defined at src/isa/s390x/inst.isle line 814. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum FpuToIntOp { + F32ToU32, + F32ToI32, + F32ToU64, + F32ToI64, + F64ToU32, + F64ToI32, + F64ToU64, + F64ToI64, +} + +/// Internal type IntToFpuOp: defined at src/isa/s390x/inst.isle line 827. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum IntToFpuOp { + U32ToF32, + I32ToF32, + U32ToF64, + I32ToF64, + U64ToF32, + I64ToF32, + U64ToF64, + I64ToF64, +} + +/// Internal type FpuRoundMode: defined at src/isa/s390x/inst.isle line 841. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum FpuRoundMode { + Minus32, + Minus64, + Plus32, + Plus64, + Zero32, + Zero64, + Nearest32, + Nearest64, +} + +/// Internal type WritableRegPair: defined at src/isa/s390x/inst.isle line 1238. +#[derive(Clone, Debug)] +pub enum WritableRegPair { + WritableRegPair { hi: WritableReg, lo: WritableReg }, +} + +/// Internal type RegPair: defined at src/isa/s390x/inst.isle line 1260. +#[derive(Clone, Debug)] +pub enum RegPair { + RegPair { hi: Reg, lo: Reg }, +} + +/// Internal type ProducesBool: defined at src/isa/s390x/inst.isle line 2160. +#[derive(Clone, Debug)] +pub enum ProducesBool { + ProducesBool { producer: ProducesFlags, cond: Cond }, +} + +// Generated as internal constructor for term temp_reg. +pub fn constructor_temp_reg(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + // Rule at src/prelude.isle line 70. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr1_0); +} + +// Generated as internal constructor for term lo_reg. +pub fn constructor_lo_reg(ctx: &mut C, arg0: Value) -> Option { + let pattern0_0 = arg0; + // Rule at src/prelude.isle line 105. + let expr0_0 = C::put_in_regs(ctx, pattern0_0); + let expr1_0: usize = 0; + let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0); + return Some(expr2_0); +} + +// Generated as internal constructor for term value_regs_none. +pub fn constructor_value_regs_none( + ctx: &mut C, + arg0: &SideEffectNoResult, +) -> Option { + let pattern0_0 = arg0; + if let &SideEffectNoResult::Inst { + inst: ref pattern1_0, + } = pattern0_0 + { + // Rule at src/prelude.isle line 287. + let expr0_0 = C::emit(ctx, &pattern1_0); + let expr1_0 = C::value_regs_invalid(ctx); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term with_flags. +pub fn constructor_with_flags( + ctx: &mut C, + arg0: &ProducesFlags, + arg1: &ConsumesFlags, +) -> Option { + let pattern0_0 = arg0; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + let pattern2_0 = arg1; + if let &ConsumesFlags::ConsumesFlags { + inst: ref pattern3_0, + result: pattern3_1, + } = pattern2_0 + { + // Rule at src/prelude.isle line 308. + let expr0_0 = C::emit(ctx, &pattern1_0); + let expr1_0 = C::emit(ctx, &pattern3_0); + let expr2_0 = C::value_regs(ctx, pattern1_1, pattern3_1); + return Some(expr2_0); + } + } + return None; +} + +// Generated as internal constructor for term with_flags_1. +pub fn constructor_with_flags_1( + ctx: &mut C, + arg0: &ProducesFlags, + arg1: &ConsumesFlags, +) -> Option { + let pattern0_0 = arg0; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + let pattern2_0 = arg1; + if let &ConsumesFlags::ConsumesFlags { + inst: ref pattern3_0, + result: pattern3_1, + } = pattern2_0 + { + // Rule at src/prelude.isle line 316. + let expr0_0 = C::emit(ctx, &pattern1_0); + let expr1_0 = C::emit(ctx, &pattern3_0); + return Some(pattern3_1); + } + } + return None; +} + +// Generated as internal constructor for term with_flags_2. 
+pub fn constructor_with_flags_2( + ctx: &mut C, + arg0: &ProducesFlags, + arg1: &ConsumesFlags, + arg2: &ConsumesFlags, +) -> Option { + let pattern0_0 = arg0; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + let pattern2_0 = arg1; + if let &ConsumesFlags::ConsumesFlags { + inst: ref pattern3_0, + result: pattern3_1, + } = pattern2_0 + { + let pattern4_0 = arg2; + if let &ConsumesFlags::ConsumesFlags { + inst: ref pattern5_0, + result: pattern5_1, + } = pattern4_0 + { + // Rule at src/prelude.isle line 326. + let expr0_0 = C::emit(ctx, &pattern1_0); + let expr1_0 = C::emit(ctx, &pattern5_0); + let expr2_0 = C::emit(ctx, &pattern3_0); + let expr3_0 = C::value_regs(ctx, pattern3_1, pattern5_1); + return Some(expr3_0); + } + } + } + return None; +} + +// Generated as internal constructor for term mask_amt_reg. +pub fn constructor_mask_amt_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1016. + let expr0_0: i64 = -1; + let expr1_0 = C::mask_amt_imm(ctx, pattern1_0, expr0_0); + let expr2_0 = C::u8_as_u16(ctx, expr1_0); + let expr3_0: u8 = 0; + let expr4_0 = C::uimm16shifted(ctx, expr2_0, expr3_0); + let expr5_0 = constructor_and_uimm16shifted(ctx, pattern1_0, pattern2_0, expr4_0)?; + return Some(expr5_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1019. + return Some(pattern2_0); + } + return None; +} + +// Generated as internal constructor for term lower_address. +pub fn constructor_lower_address( + ctx: &mut C, + arg0: MemFlags, + arg1: Value, + arg2: Offset32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let Some(pattern2_0) = C::def_inst(ctx, pattern1_0) { + let pattern3_0 = C::inst_data(ctx, pattern2_0); + if let &InstructionData::Binary { + opcode: ref pattern4_0, + args: ref pattern4_1, + } = &pattern3_0 + { + if let &Opcode::Iadd = &pattern4_0 { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + let pattern7_0 = arg2; + let pattern8_0 = C::i64_from_offset(ctx, pattern7_0); + if pattern8_0 == 0 { + // Rule at src/isa/s390x/inst.isle line 1102. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = C::memarg_reg_plus_reg(ctx, expr0_0, expr1_0, pattern0_0); + return Some(expr2_0); + } + } + } + if let Some((pattern3_0, pattern3_1, pattern3_2)) = C::symbol_value_data(ctx, pattern2_0) { + if let Some(()) = C::reloc_distance_near(ctx, &pattern3_1) { + let pattern5_0 = arg2; + let pattern6_0 = C::i64_from_offset(ctx, pattern5_0); + let closure7 = || { + return Some(pattern3_2); + }; + if let Some(pattern7_0) = closure7() { + if let Some(pattern8_0) = + C::memarg_symbol_offset_sum(ctx, pattern6_0, pattern7_0) + { + // Rule at src/isa/s390x/inst.isle line 1105. + let expr0_0 = C::memarg_symbol(ctx, pattern3_0, pattern8_0, pattern0_0); + return Some(expr0_0); + } + } + } + } + } + let pattern2_0 = arg2; + let pattern3_0 = C::i64_from_offset(ctx, pattern2_0); + // Rule at src/isa/s390x/inst.isle line 1099. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = C::memarg_reg_plus_off(ctx, expr0_0, pattern3_0, pattern0_0); + return Some(expr1_0); +} + +// Generated as internal constructor for term stack_addr_impl. 
+pub fn constructor_stack_addr_impl( + ctx: &mut C, + arg0: Type, + arg1: StackSlot, + arg2: Offset32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1132. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = C::abi_stackslot_addr(ctx, expr0_0, pattern1_0, pattern2_0); + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term sink_load. +pub fn constructor_sink_load(ctx: &mut C, arg0: Inst) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern2_0, + arg: pattern2_1, + flags: pattern2_2, + offset: pattern2_3, + } = &pattern1_0 + { + if let &Opcode::Load = &pattern2_0 { + // Rule at src/isa/s390x/inst.isle line 1202. + let expr0_0 = C::sink_inst(ctx, pattern0_0); + let expr1_0 = constructor_lower_address(ctx, pattern2_2, pattern2_1, pattern2_3)?; + return Some(expr1_0); + } + } + return None; +} + +// Generated as internal constructor for term sink_sload16. +pub fn constructor_sink_sload16(ctx: &mut C, arg0: Inst) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern2_0, + arg: pattern2_1, + flags: pattern2_2, + offset: pattern2_3, + } = &pattern1_0 + { + if let &Opcode::Sload16 = &pattern2_0 { + // Rule at src/isa/s390x/inst.isle line 1209. + let expr0_0 = C::sink_inst(ctx, pattern0_0); + let expr1_0 = constructor_lower_address(ctx, pattern2_2, pattern2_1, pattern2_3)?; + return Some(expr1_0); + } + } + return None; +} + +// Generated as internal constructor for term sink_sload32. +pub fn constructor_sink_sload32(ctx: &mut C, arg0: Inst) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern2_0, + arg: pattern2_1, + flags: pattern2_2, + offset: pattern2_3, + } = &pattern1_0 + { + if let &Opcode::Sload32 = &pattern2_0 { + // Rule at src/isa/s390x/inst.isle line 1216. + let expr0_0 = C::sink_inst(ctx, pattern0_0); + let expr1_0 = constructor_lower_address(ctx, pattern2_2, pattern2_1, pattern2_3)?; + return Some(expr1_0); + } + } + return None; +} + +// Generated as internal constructor for term sink_uload16. +pub fn constructor_sink_uload16(ctx: &mut C, arg0: Inst) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern2_0, + arg: pattern2_1, + flags: pattern2_2, + offset: pattern2_3, + } = &pattern1_0 + { + if let &Opcode::Uload16 = &pattern2_0 { + // Rule at src/isa/s390x/inst.isle line 1223. + let expr0_0 = C::sink_inst(ctx, pattern0_0); + let expr1_0 = constructor_lower_address(ctx, pattern2_2, pattern2_1, pattern2_3)?; + return Some(expr1_0); + } + } + return None; +} + +// Generated as internal constructor for term sink_uload32. +pub fn constructor_sink_uload32(ctx: &mut C, arg0: Inst) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern2_0, + arg: pattern2_1, + flags: pattern2_2, + offset: pattern2_3, + } = &pattern1_0 + { + if let &Opcode::Uload32 = &pattern2_0 { + // Rule at src/isa/s390x/inst.isle line 1230. 
+ let expr0_0 = C::sink_inst(ctx, pattern0_0); + let expr1_0 = constructor_lower_address(ctx, pattern2_2, pattern2_1, pattern2_3)?; + return Some(expr1_0); + } + } + return None; +} + +// Generated as internal constructor for term temp_writable_regpair. +pub fn constructor_temp_writable_regpair(ctx: &mut C) -> Option { + // Rule at src/isa/s390x/inst.isle line 1243. + let expr0_0: u8 = 0; + let expr1_0 = C::writable_gpr(ctx, expr0_0); + let expr2_0: u8 = 1; + let expr3_0 = C::writable_gpr(ctx, expr2_0); + let expr4_0 = WritableRegPair::WritableRegPair { + hi: expr1_0, + lo: expr3_0, + }; + return Some(expr4_0); +} + +// Generated as internal constructor for term copy_writable_regpair. +pub fn constructor_copy_writable_regpair( + ctx: &mut C, + arg0: &RegPair, +) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1249. + let expr0_0 = constructor_temp_writable_regpair(ctx)?; + return Some(expr0_0); +} + +// Generated as internal constructor for term writable_regpair_hi. +pub fn constructor_writable_regpair_hi( + ctx: &mut C, + arg0: &WritableRegPair, +) -> Option { + let pattern0_0 = arg0; + if let &WritableRegPair::WritableRegPair { + hi: pattern1_0, + lo: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 1253. + return Some(pattern1_0); + } + return None; +} + +// Generated as internal constructor for term writable_regpair_lo. +pub fn constructor_writable_regpair_lo( + ctx: &mut C, + arg0: &WritableRegPair, +) -> Option { + let pattern0_0 = arg0; + if let &WritableRegPair::WritableRegPair { + hi: pattern1_0, + lo: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 1257. + return Some(pattern1_1); + } + return None; +} + +// Generated as internal constructor for term writable_regpair_to_regpair. +pub fn constructor_writable_regpair_to_regpair( + ctx: &mut C, + arg0: &WritableRegPair, +) -> Option { + let pattern0_0 = arg0; + if let &WritableRegPair::WritableRegPair { + hi: pattern1_0, + lo: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 1264. + let expr0_0 = C::writable_reg_to_reg(ctx, pattern1_0); + let expr1_0 = C::writable_reg_to_reg(ctx, pattern1_1); + let expr2_0 = RegPair::RegPair { + hi: expr0_0, + lo: expr1_0, + }; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term uninitialized_regpair. +pub fn constructor_uninitialized_regpair(ctx: &mut C) -> Option { + // Rule at src/isa/s390x/inst.isle line 1269. + let expr0_0 = constructor_temp_writable_regpair(ctx)?; + let expr1_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term regpair_hi. +pub fn constructor_regpair_hi(ctx: &mut C, arg0: &RegPair) -> Option { + let pattern0_0 = arg0; + if let &RegPair::RegPair { + hi: pattern1_0, + lo: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 1274. + return Some(pattern1_0); + } + return None; +} + +// Generated as internal constructor for term regpair_lo. +pub fn constructor_regpair_lo(ctx: &mut C, arg0: &RegPair) -> Option { + let pattern0_0 = arg0; + if let &RegPair::RegPair { + hi: pattern1_0, + lo: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 1278. + return Some(pattern1_1); + } + return None; +} + +// Generated as internal constructor for term alu_rrr. 
+pub fn constructor_alu_rrr( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1292. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::AluRRR { + alu_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + rm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_rrsimm16. +pub fn constructor_alu_rrsimm16( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: i16, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1299. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::AluRRSImm16 { + alu_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + imm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_rr. +pub fn constructor_alu_rr( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1306. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRR { + alu_op: pattern1_0.clone(), + rd: expr0_0, + rm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_rx. +pub fn constructor_alu_rx( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1313. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRX { + alu_op: pattern1_0.clone(), + rd: expr0_0, + mem: pattern3_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_rsimm16. +pub fn constructor_alu_rsimm16( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: i16, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1320. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRSImm16 { + alu_op: pattern1_0.clone(), + rd: expr0_0, + imm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_rsimm32. +pub fn constructor_alu_rsimm32( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: i32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1327. 
+ let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRSImm32 { + alu_op: pattern1_0.clone(), + rd: expr0_0, + imm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_ruimm32. +pub fn constructor_alu_ruimm32( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: u32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1334. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRUImm32 { + alu_op: pattern1_0.clone(), + rd: expr0_0, + imm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_ruimm16shifted. +pub fn constructor_alu_ruimm16shifted( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: UImm16Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1341. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRUImm16Shifted { + alu_op: pattern1_0.clone(), + rd: expr0_0, + imm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term alu_ruimm32shifted. +pub fn constructor_alu_ruimm32shifted( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: UImm32Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1348. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::AluRUImm32Shifted { + alu_op: pattern1_0.clone(), + rd: expr0_0, + imm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term smul_wide. +pub fn constructor_smul_wide(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1355. + let expr0_0 = constructor_temp_writable_regpair(ctx)?; + let expr1_0 = MInst::SMulWide { + rn: pattern0_0, + rm: pattern1_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term umul_wide. +pub fn constructor_umul_wide(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1362. + let expr0_0 = constructor_temp_writable_regpair(ctx)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = MInst::Mov64 { + rd: expr1_0, + rm: pattern1_0, + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = MInst::UMulWide { rn: pattern0_0 }; + let expr5_0 = C::emit(ctx, &expr4_0); + let expr6_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr6_0); +} + +// Generated as internal constructor for term sdivmod32. 
+pub fn constructor_sdivmod32( + ctx: &mut C, + arg0: &RegPair, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1370. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; + let expr1_0 = MInst::SDivMod32 { rn: pattern1_0 }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term sdivmod64. +pub fn constructor_sdivmod64( + ctx: &mut C, + arg0: &RegPair, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1377. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; + let expr1_0 = MInst::SDivMod64 { rn: pattern1_0 }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term udivmod32. +pub fn constructor_udivmod32( + ctx: &mut C, + arg0: &RegPair, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1384. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; + let expr1_0 = MInst::UDivMod32 { rn: pattern1_0 }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term udivmod64. +pub fn constructor_udivmod64( + ctx: &mut C, + arg0: &RegPair, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1391. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; + let expr1_0 = MInst::UDivMod64 { rn: pattern1_0 }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term shift_rr. +pub fn constructor_shift_rr( + ctx: &mut C, + arg0: Type, + arg1: &ShiftOp, + arg2: Reg, + arg3: u8, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 1398. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::ShiftRR { + shift_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + shift_imm: pattern3_0, + shift_reg: pattern4_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term unary_rr. +pub fn constructor_unary_rr( + ctx: &mut C, + arg0: Type, + arg1: &UnaryOp, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1405. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::UnaryRR { + op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term cmp_rr. +pub fn constructor_cmp_rr( + ctx: &mut C, + arg0: &CmpOp, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1412. 
+ let expr0_0 = MInst::CmpRR { + op: pattern0_0.clone(), + rn: pattern1_0, + rm: pattern2_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term cmp_rx. +pub fn constructor_cmp_rx( + ctx: &mut C, + arg0: &CmpOp, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1418. + let expr0_0 = MInst::CmpRX { + op: pattern0_0.clone(), + rn: pattern1_0, + mem: pattern2_0.clone(), + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term cmp_rsimm16. +pub fn constructor_cmp_rsimm16( + ctx: &mut C, + arg0: &CmpOp, + arg1: Reg, + arg2: i16, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1424. + let expr0_0 = MInst::CmpRSImm16 { + op: pattern0_0.clone(), + rn: pattern1_0, + imm: pattern2_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term cmp_rsimm32. +pub fn constructor_cmp_rsimm32( + ctx: &mut C, + arg0: &CmpOp, + arg1: Reg, + arg2: i32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1430. + let expr0_0 = MInst::CmpRSImm32 { + op: pattern0_0.clone(), + rn: pattern1_0, + imm: pattern2_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term cmp_ruimm32. +pub fn constructor_cmp_ruimm32( + ctx: &mut C, + arg0: &CmpOp, + arg1: Reg, + arg2: u32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1436. + let expr0_0 = MInst::CmpRUImm32 { + op: pattern0_0.clone(), + rn: pattern1_0, + imm: pattern2_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term atomic_rmw_impl. +pub fn constructor_atomic_rmw_impl( + ctx: &mut C, + arg0: Type, + arg1: &ALUOp, + arg2: Reg, + arg3: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1442. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::AtomicRmw { + alu_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + mem: pattern3_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term atomic_cas32. +pub fn constructor_atomic_cas32( + ctx: &mut C, + arg0: Reg, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1449. 
+ let expr0_0: Type = I32; + let expr1_0 = constructor_copy_writable_reg(ctx, expr0_0, pattern0_0)?; + let expr2_0 = MInst::AtomicCas32 { + rd: expr1_0, + rn: pattern1_0, + mem: pattern2_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term atomic_cas64. +pub fn constructor_atomic_cas64( + ctx: &mut C, + arg0: Reg, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1456. + let expr0_0: Type = I64; + let expr1_0 = constructor_copy_writable_reg(ctx, expr0_0, pattern0_0)?; + let expr2_0 = MInst::AtomicCas64 { + rd: expr1_0, + rn: pattern1_0, + mem: pattern2_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term fence_impl. +pub fn constructor_fence_impl(ctx: &mut C) -> Option { + // Rule at src/isa/s390x/inst.isle line 1463. + let expr0_0 = MInst::Fence; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term load32. +pub fn constructor_load32(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1468. + let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::Load32 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term load64. +pub fn constructor_load64(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1475. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::Load64 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term loadrev16. +pub fn constructor_loadrev16(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1482. + let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::LoadRev16 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term loadrev32. +pub fn constructor_loadrev32(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1489. + let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::LoadRev32 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term loadrev64. +pub fn constructor_loadrev64(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1496. 
+ let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::LoadRev64 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term store8. +pub fn constructor_store8( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1503. + let expr0_0 = MInst::Store8 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store16. +pub fn constructor_store16( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1508. + let expr0_0 = MInst::Store16 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store32. +pub fn constructor_store32( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1513. + let expr0_0 = MInst::Store32 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store64. +pub fn constructor_store64( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1518. + let expr0_0 = MInst::Store64 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store8_imm. +pub fn constructor_store8_imm( + ctx: &mut C, + arg0: u8, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1523. + let expr0_0 = MInst::StoreImm8 { + imm: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store16_imm. +pub fn constructor_store16_imm( + ctx: &mut C, + arg0: i16, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1528. + let expr0_0 = MInst::StoreImm16 { + imm: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store32_simm16. +pub fn constructor_store32_simm16( + ctx: &mut C, + arg0: i16, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1533. + let expr0_0 = MInst::StoreImm32SExt16 { + imm: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term store64_simm16. +pub fn constructor_store64_simm16( + ctx: &mut C, + arg0: i16, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1538. 
+ let expr0_0 = MInst::StoreImm64SExt16 { + imm: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term storerev16. +pub fn constructor_storerev16( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1543. + let expr0_0 = MInst::StoreRev16 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term storerev32. +pub fn constructor_storerev32( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1548. + let expr0_0 = MInst::StoreRev32 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term storerev64. +pub fn constructor_storerev64( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1553. + let expr0_0 = MInst::StoreRev64 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpu_rr. +pub fn constructor_fpu_rr( + ctx: &mut C, + arg0: Type, + arg1: &FPUOp1, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1558. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::FpuRR { + fpu_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term fpu_rrr. +pub fn constructor_fpu_rrr( + ctx: &mut C, + arg0: Type, + arg1: &FPUOp2, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1565. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::FpuRRR { + fpu_op: pattern1_0.clone(), + rd: expr0_0, + rm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term fpu_rrrr. +pub fn constructor_fpu_rrrr( + ctx: &mut C, + arg0: Type, + arg1: &FPUOp3, + arg2: Reg, + arg3: Reg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 1572. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; + let expr1_0 = MInst::FpuRRRR { + fpu_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern3_0, + rm: pattern4_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term fpu_copysign. 
+pub fn constructor_fpu_copysign( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1579. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::FpuCopysign { + rd: expr0_0, + rn: pattern1_0, + rm: pattern2_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term fpu_cmp32. +pub fn constructor_fpu_cmp32( + ctx: &mut C, + arg0: Reg, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1586. + let expr0_0 = MInst::FpuCmp32 { + rn: pattern0_0, + rm: pattern1_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term fpu_cmp64. +pub fn constructor_fpu_cmp64( + ctx: &mut C, + arg0: Reg, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1592. + let expr0_0 = MInst::FpuCmp64 { + rn: pattern0_0, + rm: pattern1_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + +// Generated as internal constructor for term fpu_to_int. +pub fn constructor_fpu_to_int( + ctx: &mut C, + arg0: Type, + arg1: &FpuToIntOp, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1598. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::FpuToInt { + op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + }; + let expr2_0 = C::writable_reg_to_reg(ctx, expr0_0); + let expr3_0 = ProducesFlags::ProducesFlags { + inst: expr1_0, + result: expr2_0, + }; + return Some(expr3_0); +} + +// Generated as internal constructor for term int_to_fpu. +pub fn constructor_int_to_fpu( + ctx: &mut C, + arg0: Type, + arg1: &IntToFpuOp, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1605. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::IntToFpu { + op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term fpu_round. +pub fn constructor_fpu_round( + ctx: &mut C, + arg0: Type, + arg1: &FpuRoundMode, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1612. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::FpuRound { + op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term fpuvec_rrr. +pub fn constructor_fpuvec_rrr( + ctx: &mut C, + arg0: Type, + arg1: &FPUOp2, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1619. 
+ let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = MInst::FpuVecRRR { + fpu_op: pattern1_0.clone(), + rd: expr0_0, + rn: pattern2_0, + rm: pattern3_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term mov_to_fpr. +pub fn constructor_mov_to_fpr(ctx: &mut C, arg0: Reg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1626. + let expr0_0: Type = F64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::MovToFpr { + rd: expr1_0, + rn: pattern0_0, + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term mov_from_fpr. +pub fn constructor_mov_from_fpr(ctx: &mut C, arg0: Reg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1633. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::MovFromFpr { + rd: expr1_0, + rn: pattern0_0, + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term fpu_load32. +pub fn constructor_fpu_load32(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1640. + let expr0_0: Type = F32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::FpuLoad32 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term fpu_load64. +pub fn constructor_fpu_load64(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1647. + let expr0_0: Type = F64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::FpuLoad64 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term fpu_loadrev32. +pub fn constructor_fpu_loadrev32(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1654. + let expr0_0: Type = F32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::FpuLoadRev32 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term fpu_loadrev64. +pub fn constructor_fpu_loadrev64(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1661. + let expr0_0: Type = F64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::FpuLoadRev64 { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term fpu_store32. +pub fn constructor_fpu_store32( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1668. 
+ let expr0_0 = MInst::FpuStore32 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpu_store64. +pub fn constructor_fpu_store64( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1673. + let expr0_0 = MInst::FpuStore64 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpu_storerev32. +pub fn constructor_fpu_storerev32( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1678. + let expr0_0 = MInst::FpuStoreRev32 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpu_storerev64. +pub fn constructor_fpu_storerev64( + ctx: &mut C, + arg0: Reg, + arg1: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1683. + let expr0_0 = MInst::FpuStoreRev64 { + rd: pattern0_0, + mem: pattern1_0.clone(), + }; + let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; + return Some(expr1_0); +} + +// Generated as internal constructor for term load_ext_name_far. +pub fn constructor_load_ext_name_far( + ctx: &mut C, + arg0: BoxExternalName, + arg1: i64, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1688. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::LoadExtNameFar { + rd: expr1_0, + name: pattern0_0, + offset: pattern1_0, + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term load_addr. +pub fn constructor_load_addr(ctx: &mut C, arg0: &MemArg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 1695. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = MInst::LoadAddr { + rd: expr1_0, + mem: pattern0_0.clone(), + }; + let expr3_0 = C::emit(ctx, &expr2_0); + let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term drop_flags. +pub fn constructor_drop_flags(ctx: &mut C, arg0: &ProducesFlags) -> Option { + let pattern0_0 = arg0; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 1702. + let expr0_0 = C::emit(ctx, &pattern1_0); + return Some(pattern1_1); + } + return None; +} + +// Generated as internal constructor for term emit_mov. +pub fn constructor_emit_mov( + ctx: &mut C, + arg0: Type, + arg1: WritableReg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1718. + let expr0_0 = MInst::FpuMove32 { + rd: pattern2_0, + rn: pattern3_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern0_0 == F64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1721. 
+ let expr0_0 = MInst::FpuMove64 { + rd: pattern2_0, + rn: pattern3_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1712. + let expr0_0 = MInst::Mov32 { + rd: pattern2_0, + rm: pattern3_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1715. + let expr0_0 = MInst::Mov64 { + rd: pattern2_0, + rm: pattern3_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term copy_writable_reg. +pub fn constructor_copy_writable_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1726. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = constructor_emit_mov(ctx, pattern0_0, expr0_0, pattern1_0)?; + return Some(expr0_0); +} + +// Generated as internal constructor for term copy_reg. +pub fn constructor_copy_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1733. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr1_0); +} + +// Generated as internal constructor for term emit_imm. +pub fn constructor_emit_imm( + ctx: &mut C, + arg0: Type, + arg1: WritableReg, + arg2: u64, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1788. + let expr0_0 = C::u64_as_u32(ctx, pattern3_0); + let expr1_0 = MInst::LoadFpuConst32 { + rd: pattern2_0, + const_data: expr0_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + return Some(expr2_0); + } + if pattern0_0 == F64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1793. + let expr0_0 = MInst::LoadFpuConst64 { + rd: pattern2_0, + const_data: pattern3_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern1_0) = C::fits_in_16(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1742. + let expr0_0 = C::u64_as_i16(ctx, pattern3_0); + let expr1_0 = MInst::Mov32SImm16 { + rd: pattern2_0, + imm: expr0_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + return Some(expr2_0); + } + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + if let Some(pattern4_0) = C::i16_from_u64(ctx, pattern3_0) { + // Rule at src/isa/s390x/inst.isle line 1746. + let expr0_0 = MInst::Mov32SImm16 { + rd: pattern2_0, + imm: pattern4_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + // Rule at src/isa/s390x/inst.isle line 1750. 
+ let expr0_0 = C::u64_as_u32(ctx, pattern3_0); + let expr1_0 = MInst::Mov32Imm { + rd: pattern2_0, + imm: expr0_0, + }; + let expr2_0 = C::emit(ctx, &expr1_0); + return Some(expr2_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + if let Some(pattern4_0) = C::u64_nonzero_hipart(ctx, pattern3_0) { + if let Some(pattern5_0) = C::u64_nonzero_lopart(ctx, pattern3_0) { + // Rule at src/isa/s390x/inst.isle line 1770. + let expr0_0 = constructor_emit_imm(ctx, pattern1_0, pattern2_0, pattern4_0)?; + let expr1_0 = constructor_emit_insert_imm(ctx, pattern2_0, pattern5_0)?; + return Some(expr1_0); + } + } + if let Some(pattern4_0) = C::i16_from_u64(ctx, pattern3_0) { + // Rule at src/isa/s390x/inst.isle line 1754. + let expr0_0 = MInst::Mov64SImm16 { + rd: pattern2_0, + imm: pattern4_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern4_0) = C::i32_from_u64(ctx, pattern3_0) { + // Rule at src/isa/s390x/inst.isle line 1758. + let expr0_0 = MInst::Mov64SImm32 { + rd: pattern2_0, + imm: pattern4_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern4_0) = C::uimm32shifted_from_u64(ctx, pattern3_0) { + // Rule at src/isa/s390x/inst.isle line 1766. + let expr0_0 = MInst::Mov64UImm32Shifted { + rd: pattern2_0, + imm: pattern4_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern4_0) = C::uimm16shifted_from_u64(ctx, pattern3_0) { + // Rule at src/isa/s390x/inst.isle line 1762. + let expr0_0 = MInst::Mov64UImm16Shifted { + rd: pattern2_0, + imm: pattern4_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + } + return None; +} + +// Generated as internal constructor for term emit_insert_imm. +pub fn constructor_emit_insert_imm( + ctx: &mut C, + arg0: WritableReg, + arg1: u64, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let Some(pattern2_0) = C::uimm32shifted_from_u64(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1783. + let expr0_0 = MInst::Insert64UImm32Shifted { + rd: pattern0_0, + imm: pattern2_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if let Some(pattern2_0) = C::uimm16shifted_from_u64(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1779. + let expr0_0 = MInst::Insert64UImm16Shifted { + rd: pattern0_0, + imm: pattern2_0, + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term imm. +pub fn constructor_imm(ctx: &mut C, arg0: Type, arg1: u64) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1798. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = constructor_emit_imm(ctx, pattern0_0, expr0_0, pattern1_0)?; + let expr2_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr2_0); +} + +// Generated as internal constructor for term imm_regpair_lo. +pub fn constructor_imm_regpair_lo( + ctx: &mut C, + arg0: Type, + arg1: u64, + arg2: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1806. 
+ let expr0_0 = constructor_copy_writable_regpair(ctx, pattern2_0)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_imm(ctx, pattern0_0, expr1_0, pattern1_0)?; + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term imm_regpair_hi. +pub fn constructor_imm_regpair_hi( + ctx: &mut C, + arg0: Type, + arg1: u64, + arg2: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1814. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern2_0)?; + let expr1_0 = constructor_writable_regpair_hi(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_imm(ctx, pattern0_0, expr1_0, pattern1_0)?; + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term ty_ext32. +pub fn constructor_ty_ext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 1824. + let expr0_0: Type = I32; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 1825. + let expr0_0: Type = I32; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 1826. + let expr0_0: Type = I32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 1827. + let expr0_0: Type = I64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term ty_ext64. +pub fn constructor_ty_ext64(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 1831. + let expr0_0: Type = I64; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 1832. + let expr0_0: Type = I64; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 1833. + let expr0_0: Type = I64; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 1834. + let expr0_0: Type = I64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term emit_zext32_reg. +pub fn constructor_emit_zext32_reg( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1839. + let expr0_0: bool = false; + let expr1_0 = C::ty_bits(ctx, pattern1_0); + let expr2_0: u8 = 32; + let expr3_0 = MInst::Extend { + rd: pattern0_0, + rn: pattern2_0, + signed: expr0_0, + from_bits: expr1_0, + to_bits: expr2_0, + }; + let expr4_0 = C::emit(ctx, &expr3_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term emit_sext32_reg. +pub fn constructor_emit_sext32_reg( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1845. + let expr0_0: bool = true; + let expr1_0 = C::ty_bits(ctx, pattern1_0); + let expr2_0: u8 = 32; + let expr3_0 = MInst::Extend { + rd: pattern0_0, + rn: pattern2_0, + signed: expr0_0, + from_bits: expr1_0, + to_bits: expr2_0, + }; + let expr4_0 = C::emit(ctx, &expr3_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term emit_zext64_reg. 
+pub fn constructor_emit_zext64_reg( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1851. + let expr0_0: bool = false; + let expr1_0 = C::ty_bits(ctx, pattern1_0); + let expr2_0: u8 = 64; + let expr3_0 = MInst::Extend { + rd: pattern0_0, + rn: pattern2_0, + signed: expr0_0, + from_bits: expr1_0, + to_bits: expr2_0, + }; + let expr4_0 = C::emit(ctx, &expr3_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term emit_sext64_reg. +pub fn constructor_emit_sext64_reg( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1857. + let expr0_0: bool = true; + let expr1_0 = C::ty_bits(ctx, pattern1_0); + let expr2_0: u8 = 64; + let expr3_0 = MInst::Extend { + rd: pattern0_0, + rn: pattern2_0, + signed: expr0_0, + from_bits: expr1_0, + to_bits: expr2_0, + }; + let expr4_0 = C::emit(ctx, &expr3_0); + return Some(expr4_0); +} + +// Generated as internal constructor for term zext32_reg. +pub fn constructor_zext32_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1863. + let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_zext32_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term sext32_reg. +pub fn constructor_sext32_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1871. + let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_sext32_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term zext64_reg. +pub fn constructor_zext64_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1879. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_zext64_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term sext64_reg. +pub fn constructor_sext64_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1887. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_sext64_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term emit_zext32_mem. +pub fn constructor_emit_zext32_mem( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1895. 
+ let expr0_0 = MInst::Load32ZExt8 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1896. + let expr0_0 = MInst::Load32ZExt16 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_sext32_mem. +pub fn constructor_emit_sext32_mem( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1900. + let expr0_0 = MInst::Load32SExt8 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1901. + let expr0_0 = MInst::Load32SExt16 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_zext64_mem. +pub fn constructor_emit_zext64_mem( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1905. + let expr0_0 = MInst::Load64ZExt8 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1906. + let expr0_0 = MInst::Load64ZExt16 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern1_0 == I32 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1907. + let expr0_0 = MInst::Load64ZExt32 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_sext64_mem. +pub fn constructor_emit_sext64_mem( + ctx: &mut C, + arg0: WritableReg, + arg1: Type, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1911. + let expr0_0 = MInst::Load64SExt8 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1912. + let expr0_0 = MInst::Load64SExt16 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern1_0 == I32 { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1913. + let expr0_0 = MInst::Load64SExt32 { + rd: pattern0_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term zext32_mem. +pub fn constructor_zext32_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1917. 
+ let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_zext32_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term sext32_mem. +pub fn constructor_sext32_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1924. + let expr0_0: Type = I32; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_sext32_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term zext64_mem. +pub fn constructor_zext64_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1931. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_zext64_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term sext64_mem. +pub fn constructor_sext64_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1938. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = constructor_emit_sext64_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; + let expr3_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr3_0); +} + +// Generated as internal constructor for term emit_put_in_reg_zext32. +pub fn constructor_emit_put_in_reg_zext32( + ctx: &mut C, + arg0: WritableReg, + arg1: Value, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + if let Some(pattern3_0) = C::u64_from_value(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1946. + let expr0_0 = constructor_ty_ext32(ctx, pattern2_0)?; + let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::fits_in_16(ctx, pattern2_0) { + if let Some(pattern4_0) = C::sinkable_inst(ctx, pattern1_0) { + let pattern5_0 = C::inst_data(ctx, pattern4_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::bigendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/inst.isle line 1948. + let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; + let expr1_0 = + constructor_emit_zext32_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 1950. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_zext32_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern2_0) { + // Rule at src/isa/s390x/inst.isle line 1952. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_put_in_reg_sext32. 
+pub fn constructor_emit_put_in_reg_sext32( + ctx: &mut C, + arg0: WritableReg, + arg1: Value, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + if let Some(pattern3_0) = C::u64_from_signed_value(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1957. + let expr0_0 = constructor_ty_ext32(ctx, pattern2_0)?; + let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::fits_in_16(ctx, pattern2_0) { + if let Some(pattern4_0) = C::sinkable_inst(ctx, pattern1_0) { + let pattern5_0 = C::inst_data(ctx, pattern4_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::bigendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/inst.isle line 1959. + let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; + let expr1_0 = + constructor_emit_sext32_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 1961. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_sext32_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern2_0) { + // Rule at src/isa/s390x/inst.isle line 1963. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_put_in_reg_zext64. +pub fn constructor_emit_put_in_reg_zext64( + ctx: &mut C, + arg0: WritableReg, + arg1: Value, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + if let Some(pattern3_0) = C::u64_from_value(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1968. + let expr0_0 = constructor_ty_ext64(ctx, pattern2_0)?; + let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::gpr32_ty(ctx, pattern2_0) { + if let Some(pattern4_0) = C::sinkable_inst(ctx, pattern1_0) { + let pattern5_0 = C::inst_data(ctx, pattern4_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::bigendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/inst.isle line 1970. + let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; + let expr1_0 = + constructor_emit_zext64_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 1972. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_zext64_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::gpr64_ty(ctx, pattern2_0) { + // Rule at src/isa/s390x/inst.isle line 1974. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_put_in_reg_sext64. 
+pub fn constructor_emit_put_in_reg_sext64( + ctx: &mut C, + arg0: WritableReg, + arg1: Value, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + if let Some(pattern3_0) = C::u64_from_signed_value(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1979. + let expr0_0 = constructor_ty_ext64(ctx, pattern2_0)?; + let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::gpr32_ty(ctx, pattern2_0) { + if let Some(pattern4_0) = C::sinkable_inst(ctx, pattern1_0) { + let pattern5_0 = C::inst_data(ctx, pattern4_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::bigendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/inst.isle line 1981. + let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; + let expr1_0 = + constructor_emit_sext64_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 1983. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_sext64_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern3_0) = C::gpr64_ty(ctx, pattern2_0) { + // Rule at src/isa/s390x/inst.isle line 1985. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term put_in_reg_zext32. +pub fn constructor_put_in_reg_zext32(ctx: &mut C, arg0: Value) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::value_type(ctx, pattern0_0); + if let Some(pattern2_0) = C::u64_from_value(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 1990. + let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::fits_in_16(ctx, pattern1_0) { + if let Some(pattern3_0) = C::sinkable_inst(ctx, pattern0_0) { + let pattern4_0 = C::inst_data(ctx, pattern3_0); + if let &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } = &pattern4_0 + { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/inst.isle line 1992. + let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; + let expr1_0 = constructor_zext32_mem(ctx, pattern2_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 1994. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + let expr1_0 = constructor_zext32_reg(ctx, pattern2_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 1996. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term put_in_reg_sext32. +pub fn constructor_put_in_reg_sext32(ctx: &mut C, arg0: Value) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::value_type(ctx, pattern0_0); + if let Some(pattern2_0) = C::u64_from_signed_value(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2001. 
+ let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::fits_in_16(ctx, pattern1_0) { + if let Some(pattern3_0) = C::sinkable_inst(ctx, pattern0_0) { + let pattern4_0 = C::inst_data(ctx, pattern3_0); + if let &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } = &pattern4_0 + { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/inst.isle line 2003. + let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; + let expr1_0 = constructor_sext32_mem(ctx, pattern2_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 2005. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + let expr1_0 = constructor_sext32_reg(ctx, pattern2_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 2007. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term put_in_reg_zext64. +pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::value_type(ctx, pattern0_0); + if let Some(pattern2_0) = C::u64_from_value(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2012. + let expr0_0 = constructor_ty_ext64(ctx, pattern1_0)?; + let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::gpr32_ty(ctx, pattern1_0) { + if let Some(pattern3_0) = C::sinkable_inst(ctx, pattern0_0) { + let pattern4_0 = C::inst_data(ctx, pattern3_0); + if let &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } = &pattern4_0 + { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/inst.isle line 2014. + let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; + let expr1_0 = constructor_zext64_mem(ctx, pattern2_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 2016. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + let expr1_0 = constructor_zext64_reg(ctx, pattern2_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 2018. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term put_in_reg_sext64. +pub fn constructor_put_in_reg_sext64(ctx: &mut C, arg0: Value) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::value_type(ctx, pattern0_0); + if let Some(pattern2_0) = C::u64_from_signed_value(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2023. 
+ let expr0_0 = constructor_ty_ext64(ctx, pattern1_0)?; + let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::gpr32_ty(ctx, pattern1_0) { + if let Some(pattern3_0) = C::sinkable_inst(ctx, pattern0_0) { + let pattern4_0 = C::inst_data(ctx, pattern3_0); + if let &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } = &pattern4_0 + { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/inst.isle line 2025. + let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; + let expr1_0 = constructor_sext64_mem(ctx, pattern2_0, &expr0_0)?; + return Some(expr1_0); + } + } + } + } + // Rule at src/isa/s390x/inst.isle line 2027. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + let expr1_0 = constructor_sext64_reg(ctx, pattern2_0, expr0_0)?; + return Some(expr1_0); + } + if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { + // Rule at src/isa/s390x/inst.isle line 2029. + let expr0_0 = C::put_in_reg(ctx, pattern0_0); + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term put_in_regpair_lo_zext32. +pub fn constructor_put_in_regpair_lo_zext32( + ctx: &mut C, + arg0: Value, + arg1: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2035. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_put_in_reg_zext32(ctx, expr1_0, pattern0_0)?; + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term put_in_regpair_lo_sext32. +pub fn constructor_put_in_regpair_lo_sext32( + ctx: &mut C, + arg0: Value, + arg1: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2043. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_put_in_reg_sext32(ctx, expr1_0, pattern0_0)?; + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term put_in_regpair_lo_zext64. +pub fn constructor_put_in_regpair_lo_zext64( + ctx: &mut C, + arg0: Value, + arg1: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2051. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_put_in_reg_zext64(ctx, expr1_0, pattern0_0)?; + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term put_in_regpair_lo_sext64. +pub fn constructor_put_in_regpair_lo_sext64( + ctx: &mut C, + arg0: Value, + arg1: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2059. 
+ let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_put_in_reg_sext64(ctx, expr1_0, pattern0_0)?; + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); +} + +// Generated as internal constructor for term emit_cmov_imm. +pub fn constructor_emit_cmov_imm( + ctx: &mut C, + arg0: Type, + arg1: WritableReg, + arg2: &Cond, + arg3: i16, +) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2069. + let expr0_0 = MInst::CMov32SImm16 { + rd: pattern2_0, + cond: pattern3_0.clone(), + imm: pattern4_0, + }; + let expr1_0 = C::writable_reg_to_reg(ctx, pattern2_0); + let expr2_0 = ConsumesFlags::ConsumesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2072. + let expr0_0 = MInst::CMov64SImm16 { + rd: pattern2_0, + cond: pattern3_0.clone(), + imm: pattern4_0, + }; + let expr1_0 = C::writable_reg_to_reg(ctx, pattern2_0); + let expr2_0 = ConsumesFlags::ConsumesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term cmov_imm. +pub fn constructor_cmov_imm( + ctx: &mut C, + arg0: Type, + arg1: &Cond, + arg2: i16, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2078. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern3_0)?; + let expr1_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term cmov_imm_regpair_lo. +pub fn constructor_cmov_imm_regpair_lo( + ctx: &mut C, + arg0: Type, + arg1: &ProducesFlags, + arg2: &Cond, + arg3: i16, + arg4: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2085. + let expr0_0 = constructor_copy_writable_regpair(ctx, pattern4_0)?; + let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr1_0, pattern2_0, pattern3_0)?; + let expr3_0 = constructor_with_flags_1(ctx, pattern1_0, &expr2_0)?; + let expr4_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr4_0); +} + +// Generated as internal constructor for term cmov_imm_regpair_hi. +pub fn constructor_cmov_imm_regpair_hi( + ctx: &mut C, + arg0: Type, + arg1: &ProducesFlags, + arg2: &Cond, + arg3: i16, + arg4: &RegPair, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2094. 
+ let expr0_0 = constructor_copy_writable_regpair(ctx, pattern4_0)?; + let expr1_0 = constructor_writable_regpair_hi(ctx, &expr0_0)?; + let expr2_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr1_0, pattern2_0, pattern3_0)?; + let expr3_0 = constructor_with_flags_1(ctx, pattern1_0, &expr2_0)?; + let expr4_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr4_0); +} + +// Generated as internal constructor for term emit_cmov_reg. +pub fn constructor_emit_cmov_reg( + ctx: &mut C, + arg0: Type, + arg1: WritableReg, + arg2: &Cond, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2108. + let expr0_0 = MInst::FpuCMov32 { + rd: pattern2_0, + cond: pattern3_0.clone(), + rm: pattern4_0, + }; + let expr1_0 = C::writable_reg_to_reg(ctx, pattern2_0); + let expr2_0 = ConsumesFlags::ConsumesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); + } + if pattern0_0 == F64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2111. + let expr0_0 = MInst::FpuCMov64 { + rd: pattern2_0, + cond: pattern3_0.clone(), + rm: pattern4_0, + }; + let expr1_0 = C::writable_reg_to_reg(ctx, pattern2_0); + let expr2_0 = ConsumesFlags::ConsumesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); + } + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2102. + let expr0_0 = MInst::CMov32 { + rd: pattern2_0, + cond: pattern3_0.clone(), + rm: pattern4_0, + }; + let expr1_0 = C::writable_reg_to_reg(ctx, pattern2_0); + let expr2_0 = ConsumesFlags::ConsumesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2105. + let expr0_0 = MInst::CMov64 { + rd: pattern2_0, + cond: pattern3_0.clone(), + rm: pattern4_0, + }; + let expr1_0 = C::writable_reg_to_reg(ctx, pattern2_0); + let expr2_0 = ConsumesFlags::ConsumesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term cmov_reg. +pub fn constructor_cmov_reg( + ctx: &mut C, + arg0: Type, + arg1: &Cond, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2117. + let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern3_0)?; + let expr1_0 = constructor_emit_cmov_reg(ctx, pattern0_0, expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term trap_if. +pub fn constructor_trap_if( + ctx: &mut C, + arg0: &ProducesFlags, + arg1: &Cond, + arg2: &TrapCode, +) -> Option { + let pattern0_0 = arg0; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2125. 
+ let expr0_0 = C::emit(ctx, &pattern1_0); + let expr1_0 = MInst::TrapIf { + cond: pattern2_0.clone(), + trap_code: pattern3_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + return Some(pattern1_1); + } + return None; +} + +// Generated as internal constructor for term icmps_reg_and_trap. +pub fn constructor_icmps_reg_and_trap( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, + arg3: &Cond, + arg4: &TrapCode, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2131. + let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; + let expr1_0 = MInst::CmpTrapRR { + op: expr0_0, + rn: pattern1_0, + rm: pattern2_0, + cond: pattern3_0.clone(), + trap_code: pattern4_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::invalid_reg(ctx); + return Some(expr3_0); +} + +// Generated as internal constructor for term icmps_simm16_and_trap. +pub fn constructor_icmps_simm16_and_trap( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i16, + arg3: &Cond, + arg4: &TrapCode, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2137. + let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; + let expr1_0 = MInst::CmpTrapRSImm16 { + op: expr0_0, + rn: pattern1_0, + imm: pattern2_0, + cond: pattern3_0.clone(), + trap_code: pattern4_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::invalid_reg(ctx); + return Some(expr3_0); +} + +// Generated as internal constructor for term icmpu_reg_and_trap. +pub fn constructor_icmpu_reg_and_trap( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, + arg3: &Cond, + arg4: &TrapCode, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2143. + let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; + let expr1_0 = MInst::CmpTrapRR { + op: expr0_0, + rn: pattern1_0, + rm: pattern2_0, + cond: pattern3_0.clone(), + trap_code: pattern4_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::invalid_reg(ctx); + return Some(expr3_0); +} + +// Generated as internal constructor for term icmpu_uimm16_and_trap. +pub fn constructor_icmpu_uimm16_and_trap( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u16, + arg3: &Cond, + arg4: &TrapCode, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2149. + let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; + let expr1_0 = MInst::CmpTrapRUImm16 { + op: expr0_0, + rn: pattern1_0, + imm: pattern2_0, + cond: pattern3_0.clone(), + trap_code: pattern4_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = C::invalid_reg(ctx); + return Some(expr3_0); +} + +// Generated as internal constructor for term bool. +pub fn constructor_bool( + ctx: &mut C, + arg0: &ProducesFlags, + arg1: &Cond, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2162. + let expr0_0 = ProducesBool::ProducesBool { + producer: pattern0_0.clone(), + cond: pattern1_0.clone(), + }; + return Some(expr0_0); +} + +// Generated as internal constructor for term invert_bool. 
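+// Note (hand-written; not part of the ISLE compiler output): the compare-and-trap
+// constructors above emit their instruction for its side effect only, which is why
+// they return an invalid/dummy register, while bool/invert_bool merely pair a
+// flag-producing instruction with the condition under which the result is "true".
+// Rough sketch of the trapping behaviour (illustrative only; Cond fixed to "equal"):
+//
+//     fn cmp_trap_eq(lhs: i64, rhs: i64, trap: impl FnOnce()) {
+//         if lhs == rhs {
+//             trap();
+//         }
+//     }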
+pub fn constructor_invert_bool( + ctx: &mut C, + arg0: &ProducesBool, +) -> Option { + let pattern0_0 = arg0; + if let &ProducesBool::ProducesBool { + producer: ref pattern1_0, + cond: ref pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 2166. + let expr0_0 = C::invert_cond(ctx, &pattern1_1); + let expr1_0 = constructor_bool(ctx, &pattern1_0, &expr0_0)?; + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term emit_producer. +pub fn constructor_emit_producer(ctx: &mut C, arg0: &ProducesFlags) -> Option { + let pattern0_0 = arg0; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 2175. + let expr0_0 = C::emit(ctx, &pattern1_0); + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term emit_consumer. +pub fn constructor_emit_consumer(ctx: &mut C, arg0: &ConsumesFlags) -> Option { + let pattern0_0 = arg0; + if let &ConsumesFlags::ConsumesFlags { + inst: ref pattern1_0, + result: pattern1_1, + } = pattern0_0 + { + // Rule at src/isa/s390x/inst.isle line 2177. + let expr0_0 = C::emit(ctx, &pattern1_0); + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term select_bool_reg. +pub fn constructor_select_bool_reg( + ctx: &mut C, + arg0: Type, + arg1: &ProducesBool, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let &ProducesBool::ProducesBool { + producer: ref pattern2_0, + cond: ref pattern2_1, + } = pattern1_0 + { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2181. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = constructor_emit_producer(ctx, &pattern2_0)?; + let expr2_0 = constructor_emit_mov(ctx, pattern0_0, expr0_0, pattern4_0)?; + let expr3_0 = constructor_emit_cmov_reg(ctx, pattern0_0, expr0_0, &pattern2_1, pattern3_0)?; + let expr4_0 = constructor_emit_consumer(ctx, &expr3_0)?; + let expr5_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr5_0); + } + return None; +} + +// Generated as internal constructor for term select_bool_imm. +pub fn constructor_select_bool_imm( + ctx: &mut C, + arg0: Type, + arg1: &ProducesBool, + arg2: i16, + arg3: u64, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let &ProducesBool::ProducesBool { + producer: ref pattern2_0, + cond: ref pattern2_1, + } = pattern1_0 + { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2190. + let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); + let expr1_0 = constructor_emit_producer(ctx, &pattern2_0)?; + let expr2_0 = constructor_emit_imm(ctx, pattern0_0, expr0_0, pattern4_0)?; + let expr3_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr0_0, &pattern2_1, pattern3_0)?; + let expr4_0 = constructor_emit_consumer(ctx, &expr3_0)?; + let expr5_0 = C::writable_reg_to_reg(ctx, expr0_0); + return Some(expr5_0); + } + return None; +} + +// Generated as internal constructor for term lower_bool. +pub fn constructor_lower_bool( + ctx: &mut C, + arg0: Type, + arg1: &ProducesBool, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == B1 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2200. 
+ let expr0_0: Type = B1; + let expr1_0: i16 = 1; + let expr2_0: u64 = 0; + let expr3_0 = constructor_select_bool_imm(ctx, expr0_0, pattern2_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + if pattern0_0 == B8 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2201. + let expr0_0: Type = B8; + let expr1_0: i16 = -1; + let expr2_0: u64 = 0; + let expr3_0 = constructor_select_bool_imm(ctx, expr0_0, pattern2_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + if pattern0_0 == B16 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2202. + let expr0_0: Type = B16; + let expr1_0: i16 = -1; + let expr2_0: u64 = 0; + let expr3_0 = constructor_select_bool_imm(ctx, expr0_0, pattern2_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + if pattern0_0 == B32 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2203. + let expr0_0: Type = B32; + let expr1_0: i16 = -1; + let expr2_0: u64 = 0; + let expr3_0 = constructor_select_bool_imm(ctx, expr0_0, pattern2_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + if pattern0_0 == B64 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2204. + let expr0_0: Type = B64; + let expr1_0: i16 = -1; + let expr2_0: u64 = 0; + let expr3_0 = constructor_select_bool_imm(ctx, expr0_0, pattern2_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + return None; +} + +// Generated as internal constructor for term clz_reg. +pub fn constructor_clz_reg(ctx: &mut C, arg0: i16, arg1: Reg) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == 64 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2213. + let expr0_0 = constructor_temp_writable_regpair(ctx)?; + let expr1_0 = MInst::Flogr { rn: pattern2_0 }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr3_0); + } + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2222. + let expr0_0 = constructor_temp_writable_regpair(ctx)?; + let expr1_0 = MInst::Flogr { rn: pattern1_0 }; + let expr2_0 = C::emit(ctx, &expr1_0); + let expr3_0 = constructor_writable_regpair_hi(ctx, &expr0_0)?; + let expr4_0 = IntCC::Equal; + let expr5_0 = C::intcc_as_cond(ctx, &expr4_0); + let expr6_0 = MInst::CMov64SImm16 { + rd: expr3_0, + cond: expr5_0, + imm: pattern0_0, + }; + let expr7_0 = C::emit(ctx, &expr6_0); + let expr8_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; + return Some(expr8_0); +} + +// Generated as internal constructor for term aluop_add. +pub fn constructor_aluop_add(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 2233. + let expr0_0 = ALUOp::Add32; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2234. + let expr0_0 = ALUOp::Add32; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2235. + let expr0_0 = ALUOp::Add32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2236. + let expr0_0 = ALUOp::Add64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_add_sext16. +pub fn constructor_aluop_add_sext16(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2239. + let expr0_0 = ALUOp::Add32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2240. 
+ let expr0_0 = ALUOp::Add32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2241. + let expr0_0 = ALUOp::Add64Ext16; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_add_sext32. +pub fn constructor_aluop_add_sext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2244. + let expr0_0 = ALUOp::Add64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term add_reg. +pub fn constructor_add_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2247. + let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_reg_sext32. +pub fn constructor_add_reg_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2250. + let expr0_0 = constructor_aluop_add_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_simm16. +pub fn constructor_add_simm16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i16, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2253. + let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrsimm16(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_simm32. +pub fn constructor_add_simm32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2256. + let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rsimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_mem. +pub fn constructor_add_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2259. + let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_mem_sext16. +pub fn constructor_add_mem_sext16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2262. + let expr0_0 = constructor_aluop_add_sext16(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_mem_sext32. 
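+// Note (hand-written; not part of the ISLE compiler output): the aluop_* selectors
+// above map I8 and I16 onto the 32-bit ALU ops; for adds, subs and muls only the
+// low bits of the result are significant for narrow types, so computing in 32 bits
+// and truncating is exact.  Illustrative sketch only:
+//
+//     fn add_i8_via_i32(a: i8, b: i8) -> i8 {
+//         (a as i32).wrapping_add(b as i32) as i8
+//     }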
+pub fn constructor_add_mem_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2265. + let expr0_0 = constructor_aluop_add_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_add_logical. +pub fn constructor_aluop_add_logical(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2271. + let expr0_0 = ALUOp::AddLogical32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2272. + let expr0_0 = ALUOp::AddLogical64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_add_logical_zext32. +pub fn constructor_aluop_add_logical_zext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2275. + let expr0_0 = ALUOp::AddLogical64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term add_logical_reg. +pub fn constructor_add_logical_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2278. + let expr0_0 = constructor_aluop_add_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_logical_reg_zext32. +pub fn constructor_add_logical_reg_zext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2281. + let expr0_0 = constructor_aluop_add_logical_zext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_logical_zimm32. +pub fn constructor_add_logical_zimm32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2284. + let expr0_0 = constructor_aluop_add_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_ruimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_logical_mem. +pub fn constructor_add_logical_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2287. + let expr0_0 = constructor_aluop_add_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term add_logical_mem_zext32. +pub fn constructor_add_logical_mem_zext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2290. 
+ let expr0_0 = constructor_aluop_add_logical_zext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_sub. +pub fn constructor_aluop_sub(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 2296. + let expr0_0 = ALUOp::Sub32; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2297. + let expr0_0 = ALUOp::Sub32; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2298. + let expr0_0 = ALUOp::Sub32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2299. + let expr0_0 = ALUOp::Sub64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_sub_sext16. +pub fn constructor_aluop_sub_sext16(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2302. + let expr0_0 = ALUOp::Sub32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2303. + let expr0_0 = ALUOp::Sub32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2304. + let expr0_0 = ALUOp::Sub64Ext16; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_sub_sext32. +pub fn constructor_aluop_sub_sext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2307. + let expr0_0 = ALUOp::Sub64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term sub_reg. +pub fn constructor_sub_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2310. + let expr0_0 = constructor_aluop_sub(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_reg_sext32. +pub fn constructor_sub_reg_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2313. + let expr0_0 = constructor_aluop_sub_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_mem. +pub fn constructor_sub_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2316. + let expr0_0 = constructor_aluop_sub(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_mem_sext16. +pub fn constructor_sub_mem_sext16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2319. 
+ let expr0_0 = constructor_aluop_sub_sext16(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_mem_sext32. +pub fn constructor_sub_mem_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2322. + let expr0_0 = constructor_aluop_sub_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_sub_logical. +pub fn constructor_aluop_sub_logical(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2328. + let expr0_0 = ALUOp::SubLogical32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2329. + let expr0_0 = ALUOp::SubLogical64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_sub_logical_zext32. +pub fn constructor_aluop_sub_logical_zext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2332. + let expr0_0 = ALUOp::SubLogical64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term sub_logical_reg. +pub fn constructor_sub_logical_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2335. + let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_logical_reg_zext32. +pub fn constructor_sub_logical_reg_zext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2338. + let expr0_0 = constructor_aluop_sub_logical_zext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_logical_zimm32. +pub fn constructor_sub_logical_zimm32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2341. + let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_ruimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_logical_mem. +pub fn constructor_sub_logical_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2344. + let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sub_logical_mem_zext32. 
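+// Note (hand-written; not part of the ISLE compiler output): the *_logical add/sub
+// helpers select the unsigned, carry/borrow-reporting instruction forms, for use
+// where a later instruction consumes the resulting carry flag.  Roughly
+// (illustrative only):
+//
+//     fn sub_logical32(a: u32, b: u32) -> (u32, bool) {
+//         a.overflowing_sub(b) // (wrapped result, borrow-out)
+//     }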
+pub fn constructor_sub_logical_mem_zext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2347. + let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_mul. +pub fn constructor_aluop_mul(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 2353. + let expr0_0 = ALUOp::Mul32; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2354. + let expr0_0 = ALUOp::Mul32; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2355. + let expr0_0 = ALUOp::Mul32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2356. + let expr0_0 = ALUOp::Mul64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_mul_sext16. +pub fn constructor_aluop_mul_sext16(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2359. + let expr0_0 = ALUOp::Mul32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2360. + let expr0_0 = ALUOp::Mul32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2361. + let expr0_0 = ALUOp::Mul64Ext16; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_mul_sext32. +pub fn constructor_aluop_mul_sext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2364. + let expr0_0 = ALUOp::Mul64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term mul_reg. +pub fn constructor_mul_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2367. + let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term mul_reg_sext32. +pub fn constructor_mul_reg_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2370. + let expr0_0 = constructor_aluop_mul_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term mul_simm16. +pub fn constructor_mul_simm16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i16, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2373. + let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rsimm16(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term mul_simm32. 
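+// Note (hand-written; not part of the ISLE compiler output): mul_simm16 and
+// mul_simm32 differ only in the immediate encoding they select; the arithmetic is
+// the same signed multiply (illustrative only):
+//
+//     fn mul_by_imm(a: i64, imm: i32) -> i64 { a.wrapping_mul(imm as i64) }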
+pub fn constructor_mul_simm32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2376. + let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rsimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term mul_mem. +pub fn constructor_mul_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2379. + let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term mul_mem_sext16. +pub fn constructor_mul_mem_sext16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2382. + let expr0_0 = constructor_aluop_mul_sext16(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term mul_mem_sext32. +pub fn constructor_mul_mem_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2385. + let expr0_0 = constructor_aluop_mul_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term udivmod. +pub fn constructor_udivmod( + ctx: &mut C, + arg0: Type, + arg1: &RegPair, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2391. + let expr0_0 = constructor_udivmod32(ctx, pattern2_0, pattern3_0)?; + return Some(expr0_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2392. + let expr0_0 = constructor_udivmod64(ctx, pattern2_0, pattern3_0)?; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term sdivmod. +pub fn constructor_sdivmod( + ctx: &mut C, + arg0: Type, + arg1: &RegPair, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2398. + let expr0_0 = constructor_sdivmod32(ctx, pattern2_0, pattern3_0)?; + return Some(expr0_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2399. + let expr0_0 = constructor_sdivmod64(ctx, pattern2_0, pattern3_0)?; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term aluop_and. +pub fn constructor_aluop_and(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2405. + let expr0_0 = ALUOp::And32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2406. 
+ let expr0_0 = ALUOp::And64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term and_reg. +pub fn constructor_and_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2409. + let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term and_uimm16shifted. +pub fn constructor_and_uimm16shifted( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: UImm16Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2412. + let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; + let expr1_0 = + constructor_alu_ruimm16shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term and_uimm32shifted. +pub fn constructor_and_uimm32shifted( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: UImm32Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2415. + let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; + let expr1_0 = + constructor_alu_ruimm32shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term and_mem. +pub fn constructor_and_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2418. + let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_or. +pub fn constructor_aluop_or(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2424. + let expr0_0 = ALUOp::Orr32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2425. + let expr0_0 = ALUOp::Orr64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term or_reg. +pub fn constructor_or_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2428. + let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term or_uimm16shifted. +pub fn constructor_or_uimm16shifted( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: UImm16Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2431. + let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; + let expr1_0 = + constructor_alu_ruimm16shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term or_uimm32shifted. 
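+// Note (hand-written; not part of the ISLE compiler output): a UImm16Shifted or
+// UImm32Shifted is a 16- or 32-bit immediate together with the bit offset at which
+// it is applied inside the 64-bit register, so the value it contributes is roughly
+// (illustrative only):
+//
+//     fn uimm32shifted_bits(imm: u32, shift: u8) -> u64 { (imm as u64) << shift }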
+pub fn constructor_or_uimm32shifted( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: UImm32Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2434. + let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; + let expr1_0 = + constructor_alu_ruimm32shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term or_mem. +pub fn constructor_or_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2437. + let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_xor. +pub fn constructor_aluop_xor(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2443. + let expr0_0 = ALUOp::Xor32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2444. + let expr0_0 = ALUOp::Xor64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term xor_reg. +pub fn constructor_xor_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2447. + let expr0_0 = constructor_aluop_xor(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term xor_uimm32shifted. +pub fn constructor_xor_uimm32shifted( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: UImm32Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2450. + let expr0_0 = constructor_aluop_xor(ctx, pattern0_0)?; + let expr1_0 = + constructor_alu_ruimm32shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term xor_mem. +pub fn constructor_xor_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2453. + let expr0_0 = constructor_aluop_xor(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term not_reg. +pub fn constructor_not_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2459. + let expr0_0: u32 = 4294967295; + let expr1_0: u8 = 0; + let expr2_0 = C::uimm32shifted(ctx, expr0_0, expr1_0); + let expr3_0 = constructor_xor_uimm32shifted(ctx, pattern1_0, pattern2_0, expr2_0)?; + return Some(expr3_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2461. 
+ let expr0_0: u32 = 4294967295; + let expr1_0: u8 = 0; + let expr2_0 = C::uimm32shifted(ctx, expr0_0, expr1_0); + let expr3_0 = constructor_xor_uimm32shifted(ctx, pattern1_0, pattern2_0, expr2_0)?; + let expr4_0: u32 = 4294967295; + let expr5_0: u8 = 32; + let expr6_0 = C::uimm32shifted(ctx, expr4_0, expr5_0); + let expr7_0 = constructor_xor_uimm32shifted(ctx, pattern1_0, expr3_0, expr6_0)?; + return Some(expr7_0); + } + return None; +} + +// Generated as internal constructor for term aluop_and_not. +pub fn constructor_aluop_and_not(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2470. + let expr0_0 = ALUOp::AndNot32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2471. + let expr0_0 = ALUOp::AndNot64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term and_not_reg. +pub fn constructor_and_not_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2474. + let expr0_0 = constructor_aluop_and_not(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_or_not. +pub fn constructor_aluop_or_not(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2480. + let expr0_0 = ALUOp::OrrNot32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2481. + let expr0_0 = ALUOp::OrrNot64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term or_not_reg. +pub fn constructor_or_not_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2484. + let expr0_0 = constructor_aluop_or_not(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term aluop_xor_not. +pub fn constructor_aluop_xor_not(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2490. + let expr0_0 = ALUOp::XorNot32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + // Rule at src/isa/s390x/inst.isle line 2491. + let expr0_0 = ALUOp::XorNot64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term xor_not_reg. +pub fn constructor_xor_not_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2494. + let expr0_0 = constructor_aluop_xor_not(ctx, pattern0_0)?; + let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term unaryop_abs. 
+pub fn constructor_unaryop_abs(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2500. + let expr0_0 = UnaryOp::Abs32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2501. + let expr0_0 = UnaryOp::Abs64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term unaryop_abs_sext32. +pub fn constructor_unaryop_abs_sext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2504. + let expr0_0 = UnaryOp::Abs64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term abs_reg. +pub fn constructor_abs_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2507. + let expr0_0 = constructor_unaryop_abs(ctx, pattern0_0)?; + let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term abs_reg_sext32. +pub fn constructor_abs_reg_sext32(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2510. + let expr0_0 = constructor_unaryop_abs_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term unaryop_neg. +pub fn constructor_unaryop_neg(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 2516. + let expr0_0 = UnaryOp::Neg32; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2517. + let expr0_0 = UnaryOp::Neg32; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2518. + let expr0_0 = UnaryOp::Neg32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2519. + let expr0_0 = UnaryOp::Neg64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term unaryop_neg_sext32. +pub fn constructor_unaryop_neg_sext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2522. + let expr0_0 = UnaryOp::Neg64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term neg_reg. +pub fn constructor_neg_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2525. + let expr0_0 = constructor_unaryop_neg(ctx, pattern0_0)?; + let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term neg_reg_sext32. +pub fn constructor_neg_reg_sext32(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2528. + let expr0_0 = constructor_unaryop_neg_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term shiftop_rot. 
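+// The shift/rotate helpers below share one convention: shift_rr takes both an
+// immediate amount and an amount register (the machine form apparently adds
+// the two). The *_reg variants pass immediate 0 together with the amount
+// register, while the *_imm variants pass the immediate together with the
+// zero register; illustratively, (lshl_imm ty x 3) expands to
+// (shift_rr ty (shiftop_lshl ty) x 3 (zero_reg)).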
+pub fn constructor_shiftop_rot(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2534. + let expr0_0 = ShiftOp::RotL32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2535. + let expr0_0 = ShiftOp::RotL64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term rot_reg. +pub fn constructor_rot_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2538. + let expr0_0 = constructor_shiftop_rot(ctx, pattern0_0)?; + let expr1_0: u8 = 0; + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term rot_imm. +pub fn constructor_rot_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u8, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2542. + let expr0_0 = constructor_shiftop_rot(ctx, pattern0_0)?; + let expr1_0 = C::zero_reg(ctx); + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term shiftop_lshl. +pub fn constructor_shiftop_lshl(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/inst.isle line 2549. + let expr0_0 = ShiftOp::LShL32; + return Some(expr0_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/inst.isle line 2550. + let expr0_0 = ShiftOp::LShL32; + return Some(expr0_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2551. + let expr0_0 = ShiftOp::LShL32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2552. + let expr0_0 = ShiftOp::LShL64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term lshl_reg. +pub fn constructor_lshl_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2555. + let expr0_0 = constructor_shiftop_lshl(ctx, pattern0_0)?; + let expr1_0: u8 = 0; + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term lshl_imm. +pub fn constructor_lshl_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u8, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2559. + let expr0_0 = constructor_shiftop_lshl(ctx, pattern0_0)?; + let expr1_0 = C::zero_reg(ctx); + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term shiftop_lshr. +pub fn constructor_shiftop_lshr(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2566. + let expr0_0 = ShiftOp::LShR32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2567. 
+ let expr0_0 = ShiftOp::LShR64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term lshr_reg. +pub fn constructor_lshr_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2570. + let expr0_0 = constructor_shiftop_lshr(ctx, pattern0_0)?; + let expr1_0: u8 = 0; + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term lshr_imm. +pub fn constructor_lshr_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u8, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2574. + let expr0_0 = constructor_shiftop_lshr(ctx, pattern0_0)?; + let expr1_0 = C::zero_reg(ctx); + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term shiftop_ashr. +pub fn constructor_shiftop_ashr(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2581. + let expr0_0 = ShiftOp::AShR32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2582. + let expr0_0 = ShiftOp::AShR64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term ashr_reg. +pub fn constructor_ashr_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2585. + let expr0_0 = constructor_shiftop_ashr(ctx, pattern0_0)?; + let expr1_0: u8 = 0; + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term ashr_imm. +pub fn constructor_ashr_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u8, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2589. + let expr0_0 = constructor_shiftop_ashr(ctx, pattern0_0)?; + let expr1_0 = C::zero_reg(ctx); + let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term popcnt_byte. +pub fn constructor_popcnt_byte(ctx: &mut C, arg0: Reg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 2596. + let expr0_0: Type = I64; + let expr1_0 = UnaryOp::PopcntByte; + let expr2_0 = constructor_unary_rr(ctx, expr0_0, &expr1_0, pattern0_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term popcnt_reg. +pub fn constructor_popcnt_reg(ctx: &mut C, arg0: Reg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 2599. + let expr0_0: Type = I64; + let expr1_0 = UnaryOp::PopcntReg; + let expr2_0 = constructor_unary_rr(ctx, expr0_0, &expr1_0, pattern0_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term atomic_rmw_and. 
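+// The atomic_rmw_{and,or,xor,add} helpers below only match I32 and I64: each
+// selects the corresponding 32- or 64-bit ALUOp and defers to the shared
+// atomic_rmw_impl constructor (narrower types fall through and must be covered
+// by other rules). The ISLE source rules presumably look roughly like:
+//   (rule (atomic_rmw_and $I64 src addr)
+//         (atomic_rmw_impl $I64 (ALUOp.And64) src addr))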
+pub fn constructor_atomic_rmw_and( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2605. + let expr0_0: Type = I32; + let expr1_0 = ALUOp::And32; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2606. + let expr0_0: Type = I64; + let expr1_0 = ALUOp::And64; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_or. +pub fn constructor_atomic_rmw_or( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2609. + let expr0_0: Type = I32; + let expr1_0 = ALUOp::Orr32; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2610. + let expr0_0: Type = I64; + let expr1_0 = ALUOp::Orr64; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_xor. +pub fn constructor_atomic_rmw_xor( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2613. + let expr0_0: Type = I32; + let expr1_0 = ALUOp::Xor32; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2614. + let expr0_0: Type = I64; + let expr1_0 = ALUOp::Xor64; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_add. +pub fn constructor_atomic_rmw_add( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2617. + let expr0_0: Type = I32; + let expr1_0 = ALUOp::Add32; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2618. + let expr0_0: Type = I64; + let expr1_0 = ALUOp::Add64; + let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term fpuop2_add. +pub fn constructor_fpuop2_add(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2624. 
+ let expr0_0 = FPUOp2::Add32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2625. + let expr0_0 = FPUOp2::Add64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fadd_reg. +pub fn constructor_fadd_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2628. + let expr0_0 = constructor_fpuop2_add(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop2_sub. +pub fn constructor_fpuop2_sub(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2634. + let expr0_0 = FPUOp2::Sub32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2635. + let expr0_0 = FPUOp2::Sub64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fsub_reg. +pub fn constructor_fsub_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2638. + let expr0_0 = constructor_fpuop2_sub(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop2_mul. +pub fn constructor_fpuop2_mul(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2644. + let expr0_0 = FPUOp2::Mul32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2645. + let expr0_0 = FPUOp2::Mul64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fmul_reg. +pub fn constructor_fmul_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2648. + let expr0_0 = constructor_fpuop2_mul(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop2_div. +pub fn constructor_fpuop2_div(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2654. + let expr0_0 = FPUOp2::Div32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2655. + let expr0_0 = FPUOp2::Div64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fdiv_reg. +pub fn constructor_fdiv_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2658. + let expr0_0 = constructor_fpuop2_div(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop2_min. 
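+// Unlike fadd/fsub/fmul/fdiv above, which are emitted via fpu_rrr, the fmin
+// and fmax helpers below route through fpuvec_rrr, presumably because the
+// min/max semantics required here are only available as vector instructions
+// on this target.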
+pub fn constructor_fpuop2_min(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2664. + let expr0_0 = FPUOp2::Min32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2665. + let expr0_0 = FPUOp2::Min64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fmin_reg. +pub fn constructor_fmin_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2668. + let expr0_0 = constructor_fpuop2_min(ctx, pattern0_0)?; + let expr1_0 = constructor_fpuvec_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop2_max. +pub fn constructor_fpuop2_max(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2674. + let expr0_0 = FPUOp2::Max32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2675. + let expr0_0 = FPUOp2::Max64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fmax_reg. +pub fn constructor_fmax_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2678. + let expr0_0 = constructor_fpuop2_max(ctx, pattern0_0)?; + let expr1_0 = constructor_fpuvec_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop3_fma. +pub fn constructor_fpuop3_fma(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2684. + let expr0_0 = FPUOp3::MAdd32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2685. + let expr0_0 = FPUOp3::MAdd64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fma_reg. +pub fn constructor_fma_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2688. + let expr0_0 = constructor_fpuop3_fma(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rrrr( + ctx, pattern0_0, &expr0_0, pattern3_0, pattern1_0, pattern2_0, + )?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop1_sqrt. +pub fn constructor_fpuop1_sqrt(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2694. + let expr0_0 = FPUOp1::Sqrt32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2695. + let expr0_0 = FPUOp1::Sqrt64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term sqrt_reg. +pub fn constructor_sqrt_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2698. 
+ let expr0_0 = constructor_fpuop1_sqrt(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop1_neg. +pub fn constructor_fpuop1_neg(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2704. + let expr0_0 = FPUOp1::Neg32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2705. + let expr0_0 = FPUOp1::Neg64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fneg_reg. +pub fn constructor_fneg_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2708. + let expr0_0 = constructor_fpuop1_neg(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop1_abs. +pub fn constructor_fpuop1_abs(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2714. + let expr0_0 = FPUOp1::Abs32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2715. + let expr0_0 = FPUOp1::Abs64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term fabs_reg. +pub fn constructor_fabs_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2718. + let expr0_0 = constructor_fpuop1_abs(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuroundmode_ceil. +pub fn constructor_fpuroundmode_ceil(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2724. + let expr0_0 = FpuRoundMode::Plus32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2725. + let expr0_0 = FpuRoundMode::Plus64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term ceil_reg. +pub fn constructor_ceil_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2728. + let expr0_0 = constructor_fpuroundmode_ceil(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuroundmode_floor. +pub fn constructor_fpuroundmode_floor(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2734. + let expr0_0 = FpuRoundMode::Minus32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2735. + let expr0_0 = FpuRoundMode::Minus64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term floor_reg. +pub fn constructor_floor_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2738. 
+ let expr0_0 = constructor_fpuroundmode_floor(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuroundmode_trunc. +pub fn constructor_fpuroundmode_trunc(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2744. + let expr0_0 = FpuRoundMode::Zero32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2745. + let expr0_0 = FpuRoundMode::Zero64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term trunc_reg. +pub fn constructor_trunc_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2748. + let expr0_0 = constructor_fpuroundmode_trunc(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuroundmode_nearest. +pub fn constructor_fpuroundmode_nearest( + ctx: &mut C, + arg0: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2754. + let expr0_0 = FpuRoundMode::Nearest32; + return Some(expr0_0); + } + if pattern0_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2755. + let expr0_0 = FpuRoundMode::Nearest64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term nearest_reg. +pub fn constructor_nearest_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2758. + let expr0_0 = constructor_fpuroundmode_nearest(ctx, pattern0_0)?; + let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop1_promote. +pub fn constructor_fpuop1_promote( + ctx: &mut C, + arg0: Type, + arg1: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F64 { + let pattern2_0 = arg1; + if pattern2_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2764. + let expr0_0 = FPUOp1::Cvt32To64; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term fpromote_reg. +pub fn constructor_fpromote_reg( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2767. + let expr0_0 = constructor_fpuop1_promote(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpuop1_demote. +pub fn constructor_fpuop1_demote( + ctx: &mut C, + arg0: Type, + arg1: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + if pattern2_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2774. + let expr0_0 = FPUOp1::Cvt64To32; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term fdemote_reg. +pub fn constructor_fdemote_reg( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2777. 
+ let expr0_0 = constructor_fpuop1_demote(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term uint_to_fpu_op. +pub fn constructor_uint_to_fpu_op( + ctx: &mut C, + arg0: Type, + arg1: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + if pattern2_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2784. + let expr0_0 = IntToFpuOp::U32ToF32; + return Some(expr0_0); + } + if pattern2_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2786. + let expr0_0 = IntToFpuOp::U64ToF32; + return Some(expr0_0); + } + } + if pattern0_0 == F64 { + let pattern2_0 = arg1; + if pattern2_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2785. + let expr0_0 = IntToFpuOp::U32ToF64; + return Some(expr0_0); + } + if pattern2_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2787. + let expr0_0 = IntToFpuOp::U64ToF64; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term fcvt_from_uint_reg. +pub fn constructor_fcvt_from_uint_reg( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2790. + let expr0_0 = constructor_uint_to_fpu_op(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_int_to_fpu(ctx, pattern0_0, &expr0_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term sint_to_fpu_op. +pub fn constructor_sint_to_fpu_op( + ctx: &mut C, + arg0: Type, + arg1: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + if pattern2_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2797. + let expr0_0 = IntToFpuOp::I32ToF32; + return Some(expr0_0); + } + if pattern2_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2799. + let expr0_0 = IntToFpuOp::I64ToF32; + return Some(expr0_0); + } + } + if pattern0_0 == F64 { + let pattern2_0 = arg1; + if pattern2_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2798. + let expr0_0 = IntToFpuOp::I32ToF64; + return Some(expr0_0); + } + if pattern2_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2800. + let expr0_0 = IntToFpuOp::I64ToF64; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term fcvt_from_sint_reg. +pub fn constructor_fcvt_from_sint_reg( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2803. + let expr0_0 = constructor_sint_to_fpu_op(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_int_to_fpu(ctx, pattern0_0, &expr0_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpu_to_uint_op. +pub fn constructor_fpu_to_uint_op( + ctx: &mut C, + arg0: Type, + arg1: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + if pattern2_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2810. + let expr0_0 = FpuToIntOp::F32ToU32; + return Some(expr0_0); + } + if pattern2_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2811. + let expr0_0 = FpuToIntOp::F64ToU32; + return Some(expr0_0); + } + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + if pattern2_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2812. 
+ let expr0_0 = FpuToIntOp::F32ToU64; + return Some(expr0_0); + } + if pattern2_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2813. + let expr0_0 = FpuToIntOp::F64ToU64; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term fcvt_to_uint_reg_with_flags. +pub fn constructor_fcvt_to_uint_reg_with_flags( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2816. + let expr0_0 = constructor_fpu_to_uint_op(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_fpu_to_int(ctx, pattern0_0, &expr0_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fcvt_to_uint_reg. +pub fn constructor_fcvt_to_uint_reg( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2820. + let expr0_0 = constructor_fcvt_to_uint_reg_with_flags(ctx, pattern0_0, pattern1_0, pattern2_0)?; + let expr1_0 = constructor_drop_flags(ctx, &expr0_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fpu_to_sint_op. +pub fn constructor_fpu_to_sint_op( + ctx: &mut C, + arg0: Type, + arg1: Type, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + if pattern2_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2827. + let expr0_0 = FpuToIntOp::F32ToI32; + return Some(expr0_0); + } + if pattern2_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2828. + let expr0_0 = FpuToIntOp::F64ToI32; + return Some(expr0_0); + } + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + if pattern2_0 == F32 { + // Rule at src/isa/s390x/inst.isle line 2829. + let expr0_0 = FpuToIntOp::F32ToI64; + return Some(expr0_0); + } + if pattern2_0 == F64 { + // Rule at src/isa/s390x/inst.isle line 2830. + let expr0_0 = FpuToIntOp::F64ToI64; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term fcvt_to_sint_reg_with_flags. +pub fn constructor_fcvt_to_sint_reg_with_flags( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2833. + let expr0_0 = constructor_fpu_to_sint_op(ctx, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_fpu_to_int(ctx, pattern0_0, &expr0_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fcvt_to_sint_reg. +pub fn constructor_fcvt_to_sint_reg( + ctx: &mut C, + arg0: Type, + arg1: Type, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2837. + let expr0_0 = constructor_fcvt_to_sint_reg_with_flags(ctx, pattern0_0, pattern1_0, pattern2_0)?; + let expr1_0 = constructor_drop_flags(ctx, &expr0_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term cmpop_cmps. +pub fn constructor_cmpop_cmps(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2844. + let expr0_0 = CmpOp::CmpS32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2845. 
+ let expr0_0 = CmpOp::CmpS64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term cmpop_cmps_sext16. +pub fn constructor_cmpop_cmps_sext16(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2848. + let expr0_0 = CmpOp::CmpS32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2849. + let expr0_0 = CmpOp::CmpS64Ext16; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term cmpop_cmps_sext32. +pub fn constructor_cmpop_cmps_sext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2852. + let expr0_0 = CmpOp::CmpS64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term icmps_reg. +pub fn constructor_icmps_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2855. + let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmps_reg_sext32. +pub fn constructor_icmps_reg_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2858. + let expr0_0 = constructor_cmpop_cmps_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmps_simm16. +pub fn constructor_icmps_simm16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i16, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2861. + let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rsimm16(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmps_simm32. +pub fn constructor_icmps_simm32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: i32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2864. + let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rsimm32(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmps_mem. +pub fn constructor_icmps_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2867. + let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmps_mem_sext16. +pub fn constructor_icmps_mem_sext16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2870. 
+ let expr0_0 = constructor_cmpop_cmps_sext16(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmps_mem_sext32. +pub fn constructor_icmps_mem_sext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2873. + let expr0_0 = constructor_cmpop_cmps_sext32(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term cmpop_cmpu. +pub fn constructor_cmpop_cmpu(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2879. + let expr0_0 = CmpOp::CmpL32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2880. + let expr0_0 = CmpOp::CmpL64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term cmpop_cmpu_zext16. +pub fn constructor_cmpop_cmpu_zext16(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2883. + let expr0_0 = CmpOp::CmpL32Ext16; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2884. + let expr0_0 = CmpOp::CmpL64Ext16; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term cmpop_cmpu_zext32. +pub fn constructor_cmpop_cmpu_zext32(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2887. + let expr0_0 = CmpOp::CmpL64Ext32; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term icmpu_reg. +pub fn constructor_icmpu_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2890. + let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmpu_reg_zext32. +pub fn constructor_icmpu_reg_zext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2893. + let expr0_0 = constructor_cmpop_cmpu_zext32(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmpu_uimm32. +pub fn constructor_icmpu_uimm32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u32, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2896. + let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_ruimm32(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmpu_mem. +pub fn constructor_icmpu_mem( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2899. 
+ let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmpu_mem_zext16. +pub fn constructor_icmpu_mem_zext16( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2902. + let expr0_0 = constructor_cmpop_cmpu_zext16(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term icmpu_mem_zext32. +pub fn constructor_icmpu_mem_zext32( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2905. + let expr0_0 = constructor_cmpop_cmpu_zext32(ctx, pattern0_0)?; + let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term fcmp_reg. +pub fn constructor_fcmp_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == F32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2911. + let expr0_0 = constructor_fpu_cmp32(ctx, pattern2_0, pattern3_0)?; + return Some(expr0_0); + } + if pattern0_0 == F64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2912. + let expr0_0 = constructor_fpu_cmp64(ctx, pattern2_0, pattern3_0)?; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term lower. +pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = C::inst_data(ctx, pattern0_0); + match &pattern1_0 { + &InstructionData::NullAry { + opcode: ref pattern2_0, + } => { + match &pattern2_0 { + &Opcode::Nop => { + // Rule at src/isa/s390x/lower.isle line 42. + let expr0_0 = C::invalid_reg(ctx); + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Fence => { + // Rule at src/isa/s390x/lower.isle line 1600. + let expr0_0 = constructor_fence_impl(ctx)?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + _ => {} + } + } + &InstructionData::FuncAddr { + opcode: ref pattern2_0, + func_ref: pattern2_1, + } => { + if let &Opcode::FuncAddr = &pattern2_0 { + if let Some((pattern4_0, pattern4_1)) = C::call_target_data(ctx, pattern0_0) { + if let Some(()) = C::reloc_distance_near(ctx, &pattern4_1) { + // Rule at src/isa/s390x/lower.isle line 1154. + let expr0_0: i32 = 0; + let expr1_0 = C::memflags_trusted(ctx); + let expr2_0 = C::memarg_symbol(ctx, pattern4_0, expr0_0, expr1_0); + let expr3_0 = constructor_load_addr(ctx, &expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + // Rule at src/isa/s390x/lower.isle line 1159. 
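+ // Fallback for function references that are not known to be nearby:
+ // instead of a PC-relative memarg_symbol, the address is materialized
+ // via load_ext_name_far with offset 0.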
+ let expr0_0: i64 = 0; + let expr1_0 = constructor_load_ext_name_far(ctx, pattern4_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + &InstructionData::UnaryGlobalValue { + opcode: ref pattern2_0, + global_value: pattern2_1, + } => { + if let &Opcode::SymbolValue = &pattern2_0 { + if let Some((pattern4_0, pattern4_1, pattern4_2)) = + C::symbol_value_data(ctx, pattern0_0) + { + if let Some(()) = C::reloc_distance_near(ctx, &pattern4_1) { + let pattern6_0 = 0; + if let Some(pattern7_0) = + C::memarg_symbol_offset_sum(ctx, pattern4_2, pattern6_0) + { + // Rule at src/isa/s390x/lower.isle line 1167. + let expr0_0 = C::memflags_trusted(ctx); + let expr1_0 = C::memarg_symbol(ctx, pattern4_0, pattern7_0, expr0_0); + let expr2_0 = constructor_load_addr(ctx, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + // Rule at src/isa/s390x/lower.isle line 1173. + let expr0_0 = constructor_load_ext_name_far(ctx, pattern4_0, pattern4_2)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + } + } + &InstructionData::UnaryIeee32 { + opcode: ref pattern2_0, + imm: pattern2_1, + } => { + if let &Opcode::F32const = &pattern2_0 { + let pattern4_0 = C::u64_from_ieee32(ctx, pattern2_1); + // Rule at src/isa/s390x/lower.isle line 24. + let expr0_0: Type = F32; + let expr1_0 = constructor_imm(ctx, expr0_0, pattern4_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + &InstructionData::UnaryIeee64 { + opcode: ref pattern2_0, + imm: pattern2_1, + } => { + if let &Opcode::F64const = &pattern2_0 { + let pattern4_0 = C::u64_from_ieee64(ctx, pattern2_1); + // Rule at src/isa/s390x/lower.isle line 30. + let expr0_0: Type = F64; + let expr1_0 = constructor_imm(ctx, expr0_0, pattern4_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + &InstructionData::StoreNoOffset { + opcode: ref pattern2_0, + args: ref pattern2_1, + flags: pattern2_2, + } => { + if let &Opcode::AtomicStore = &pattern2_0 { + let (pattern4_0, pattern4_1) = C::unpack_value_array_2(ctx, &pattern2_1); + let pattern5_0 = C::value_type(ctx, pattern4_0); + if pattern5_0 == I8 { + // Rule at src/isa/s390x/lower.isle line 1581. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = + constructor_istore8_impl(ctx, pattern2_2, pattern4_0, pattern4_1, expr0_0)?; + let expr2_0 = constructor_atomic_store_impl(ctx, &expr1_0)?; + return Some(expr2_0); + } + if pattern5_0 == I16 { + // Rule at src/isa/s390x/lower.isle line 1585. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = constructor_istore16_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, expr0_0, + )?; + let expr2_0 = constructor_atomic_store_impl(ctx, &expr1_0)?; + return Some(expr2_0); + } + if pattern5_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 1589. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = constructor_istore32_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, expr0_0, + )?; + let expr2_0 = constructor_atomic_store_impl(ctx, &expr1_0)?; + return Some(expr2_0); + } + if pattern5_0 == I64 { + // Rule at src/isa/s390x/lower.isle line 1593. 
+ let expr0_0 = C::zero_offset(ctx); + let expr1_0 = constructor_istore64_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, expr0_0, + )?; + let expr2_0 = constructor_atomic_store_impl(ctx, &expr1_0)?; + return Some(expr2_0); + } + } + } + &InstructionData::Store { + opcode: ref pattern2_0, + args: ref pattern2_1, + flags: pattern2_2, + offset: pattern2_3, + } => { + match &pattern2_0 { + &Opcode::Store => { + let (pattern4_0, pattern4_1) = C::unpack_value_array_2(ctx, &pattern2_1); + let pattern5_0 = C::value_type(ctx, pattern4_0); + if pattern5_0 == I8 { + // Rule at src/isa/s390x/lower.isle line 1353. + let expr0_0 = constructor_istore8_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + if pattern5_0 == I16 { + // Rule at src/isa/s390x/lower.isle line 1357. + let expr0_0 = constructor_istore16_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + if pattern5_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 1361. + let expr0_0 = constructor_istore32_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + if pattern5_0 == I64 { + // Rule at src/isa/s390x/lower.isle line 1365. + let expr0_0 = constructor_istore64_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + if pattern5_0 == R64 { + // Rule at src/isa/s390x/lower.isle line 1369. + let expr0_0 = constructor_istore64_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + if pattern5_0 == F32 { + if let Some(()) = C::bigendian(ctx, pattern2_2) { + // Rule at src/isa/s390x/lower.isle line 1373. + let expr0_0 = C::put_in_reg(ctx, pattern4_0); + let expr1_0 = + constructor_lower_address(ctx, pattern2_2, pattern4_1, pattern2_3)?; + let expr2_0 = constructor_fpu_store32(ctx, expr0_0, &expr1_0)?; + let expr3_0 = constructor_value_regs_none(ctx, &expr2_0)?; + return Some(expr3_0); + } + if let Some(()) = C::vxrs_ext2_enabled(ctx, pattern5_0) { + if let Some(()) = C::littleendian(ctx, pattern2_2) { + // Rule at src/isa/s390x/lower.isle line 1379. + let expr0_0 = C::put_in_reg(ctx, pattern4_0); + let expr1_0 = constructor_lower_address( + ctx, pattern2_2, pattern4_1, pattern2_3, + )?; + let expr2_0 = constructor_fpu_storerev32(ctx, expr0_0, &expr1_0)?; + let expr3_0 = constructor_value_regs_none(ctx, &expr2_0)?; + return Some(expr3_0); + } + } + if let Some(()) = C::vxrs_ext2_disabled(ctx, pattern5_0) { + if let Some(()) = C::littleendian(ctx, pattern2_2) { + // Rule at src/isa/s390x/lower.isle line 1385. + let expr0_0: Type = I64; + let expr1_0 = C::put_in_reg(ctx, pattern4_0); + let expr2_0 = constructor_mov_from_fpr(ctx, expr1_0)?; + let expr3_0: u8 = 32; + let expr4_0 = constructor_lshr_imm(ctx, expr0_0, expr2_0, expr3_0)?; + let expr5_0 = constructor_lower_address( + ctx, pattern2_2, pattern4_1, pattern2_3, + )?; + let expr6_0 = constructor_storerev32(ctx, expr4_0, &expr5_0)?; + let expr7_0 = constructor_value_regs_none(ctx, &expr6_0)?; + return Some(expr7_0); + } + } + } + if pattern5_0 == F64 { + if let Some(()) = C::bigendian(ctx, pattern2_2) { + // Rule at src/isa/s390x/lower.isle line 1391. 
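+ // The F64 store mirrors the three-way split used for F32 above: a plain
+ // fpu_store for big-endian accesses, a byte-reversed fpu_storerev on
+ // little-endian accesses when vxrs_ext2 is available, and otherwise a
+ // move to a GPR followed by a byte-reversed integer store.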
+ let expr0_0 = C::put_in_reg(ctx, pattern4_0); + let expr1_0 = + constructor_lower_address(ctx, pattern2_2, pattern4_1, pattern2_3)?; + let expr2_0 = constructor_fpu_store64(ctx, expr0_0, &expr1_0)?; + let expr3_0 = constructor_value_regs_none(ctx, &expr2_0)?; + return Some(expr3_0); + } + if let Some(()) = C::vxrs_ext2_enabled(ctx, pattern5_0) { + if let Some(()) = C::littleendian(ctx, pattern2_2) { + // Rule at src/isa/s390x/lower.isle line 1397. + let expr0_0 = C::put_in_reg(ctx, pattern4_0); + let expr1_0 = constructor_lower_address( + ctx, pattern2_2, pattern4_1, pattern2_3, + )?; + let expr2_0 = constructor_fpu_storerev64(ctx, expr0_0, &expr1_0)?; + let expr3_0 = constructor_value_regs_none(ctx, &expr2_0)?; + return Some(expr3_0); + } + } + if let Some(()) = C::vxrs_ext2_disabled(ctx, pattern5_0) { + if let Some(()) = C::littleendian(ctx, pattern2_2) { + // Rule at src/isa/s390x/lower.isle line 1403. + let expr0_0 = C::put_in_reg(ctx, pattern4_0); + let expr1_0 = constructor_mov_from_fpr(ctx, expr0_0)?; + let expr2_0 = constructor_lower_address( + ctx, pattern2_2, pattern4_1, pattern2_3, + )?; + let expr3_0 = constructor_storerev64(ctx, expr1_0, &expr2_0)?; + let expr4_0 = constructor_value_regs_none(ctx, &expr3_0)?; + return Some(expr4_0); + } + } + } + } + &Opcode::Istore8 => { + let (pattern4_0, pattern4_1) = C::unpack_value_array_2(ctx, &pattern2_1); + // Rule at src/isa/s390x/lower.isle line 1412. + let expr0_0 = constructor_istore8_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + &Opcode::Istore16 => { + let (pattern4_0, pattern4_1) = C::unpack_value_array_2(ctx, &pattern2_1); + // Rule at src/isa/s390x/lower.isle line 1430. + let expr0_0 = constructor_istore16_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + &Opcode::Istore32 => { + let (pattern4_0, pattern4_1) = C::unpack_value_array_2(ctx, &pattern2_1); + // Rule at src/isa/s390x/lower.isle line 1456. + let expr0_0 = constructor_istore32_impl( + ctx, pattern2_2, pattern4_0, pattern4_1, pattern2_3, + )?; + let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; + return Some(expr1_0); + } + _ => {} + } + } + &InstructionData::Unary { + opcode: ref pattern2_0, + arg: pattern2_1, + } => { + match &pattern2_0 { + &Opcode::Copy => { + // Rule at src/isa/s390x/lower.isle line 48. + let expr0_0 = C::put_in_reg(ctx, pattern2_1); + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Breduce => { + // Rule at src/isa/s390x/lower.isle line 747. + let expr0_0 = C::put_in_reg(ctx, pattern2_1); + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Ireduce => { + // Rule at src/isa/s390x/lower.isle line 591. + let expr0_0 = C::put_in_reg(ctx, pattern2_1); + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + _ => {} + } + } + _ => {} + } + if let Some(pattern1_0) = C::first_result(ctx, pattern0_0) { + let pattern2_0 = C::value_type(ctx, pattern1_0); + if pattern2_0 == B1 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } = &pattern4_0 + { + match &pattern5_0 { + &Opcode::IsNull => { + let pattern7_0 = C::value_type(ctx, pattern5_1); + if pattern7_0 == R64 { + // Rule at src/isa/s390x/lower.isle line 1735. 
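+ // is_null on an R64 reference lowers to a signed compare of the value
+ // against 0 (is_invalid, below, compares against -1); the resulting
+ // condition is then materialized into a B1 via lower_bool.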
+ let expr0_0: Type = B1; + let expr1_0: Type = I64; + let expr2_0 = C::put_in_reg(ctx, pattern5_1); + let expr3_0: i16 = 0; + let expr4_0 = constructor_icmps_simm16(ctx, expr1_0, expr2_0, expr3_0)?; + let expr5_0 = IntCC::Equal; + let expr6_0 = C::intcc_as_cond(ctx, &expr5_0); + let expr7_0 = constructor_bool(ctx, &expr4_0, &expr6_0)?; + let expr8_0 = constructor_lower_bool(ctx, expr0_0, &expr7_0)?; + let expr9_0 = C::value_reg(ctx, expr8_0); + return Some(expr9_0); + } + } + &Opcode::IsInvalid => { + let pattern7_0 = C::value_type(ctx, pattern5_1); + if pattern7_0 == R64 { + // Rule at src/isa/s390x/lower.isle line 1741. + let expr0_0: Type = B1; + let expr1_0: Type = I64; + let expr2_0 = C::put_in_reg(ctx, pattern5_1); + let expr3_0: i16 = -1; + let expr4_0 = constructor_icmps_simm16(ctx, expr1_0, expr2_0, expr3_0)?; + let expr5_0 = IntCC::Equal; + let expr6_0 = C::intcc_as_cond(ctx, &expr5_0); + let expr7_0 = constructor_bool(ctx, &expr4_0, &expr6_0)?; + let expr8_0 = constructor_lower_bool(ctx, expr0_0, &expr7_0)?; + let expr9_0 = C::value_reg(ctx, expr8_0); + return Some(expr9_0); + } + } + _ => {} + } + } + } + if pattern2_0 == I8 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + if let &Opcode::Popcnt = &pattern5_0 { + // Rule at src/isa/s390x/lower.isle line 879. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = constructor_popcnt_byte(ctx, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + &InstructionData::LoadNoOffset { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicLoad = &pattern5_0 { + // Rule at src/isa/s390x/lower.isle line 1544. + let expr0_0: Type = I8; + let expr1_0 = C::zero_offset(ctx); + let expr2_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr1_0)?; + let expr3_0 = constructor_zext32_mem(ctx, expr0_0, &expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + if let &Opcode::Load = &pattern5_0 { + // Rule at src/isa/s390x/lower.isle line 1181. + let expr0_0: Type = I8; + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr2_0 = constructor_zext32_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + if pattern2_0 == I16 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::LoadNoOffset { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicLoad = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1552. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; + let expr2_0 = constructor_loadrev16(ctx, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1548. 
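For the i8/i16 loads above, the big-endian cases use a zero-extending load (`zext32_mem`) while the little-endian i16 case uses the byte-reversing load (`loadrev16`): a little-endian load is simply the byte-swap of the native big-endian load, held zero-extended in a 32-bit register. A small sketch with an invented function name:

fn load_u16(bytes: [u8; 2], little_endian: bool) -> u32 {
    // Native (big-endian) halfword load ...
    let native = u16::from_be_bytes(bytes);
    // ... versus the byte-reversing load, which reads the two bytes swapped.
    let loaded = if little_endian { native.swap_bytes() } else { native };
    loaded as u32 // zero-extended into a 32-bit register
}

fn main() {
    assert_eq!(load_u16([0x12, 0x34], false), 0x1234);
    assert_eq!(load_u16([0x12, 0x34], true), 0x3412);
}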
+ let expr0_0: Type = I16; + let expr1_0 = C::zero_offset(ctx); + let expr2_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr1_0)?; + let expr3_0 = constructor_zext32_mem(ctx, expr0_0, &expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1189. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_loadrev16(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1185. + let expr0_0: Type = I16; + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr2_0 = constructor_zext32_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + _ => {} + } + } + if pattern2_0 == I32 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Binary { + opcode: ref pattern5_0, + args: ref pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Umulhi => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 247. + let expr0_0 = constructor_put_in_reg_zext64(ctx, pattern7_0)?; + let expr1_0 = constructor_put_in_reg_zext64(ctx, pattern7_1)?; + let expr2_0: Type = I64; + let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0: Type = I64; + let expr5_0: u8 = 32; + let expr6_0 = constructor_lshr_imm(ctx, expr4_0, expr3_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + &Opcode::Smulhi => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 269. + let expr0_0 = constructor_put_in_reg_sext64(ctx, pattern7_0)?; + let expr1_0 = constructor_put_in_reg_sext64(ctx, pattern7_1)?; + let expr2_0: Type = I64; + let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0: Type = I64; + let expr5_0: u8 = 32; + let expr6_0 = constructor_ashr_imm(ctx, expr4_0, expr3_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + _ => {} + } + } + &InstructionData::AtomicCas { + opcode: ref pattern5_0, + args: ref pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicCas = &pattern5_0 { + let (pattern7_0, pattern7_1, pattern7_2) = + C::unpack_value_array_3(ctx, &pattern5_1); + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1529. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern7_2); + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = + constructor_lower_address(ctx, pattern5_2, pattern7_0, expr2_0)?; + let expr4_0 = + constructor_atomic_cas32(ctx, expr0_0, expr1_0, &expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + if let &Opcode::Bitcast = &pattern5_0 { + let pattern7_0 = C::value_type(ctx, pattern5_1); + if pattern7_0 == F32 { + // Rule at src/isa/s390x/lower.isle line 1140. 
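The 32-bit `umulhi`/`smulhi` rules above need no dedicated high-multiply: both operands are extended to 64 bits (`put_in_reg_zext64`/`put_in_reg_sext64`), multiplied, and the upper 32 bits of the product are shifted down (logical shift for the unsigned case, arithmetic for the signed one). The arithmetic, as a standalone sketch:

fn umulhi32(a: u32, b: u32) -> u32 {
    // zero-extend, 64-bit multiply, logical shift right by 32
    ((a as u64 * b as u64) >> 32) as u32
}

fn smulhi32(a: i32, b: i32) -> i32 {
    // sign-extend, 64-bit multiply, arithmetic shift right by 32
    ((a as i64 * b as i64) >> 32) as i32
}

fn main() {
    assert_eq!(umulhi32(u32::MAX, u32::MAX), 0xFFFF_FFFE);
    assert_eq!(smulhi32(-1, -1), 0);
}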
+ let expr0_0: Type = I64; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0 = constructor_mov_from_fpr(ctx, expr1_0)?; + let expr3_0: u8 = 32; + let expr4_0 = constructor_lshr_imm(ctx, expr0_0, expr2_0, expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + &InstructionData::LoadNoOffset { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicLoad = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1560. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; + let expr2_0 = constructor_loadrev32(ctx, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1556. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; + let expr2_0 = constructor_load32(ctx, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1197. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_loadrev32(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1193. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_load32(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + _ => {} + } + } + if pattern2_0 == I64 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Binary { + opcode: ref pattern5_0, + args: ref pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Umulhi => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 254. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_umul_wide(ctx, expr0_0, expr1_0)?; + let expr3_0: Type = I64; + let expr4_0 = constructor_regpair_hi(ctx, &expr2_0)?; + let expr5_0 = constructor_copy_reg(ctx, expr3_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + &Opcode::Smulhi => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 276. 
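For 64-bit `umulhi` (above) and `smulhi` (whose body follows), the lowering uses the widening multiplies `umul_wide`/`smul_wide`, which leave a 128-bit product in a register pair, and `regpair_hi` copies out the top half. Numerically that is the top 64 bits of a 128-bit product; a sketch:

fn umulhi64(a: u64, b: u64) -> u64 {
    // umul_wide: 128-bit product in a register pair; regpair_hi is the top half.
    ((a as u128 * b as u128) >> 64) as u64
}

fn smulhi64(a: i64, b: i64) -> i64 {
    ((a as i128 * b as i128) >> 64) as i64
}

fn main() {
    assert_eq!(umulhi64(u64::MAX, 2), 1);
    assert_eq!(smulhi64(i64::MIN, 2), -1);
}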
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_smul_wide(ctx, expr0_0, expr1_0)?; + let expr3_0: Type = I64; + let expr4_0 = constructor_regpair_hi(ctx, &expr2_0)?; + let expr5_0 = constructor_copy_reg(ctx, expr3_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + _ => {} + } + } + &InstructionData::AtomicCas { + opcode: ref pattern5_0, + args: ref pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicCas = &pattern5_0 { + let (pattern7_0, pattern7_1, pattern7_2) = + C::unpack_value_array_3(ctx, &pattern5_1); + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1534. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern7_2); + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = + constructor_lower_address(ctx, pattern5_2, pattern7_0, expr2_0)?; + let expr4_0 = + constructor_atomic_cas64(ctx, expr0_0, expr1_0, &expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + if let &Opcode::Bitcast = &pattern5_0 { + let pattern7_0 = C::value_type(ctx, pattern5_1); + if pattern7_0 == F64 { + // Rule at src/isa/s390x/lower.isle line 1130. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = constructor_mov_from_fpr(ctx, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + &InstructionData::LoadNoOffset { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicLoad = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1568. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; + let expr2_0 = constructor_loadrev64(ctx, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1564. + let expr0_0 = C::zero_offset(ctx); + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; + let expr2_0 = constructor_load64(ctx, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1205. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_loadrev64(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1201. 
+ let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_load64(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + _ => {} + } + } + if pattern2_0 == R64 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } = &pattern4_0 + { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1213. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_loadrev64(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1209. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_load64(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + } + if pattern2_0 == F32 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + if let &Opcode::Bitcast = &pattern5_0 { + let pattern7_0 = C::value_type(ctx, pattern5_1); + if pattern7_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 1135. + let expr0_0: Type = I64; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0: u8 = 32; + let expr3_0 = constructor_lshl_imm(ctx, expr0_0, expr1_0, expr2_0)?; + let expr4_0 = constructor_mov_to_fpr(ctx, expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1217. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_fpu_load32(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + _ => {} + } + } + if pattern2_0 == F64 { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + if let &Opcode::Bitcast = &pattern5_0 { + let pattern7_0 = C::value_type(ctx, pattern5_1); + if pattern7_0 == I64 { + // Rule at src/isa/s390x/lower.isle line 1126. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = constructor_mov_to_fpr(ctx, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + if let &Opcode::Load = &pattern5_0 { + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1232. + let expr0_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr1_0 = constructor_fpu_load64(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + _ => {} + } + } + let pattern3_0 = C::inst_data(ctx, pattern0_0); + match &pattern3_0 { + &InstructionData::NullAry { + opcode: ref pattern4_0, + } => { + if let &Opcode::Null = &pattern4_0 { + // Rule at src/isa/s390x/lower.isle line 36. 
+ let expr0_0: u64 = 0; + let expr1_0 = constructor_imm(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + &InstructionData::UnaryImm { + opcode: ref pattern4_0, + imm: pattern4_1, + } => { + if let &Opcode::Iconst = &pattern4_0 { + let pattern6_0 = C::u64_from_imm64(ctx, pattern4_1); + // Rule at src/isa/s390x/lower.isle line 10. + let expr0_0 = constructor_imm(ctx, pattern2_0, pattern6_0)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + } + &InstructionData::StackLoad { + opcode: ref pattern4_0, + stack_slot: pattern4_1, + offset: pattern4_2, + } => { + if let &Opcode::StackAddr = &pattern4_0 { + // Rule at src/isa/s390x/lower.isle line 1147. + let expr0_0 = + constructor_stack_addr_impl(ctx, pattern2_0, pattern4_1, pattern4_2)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + } + &InstructionData::UnaryBool { + opcode: ref pattern4_0, + imm: pattern4_1, + } => { + if let &Opcode::Bconst = &pattern4_0 { + if pattern4_1 == true { + // Rule at src/isa/s390x/lower.isle line 18. + let expr0_0: u64 = 1; + let expr1_0 = constructor_imm(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if pattern4_1 == false { + // Rule at src/isa/s390x/lower.isle line 16. + let expr0_0: u64 = 0; + let expr1_0 = constructor_imm(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + &InstructionData::Binary { + opcode: ref pattern4_0, + args: ref pattern4_1, + } => { + match &pattern4_0 { + &Opcode::Fadd => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 915. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fadd_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Fsub => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 922. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fsub_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Fmul => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 929. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fmul_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Fdiv => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 936. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fdiv_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Fcopysign => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 957. 
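`fcopysign`, whose rule body follows, combines the magnitude of the first operand with the sign of the second; the `fpu_copysign` helper it calls is equivalent to splicing the sign bit. A bit-level sketch for f64 (standalone, not backend code):

fn copysign_f64(mag: f64, sign: f64) -> f64 {
    const SIGN_BIT: u64 = 1 << 63;
    f64::from_bits((mag.to_bits() & !SIGN_BIT) | (sign.to_bits() & SIGN_BIT))
}

fn main() {
    assert_eq!(copysign_f64(1.5, -0.0), -1.5);
    assert_eq!(copysign_f64(-2.0, 3.0), 2.0);
    // Matches the standard-library semantics:
    assert_eq!(copysign_f64(1.5, -4.0), 1.5f64.copysign(-4.0));
}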
+ let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fpu_copysign(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Fmin => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 943. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fmin_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Fmax => { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 950. + let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = constructor_fmax_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + _ => {} + } + } + &InstructionData::FloatCompare { + opcode: ref pattern4_0, + args: ref pattern4_1, + cond: ref pattern4_2, + } => { + if let &Opcode::Fcmp = &pattern4_0 { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1722. + let expr0_0 = constructor_fcmp_val(ctx, &pattern4_2, pattern6_0, pattern6_1)?; + let expr1_0 = constructor_lower_bool(ctx, pattern2_0, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + &InstructionData::IntCompare { + opcode: ref pattern4_0, + args: ref pattern4_1, + cond: ref pattern4_2, + } => { + if let &Opcode::Icmp = &pattern4_0 { + let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1633. + let expr0_0: bool = true; + let expr1_0 = + constructor_icmp_val(ctx, expr0_0, &pattern4_2, pattern6_0, pattern6_1)?; + let expr2_0 = constructor_lower_bool(ctx, pattern2_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &InstructionData::Ternary { + opcode: ref pattern4_0, + args: ref pattern4_1, + } => { + match &pattern4_0 { + &Opcode::Select => { + let (pattern6_0, pattern6_1, pattern6_2) = + C::unpack_value_array_3(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1764. + let expr0_0 = constructor_value_nonzero(ctx, pattern6_0)?; + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = C::put_in_reg(ctx, pattern6_2); + let expr3_0 = constructor_select_bool_reg( + ctx, pattern2_0, &expr0_0, expr1_0, expr2_0, + )?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &Opcode::Fma => { + let (pattern6_0, pattern6_1, pattern6_2) = + C::unpack_value_array_3(ctx, &pattern4_1); + // Rule at src/isa/s390x/lower.isle line 964. 
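The `fma` rule whose body follows maps straight onto the fused multiply-add via `fma_reg`, which rounds only once. That is observably different from a separate multiply and add; a small standalone demonstration of one case where the two disagree (using `f64::mul_add` as the fused form):

fn main() {
    let a = 1.0 + f64::EPSILON;
    let b = 1.0 + f64::EPSILON;
    let c = -(1.0 + 2.0 * f64::EPSILON);
    // The exact result is EPSILON^2, which the separately rounded multiply loses.
    assert_eq!(a * b + c, 0.0);
    // The fused form keeps it.
    assert_eq!(a.mul_add(b, c), f64::EPSILON * f64::EPSILON);
}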
+ let expr0_0 = C::put_in_reg(ctx, pattern6_0); + let expr1_0 = C::put_in_reg(ctx, pattern6_1); + let expr2_0 = C::put_in_reg(ctx, pattern6_2); + let expr3_0 = + constructor_fma_reg(ctx, pattern2_0, expr0_0, expr1_0, expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + _ => {} + } + } + &InstructionData::IntSelect { + opcode: ref pattern4_0, + args: ref pattern4_1, + cond: ref pattern4_2, + } => { + if let &Opcode::SelectifSpectreGuard = &pattern4_0 { + let (pattern6_0, pattern6_1, pattern6_2) = + C::unpack_value_array_3(ctx, &pattern4_1); + if let Some(pattern7_0) = C::def_inst(ctx, pattern6_0) { + let pattern8_0 = C::inst_data(ctx, pattern7_0); + if let &InstructionData::Binary { + opcode: ref pattern9_0, + args: ref pattern9_1, + } = &pattern8_0 + { + if let &Opcode::Ifcmp = &pattern9_0 { + let (pattern11_0, pattern11_1) = + C::unpack_value_array_2(ctx, &pattern9_1); + // Rule at src/isa/s390x/lower.isle line 1776. + let expr0_0: bool = false; + let expr1_0 = constructor_icmp_val( + ctx, + expr0_0, + &pattern4_2, + pattern11_0, + pattern11_1, + )?; + let expr2_0 = C::put_in_reg(ctx, pattern6_1); + let expr3_0 = C::put_in_reg(ctx, pattern6_2); + let expr4_0 = constructor_select_bool_reg( + ctx, pattern2_0, &expr1_0, expr2_0, expr3_0, + )?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + } + } + &InstructionData::Unary { + opcode: ref pattern4_0, + arg: pattern4_1, + } => { + match &pattern4_0 { + &Opcode::Sqrt => { + // Rule at src/isa/s390x/lower.isle line 971. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_sqrt_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Fneg => { + // Rule at src/isa/s390x/lower.isle line 978. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_fneg_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Fabs => { + // Rule at src/isa/s390x/lower.isle line 985. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_fabs_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Ceil => { + // Rule at src/isa/s390x/lower.isle line 992. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_ceil_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Floor => { + // Rule at src/isa/s390x/lower.isle line 999. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_floor_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Trunc => { + // Rule at src/isa/s390x/lower.isle line 1006. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_trunc_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Nearest => { + // Rule at src/isa/s390x/lower.isle line 1013. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = constructor_nearest_reg(ctx, pattern2_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Bextend => { + // Rule at src/isa/s390x/lower.isle line 755. 
+ let expr0_0 = constructor_cast_bool(ctx, pattern2_0, pattern4_1)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Bmask => { + // Rule at src/isa/s390x/lower.isle line 757. + let expr0_0 = constructor_cast_bool(ctx, pattern2_0, pattern4_1)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Fpromote => { + let pattern6_0 = C::value_type(ctx, pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1020. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = + constructor_fpromote_reg(ctx, pattern2_0, pattern6_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Fdemote => { + let pattern6_0 = C::value_type(ctx, pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1027. + let expr0_0 = C::put_in_reg(ctx, pattern4_1); + let expr1_0 = + constructor_fdemote_reg(ctx, pattern2_0, pattern6_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::FcvtFromUint => { + let pattern6_0 = C::value_type(ctx, pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1034. + let expr0_0 = constructor_ty_ext32(ctx, pattern6_0)?; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern4_1)?; + let expr2_0 = + constructor_fcvt_from_uint_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::FcvtFromSint => { + let pattern6_0 = C::value_type(ctx, pattern4_1); + // Rule at src/isa/s390x/lower.isle line 1042. + let expr0_0 = constructor_ty_ext32(ctx, pattern6_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern4_1)?; + let expr2_0 = + constructor_fcvt_from_sint_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + _ => {} + } + } + _ => {} + } + if let Some(()) = C::mie2_enabled(ctx, pattern2_0) { + if let Some(pattern4_0) = C::fits_in_64(ctx, pattern2_0) { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + match &pattern5_0 { + &InstructionData::Binary { + opcode: ref pattern6_0, + args: ref pattern6_1, + } => { + match &pattern6_0 { + &Opcode::BandNot => { + let (pattern8_0, pattern8_1) = + C::unpack_value_array_2(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 697. + let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = + constructor_and_not_reg(ctx, pattern4_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::BorNot => { + let (pattern8_0, pattern8_1) = + C::unpack_value_array_2(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 708. + let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = + constructor_or_not_reg(ctx, pattern4_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::BxorNot => { + let (pattern8_0, pattern8_1) = + C::unpack_value_array_2(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 719. 
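The `fcvt_from_uint`/`fcvt_from_sint` rules above widen narrow integer inputs to at least 32 bits first (`ty_ext32` with `put_in_reg_zext32`/`put_in_reg_sext32`), since the convert instructions only come in 32- and 64-bit source widths. For the unsigned case the zero-extension is what keeps, say, a u8 value of 255 from being read as -1. A sketch of the two extension choices (function names invented):

fn u8_to_f32(x: u8) -> f32 {
    // zero-extend to 32 bits, then convert
    let widened: u32 = x as u32;
    widened as f32
}

fn i8_to_f32(x: i8) -> f32 {
    // sign-extend instead for the signed conversion
    let widened: i32 = x as i32;
    widened as f32
}

fn main() {
    assert_eq!(u8_to_f32(255), 255.0);
    assert_eq!(i8_to_f32(-1), -1.0);
}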
+ let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = + constructor_xor_not_reg(ctx, pattern4_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + _ => {} + } + } + &InstructionData::Ternary { + opcode: ref pattern6_0, + args: ref pattern6_1, + } => { + if let &Opcode::Bitselect = &pattern6_0 { + let (pattern8_0, pattern8_1, pattern8_2) = + C::unpack_value_array_3(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 730. + let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = constructor_and_reg(ctx, pattern4_0, expr1_0, expr0_0)?; + let expr3_0 = C::put_in_reg(ctx, pattern8_2); + let expr4_0 = + constructor_and_not_reg(ctx, pattern4_0, expr3_0, expr0_0)?; + let expr5_0 = constructor_or_reg(ctx, pattern4_0, expr4_0, expr2_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + } + &InstructionData::Unary { + opcode: ref pattern6_0, + arg: pattern6_1, + } => { + match &pattern6_0 { + &Opcode::Bnot => { + // Rule at src/isa/s390x/lower.isle line 620. + let expr0_0 = C::put_in_reg(ctx, pattern6_1); + let expr1_0 = + constructor_or_not_reg(ctx, pattern4_0, expr0_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Popcnt => { + // Rule at src/isa/s390x/lower.isle line 884. + let expr0_0 = constructor_put_in_reg_zext64(ctx, pattern6_1)?; + let expr1_0 = constructor_popcnt_reg(ctx, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + _ => {} + } + } + _ => {} + } + } + } + if let Some(()) = C::mie2_disabled(ctx, pattern2_0) { + if pattern2_0 == I16 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Unary { + opcode: ref pattern6_0, + arg: pattern6_1, + } = &pattern5_0 + { + if let &Opcode::Popcnt = &pattern6_0 { + // Rule at src/isa/s390x/lower.isle line 893. + let expr0_0 = C::put_in_reg(ctx, pattern6_1); + let expr1_0 = constructor_popcnt_byte(ctx, expr0_0)?; + let expr2_0: Type = I32; + let expr3_0: Type = I32; + let expr4_0: u8 = 8; + let expr5_0 = constructor_lshl_imm(ctx, expr3_0, expr1_0, expr4_0)?; + let expr6_0 = constructor_add_reg(ctx, expr2_0, expr1_0, expr5_0)?; + let expr7_0: Type = I32; + let expr8_0: u8 = 8; + let expr9_0 = constructor_lshr_imm(ctx, expr7_0, expr6_0, expr8_0)?; + let expr10_0 = C::value_reg(ctx, expr9_0); + return Some(expr10_0); + } + } + } + if pattern2_0 == I32 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Unary { + opcode: ref pattern6_0, + arg: pattern6_1, + } = &pattern5_0 + { + if let &Opcode::Popcnt = &pattern6_0 { + // Rule at src/isa/s390x/lower.isle line 898. 
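Without the miscellaneous-instruction-extensions-2 facility there is no full-register population count, so the `popcnt` rules here (I16 above, I32/I64 below) use `popcnt_byte`, which produces a per-byte count, and then sum the byte counts with a shift-and-add ladder, leaving the total in the top byte. A standalone model of the 32-bit fold (names invented; this mirrors the lshl/add/lshr sequence in the I32 rule that follows):

fn popcnt32_via_byte_counts(x: u32) -> u32 {
    // popcnt_byte: population count of each byte, kept in its own byte lane.
    let mut per_byte = 0u32;
    for i in 0..4 {
        per_byte |= ((x >> (8 * i)) as u8).count_ones() << (8 * i);
    }
    // Fold the lanes: after these two shift-and-add steps the top byte holds the
    // total (no lane can carry, since each partial sum is at most 32).
    let s = per_byte + (per_byte << 16);
    let s = s + (s << 8);
    s >> 24 // lshr_imm 24
}

fn main() {
    for x in [0u32, 1, 0xFF00_00FF, u32::MAX] {
        assert_eq!(popcnt32_via_byte_counts(x), x.count_ones());
    }
}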
+ let expr0_0 = C::put_in_reg(ctx, pattern6_1); + let expr1_0 = constructor_popcnt_byte(ctx, expr0_0)?; + let expr2_0: Type = I32; + let expr3_0: Type = I32; + let expr4_0: u8 = 16; + let expr5_0 = constructor_lshl_imm(ctx, expr3_0, expr1_0, expr4_0)?; + let expr6_0 = constructor_add_reg(ctx, expr2_0, expr1_0, expr5_0)?; + let expr7_0: Type = I32; + let expr8_0: Type = I32; + let expr9_0: u8 = 8; + let expr10_0 = constructor_lshl_imm(ctx, expr8_0, expr6_0, expr9_0)?; + let expr11_0 = constructor_add_reg(ctx, expr7_0, expr6_0, expr10_0)?; + let expr12_0: Type = I32; + let expr13_0: u8 = 24; + let expr14_0 = constructor_lshr_imm(ctx, expr12_0, expr11_0, expr13_0)?; + let expr15_0 = C::value_reg(ctx, expr14_0); + return Some(expr15_0); + } + } + } + if pattern2_0 == I64 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Unary { + opcode: ref pattern6_0, + arg: pattern6_1, + } = &pattern5_0 + { + if let &Opcode::Popcnt = &pattern6_0 { + // Rule at src/isa/s390x/lower.isle line 904. + let expr0_0 = C::put_in_reg(ctx, pattern6_1); + let expr1_0 = constructor_popcnt_byte(ctx, expr0_0)?; + let expr2_0: Type = I64; + let expr3_0: Type = I64; + let expr4_0: u8 = 32; + let expr5_0 = constructor_lshl_imm(ctx, expr3_0, expr1_0, expr4_0)?; + let expr6_0 = constructor_add_reg(ctx, expr2_0, expr1_0, expr5_0)?; + let expr7_0: Type = I64; + let expr8_0: Type = I64; + let expr9_0: u8 = 16; + let expr10_0 = constructor_lshl_imm(ctx, expr8_0, expr6_0, expr9_0)?; + let expr11_0 = constructor_add_reg(ctx, expr7_0, expr6_0, expr10_0)?; + let expr12_0: Type = I64; + let expr13_0: Type = I64; + let expr14_0: u8 = 8; + let expr15_0 = constructor_lshl_imm(ctx, expr13_0, expr11_0, expr14_0)?; + let expr16_0 = constructor_add_reg(ctx, expr12_0, expr11_0, expr15_0)?; + let expr17_0: Type = I64; + let expr18_0: u8 = 56; + let expr19_0 = constructor_lshr_imm(ctx, expr17_0, expr16_0, expr18_0)?; + let expr20_0 = C::value_reg(ctx, expr19_0); + return Some(expr20_0); + } + } + } + if let Some(pattern4_0) = C::fits_in_64(ctx, pattern2_0) { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + match &pattern5_0 { + &InstructionData::Binary { + opcode: ref pattern6_0, + args: ref pattern6_1, + } => { + match &pattern6_0 { + &Opcode::BandNot => { + let (pattern8_0, pattern8_1) = + C::unpack_value_array_2(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 701. + let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = + constructor_and_reg(ctx, pattern4_0, expr0_0, expr1_0)?; + let expr3_0 = constructor_not_reg(ctx, pattern4_0, expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &Opcode::BorNot => { + let (pattern8_0, pattern8_1) = + C::unpack_value_array_2(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 712. + let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = + constructor_or_reg(ctx, pattern4_0, expr0_0, expr1_0)?; + let expr3_0 = constructor_not_reg(ctx, pattern4_0, expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &Opcode::BxorNot => { + let (pattern8_0, pattern8_1) = + C::unpack_value_array_2(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 723. 
+ let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = + constructor_xor_reg(ctx, pattern4_0, expr0_0, expr1_0)?; + let expr3_0 = constructor_not_reg(ctx, pattern4_0, expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + _ => {} + } + } + &InstructionData::Ternary { + opcode: ref pattern6_0, + args: ref pattern6_1, + } => { + if let &Opcode::Bitselect = &pattern6_0 { + let (pattern8_0, pattern8_1, pattern8_2) = + C::unpack_value_array_3(ctx, &pattern6_1); + // Rule at src/isa/s390x/lower.isle line 737. + let expr0_0 = C::put_in_reg(ctx, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern8_1); + let expr2_0 = constructor_and_reg(ctx, pattern4_0, expr1_0, expr0_0)?; + let expr3_0 = C::put_in_reg(ctx, pattern8_2); + let expr4_0 = constructor_and_reg(ctx, pattern4_0, expr3_0, expr0_0)?; + let expr5_0 = constructor_not_reg(ctx, pattern4_0, expr4_0)?; + let expr6_0 = constructor_or_reg(ctx, pattern4_0, expr5_0, expr2_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + } + &InstructionData::Unary { + opcode: ref pattern6_0, + arg: pattern6_1, + } => { + if let &Opcode::Bnot = &pattern6_0 { + // Rule at src/isa/s390x/lower.isle line 625. + let expr0_0 = C::put_in_reg(ctx, pattern6_1); + let expr1_0 = constructor_not_reg(ctx, pattern4_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + _ => {} + } + } + } + if let Some(()) = C::vxrs_ext2_enabled(ctx, pattern2_0) { + if pattern2_0 == F32 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::littleendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/lower.isle line 1221. + let expr0_0 = + constructor_lower_address(ctx, pattern6_2, pattern6_1, pattern6_3)?; + let expr1_0 = constructor_fpu_loadrev32(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + } + if pattern2_0 == F64 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::littleendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/lower.isle line 1236. + let expr0_0 = + constructor_lower_address(ctx, pattern6_2, pattern6_1, pattern6_3)?; + let expr1_0 = constructor_fpu_loadrev64(ctx, &expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + } + } + if let Some(()) = C::vxrs_ext2_disabled(ctx, pattern2_0) { + if pattern2_0 == F32 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::littleendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/lower.isle line 1226. 
+ let expr0_0 = + constructor_lower_address(ctx, pattern6_2, pattern6_1, pattern6_3)?; + let expr1_0 = constructor_loadrev32(ctx, &expr0_0)?; + let expr2_0: Type = I64; + let expr3_0: u8 = 32; + let expr4_0 = constructor_lshl_imm(ctx, expr2_0, expr1_0, expr3_0)?; + let expr5_0 = constructor_mov_to_fpr(ctx, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + } + } + } + if pattern2_0 == F64 { + let pattern5_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Load { + opcode: ref pattern6_0, + arg: pattern6_1, + flags: pattern6_2, + offset: pattern6_3, + } = &pattern5_0 + { + if let &Opcode::Load = &pattern6_0 { + if let Some(()) = C::littleendian(ctx, pattern6_2) { + // Rule at src/isa/s390x/lower.isle line 1241. + let expr0_0 = + constructor_lower_address(ctx, pattern6_2, pattern6_1, pattern6_3)?; + let expr1_0 = constructor_loadrev64(ctx, &expr0_0)?; + let expr2_0 = constructor_mov_to_fpr(ctx, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern3_0) = C::fits_in_16(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } = &pattern4_0 + { + if let &Opcode::Bint = &pattern5_0 { + // Rule at src/isa/s390x/lower.isle line 791. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0: u16 = 1; + let expr2_0: u8 = 0; + let expr3_0 = C::uimm16shifted(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_and_uimm16shifted(ctx, pattern3_0, expr0_0, expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + if let Some(pattern3_0) = C::fits_in_32(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } = &pattern4_0 + { + if let &Opcode::Bint = &pattern5_0 { + // Rule at src/isa/s390x/lower.isle line 795. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0: u32 = 1; + let expr2_0: u8 = 0; + let expr3_0 = C::uimm32shifted(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_and_uimm32shifted(ctx, pattern3_0, expr0_0, expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + } + } + if let Some(pattern3_0) = C::fits_in_64(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Binary { + opcode: ref pattern5_0, + args: ref pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Iadd => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i16_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 67. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_add_simm16(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::i32_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 71. 
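The `iadd` rules above try progressively wider immediate forms before falling back to a register-register add: a constant operand that fits in a signed 16-bit immediate uses `add_simm16`, one that fits in a signed 32-bit immediate uses `add_simm32`, and anything else goes through `add_reg`. A sketch of that selection order; the enum and helper are invented for illustration:

#[derive(Debug, PartialEq)]
enum AddForm {
    Simm16(i16), // add_simm16: signed 16-bit immediate form
    Simm32(i32), // add_simm32: signed 32-bit immediate form
    Register,    // add_reg fallback
}

fn pick_add_form(constant: i64) -> AddForm {
    if let Ok(v) = i16::try_from(constant) {
        AddForm::Simm16(v)
    } else if let Ok(v) = i32::try_from(constant) {
        AddForm::Simm32(v)
    } else {
        AddForm::Register
    }
}

fn main() {
    assert_eq!(pick_add_form(-5), AddForm::Simm16(-5));
    assert_eq!(pick_add_form(100_000), AddForm::Simm32(100_000));
    assert_eq!(pick_add_form(1 << 40), AddForm::Register);
}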
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_add_simm32(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Sextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 61. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_add_reg_sext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + match &pattern10_0 { + &Opcode::Sload16 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 89. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_sload16(ctx, pattern8_0)?; + let expr2_0 = constructor_add_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload32 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 93. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_sload32(ctx, pattern8_0)?; + let expr2_0 = constructor_add_mem_sext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_0); + if pattern8_0 == I16 { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 83. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_add_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 77. 
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_add_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::i16_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 65. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_add_simm16(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::i32_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 69. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_add_simm32(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Sextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 59. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_add_reg_sext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + match &pattern10_0 { + &Opcode::Sload16 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 87. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_sload16(ctx, pattern8_0)?; + let expr2_0 = constructor_add_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload32 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 91. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_sload32(ctx, pattern8_0)?; + let expr2_0 = constructor_add_mem_sext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if pattern8_0 == I16 { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 81. 
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_add_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 75. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_add_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 55. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_add_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Isub => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i16_from_negated_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 108. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_add_simm16(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::i32_from_negated_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 110. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_add_simm32(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Sextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 104. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_sub_reg_sext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + match &pattern10_0 { + &Opcode::Sload16 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 122. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_sload16(ctx, pattern8_0)?; + let expr2_0 = constructor_sub_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload32 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 124. 
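The `isub` rules above reuse the add-immediate forms: subtracting a constant is rewritten as adding its negation (`i16_from_negated_value`/`i32_from_negated_value` feeding `add_simm16`/`add_simm32`). A sketch of the two's-complement identity being relied on:

fn sub_as_add_of_negation(x: u64, c: u64) -> u64 {
    // x - c and x + (-c) agree in wrapping (two's-complement) arithmetic.
    x.wrapping_add(c.wrapping_neg())
}

fn main() {
    for (x, c) in [(10u64, 3u64), (0, 1), (u64::MAX, u64::MAX)] {
        assert_eq!(sub_as_add_of_negation(x, c), x.wrapping_sub(c));
    }
}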
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_sload32(ctx, pattern8_0)?; + let expr2_0 = constructor_sub_mem_sext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if pattern8_0 == I16 { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 118. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_sub_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 114. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_sub_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 100. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_sub_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Imul => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i16_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 207. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_mul_simm16(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::i32_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 211. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_mul_simm32(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Sextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 201. 
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_mul_reg_sext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + match &pattern10_0 { + &Opcode::Sload16 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 229. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_sload16(ctx, pattern8_0)?; + let expr2_0 = constructor_mul_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload32 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 233. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_sload32(ctx, pattern8_0)?; + let expr2_0 = constructor_mul_mem_sext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_0); + if pattern8_0 == I16 { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 223. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_mul_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 217. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_mul_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::i16_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 205. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_mul_simm16(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::i32_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 209. 
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_mul_simm32(ctx, pattern3_0, expr0_0, pattern8_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Sextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 199. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_mul_reg_sext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + match &pattern10_0 { + &Opcode::Sload16 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 227. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_sload16(ctx, pattern8_0)?; + let expr2_0 = constructor_mul_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload32 => { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 231. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_sload32(ctx, pattern8_0)?; + let expr2_0 = constructor_mul_mem_sext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if pattern8_0 == I16 { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 221. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_mul_mem_sext16( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 215. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_mul_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 195. 
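+ // Base case: multiply two register operands.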
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_mul_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Udiv => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 298. + let expr0_0 = constructor_zero_divisor_check_needed(ctx, pattern7_1)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0: u64 = 0; + let expr3_0 = constructor_uninitialized_regpair(ctx)?; + let expr4_0 = + constructor_imm_regpair_hi(ctx, expr1_0, expr2_0, &expr3_0)?; + let expr5_0 = + constructor_put_in_regpair_lo_zext32(ctx, pattern7_0, &expr4_0)?; + let expr6_0 = constructor_put_in_reg_zext32(ctx, pattern7_1)?; + let expr7_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr8_0 = constructor_maybe_trap_if_zero_divisor( + ctx, expr0_0, expr7_0, expr6_0, + )?; + let expr9_0 = constructor_udivmod(ctx, expr7_0, &expr5_0, expr6_0)?; + let expr10_0 = constructor_regpair_lo(ctx, &expr9_0)?; + let expr11_0 = constructor_copy_reg(ctx, pattern3_0, expr10_0)?; + let expr12_0 = C::value_reg(ctx, expr11_0); + return Some(expr12_0); + } + &Opcode::Sdiv => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 370. + let expr0_0 = constructor_zero_divisor_check_needed(ctx, pattern7_1)?; + let expr1_0 = constructor_div_overflow_check_needed(ctx, pattern7_1)?; + let expr2_0 = constructor_uninitialized_regpair(ctx)?; + let expr3_0 = + constructor_put_in_regpair_lo_sext64(ctx, pattern7_0, &expr2_0)?; + let expr4_0 = constructor_put_in_reg_sext32(ctx, pattern7_1)?; + let expr5_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr6_0 = constructor_maybe_trap_if_zero_divisor( + ctx, expr0_0, expr5_0, expr4_0, + )?; + let expr7_0 = constructor_maybe_trap_if_sdiv_overflow( + ctx, expr1_0, expr5_0, pattern3_0, &expr3_0, expr4_0, + )?; + let expr8_0 = constructor_sdivmod(ctx, expr5_0, &expr3_0, expr4_0)?; + let expr9_0 = constructor_regpair_lo(ctx, &expr8_0)?; + let expr10_0 = constructor_copy_reg(ctx, pattern3_0, expr9_0)?; + let expr11_0 = C::value_reg(ctx, expr10_0); + return Some(expr11_0); + } + &Opcode::Urem => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 321. + let expr0_0 = constructor_zero_divisor_check_needed(ctx, pattern7_1)?; + let expr1_0: u64 = 0; + let expr2_0 = constructor_uninitialized_regpair(ctx)?; + let expr3_0 = + constructor_imm_regpair_hi(ctx, pattern3_0, expr1_0, &expr2_0)?; + let expr4_0 = + constructor_put_in_regpair_lo_zext32(ctx, pattern7_0, &expr3_0)?; + let expr5_0 = constructor_put_in_reg_zext32(ctx, pattern7_1)?; + let expr6_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr7_0 = constructor_maybe_trap_if_zero_divisor( + ctx, expr0_0, expr6_0, expr5_0, + )?; + let expr8_0 = constructor_udivmod(ctx, expr6_0, &expr4_0, expr5_0)?; + let expr9_0 = constructor_regpair_hi(ctx, &expr8_0)?; + let expr10_0 = constructor_copy_reg(ctx, pattern3_0, expr9_0)?; + let expr11_0 = C::value_reg(ctx, expr10_0); + return Some(expr11_0); + } + &Opcode::Srem => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 393. 
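+ // srem: widen the dividend into a register pair, optionally trap on a zero divisor,
+ // avoid the INT_MIN % -1 overflow case where necessary, and take the remainder from
+ // the high half of the divide result.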
+ let expr0_0 = constructor_zero_divisor_check_needed(ctx, pattern7_1)?; + let expr1_0 = constructor_div_overflow_check_needed(ctx, pattern7_1)?; + let expr2_0 = constructor_uninitialized_regpair(ctx)?; + let expr3_0 = + constructor_put_in_regpair_lo_sext64(ctx, pattern7_0, &expr2_0)?; + let expr4_0 = constructor_put_in_reg_sext32(ctx, pattern7_1)?; + let expr5_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr6_0 = constructor_maybe_trap_if_zero_divisor( + ctx, expr0_0, expr5_0, expr4_0, + )?; + let expr7_0 = constructor_maybe_avoid_srem_overflow( + ctx, expr1_0, expr5_0, &expr3_0, expr4_0, + )?; + let expr8_0 = constructor_sdivmod(ctx, expr5_0, &expr7_0, expr4_0)?; + let expr9_0 = constructor_regpair_hi(ctx, &expr8_0)?; + let expr10_0 = constructor_copy_reg(ctx, pattern3_0, expr9_0)?; + let expr11_0 = C::value_reg(ctx, expr10_0); + return Some(expr11_0); + } + &Opcode::IaddIfcout => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::u32_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 165. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_add_logical_zimm32( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = constructor_value_regs_ifcout(ctx, expr1_0)?; + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Uextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 159. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_add_logical_reg_zext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = + constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + if let &Opcode::Uload32 = &pattern10_0 { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 177. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_uload32(ctx, pattern8_0)?; + let expr2_0 = constructor_add_logical_mem_zext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = + constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_0); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 171. 
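+ // Fold a big-endian full-width load into the carry-producing add.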
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_add_logical_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = + constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::u32_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 163. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = constructor_add_logical_zimm32( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = constructor_value_regs_ifcout(ctx, expr1_0)?; + return Some(expr2_0); + } + if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Unary { + opcode: ref pattern10_0, + arg: pattern10_1, + } = &pattern9_0 + { + if let &Opcode::Uextend = &pattern10_0 { + let pattern12_0 = C::value_type(ctx, pattern10_1); + if pattern12_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 157. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern10_1); + let expr2_0 = constructor_add_logical_reg_zext32( + ctx, pattern3_0, expr0_0, expr1_0, + )?; + let expr3_0 = + constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + } + } + } + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + if let &Opcode::Uload32 = &pattern10_0 { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 175. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_uload32(ctx, pattern8_0)?; + let expr2_0 = constructor_add_logical_mem_zext32( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = + constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + } + } + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 169. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_add_logical_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = + constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 153. 
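+ // Base case: carry-producing add (add logical) of two registers; the flags result is returned in a dummy register.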
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = + constructor_add_logical_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = constructor_value_regs_ifcout(ctx, expr2_0)?; + return Some(expr3_0); + } + &Opcode::Band => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + let pattern8_0 = C::value_type(ctx, pattern7_0); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 648. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_and_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = + C::uimm32shifted_from_inverted_value(ctx, pattern7_0) + { + // Rule at src/isa/s390x/lower.isle line 642. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_and_uimm32shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = + C::uimm16shifted_from_inverted_value(ctx, pattern7_0) + { + // Rule at src/isa/s390x/lower.isle line 638. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_and_uimm16shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 646. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_and_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = + C::uimm32shifted_from_inverted_value(ctx, pattern7_1) + { + // Rule at src/isa/s390x/lower.isle line 640. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = constructor_and_uimm32shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = + C::uimm16shifted_from_inverted_value(ctx, pattern7_1) + { + // Rule at src/isa/s390x/lower.isle line 636. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = constructor_and_uimm16shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + // Rule at src/isa/s390x/lower.isle line 632. 
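+ // Base case: AND of two registers.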
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_and_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Bor => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + let pattern8_0 = C::value_type(ctx, pattern7_0); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 671. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_or_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::uimm32shifted_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 665. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_or_uimm32shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::uimm16shifted_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 661. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_or_uimm16shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 669. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_or_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::uimm32shifted_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 663. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = constructor_or_uimm32shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + if let Some(pattern8_0) = C::uimm16shifted_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 659. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = constructor_or_uimm16shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + // Rule at src/isa/s390x/lower.isle line 655. 
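+ // Base case: OR of two registers.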
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_or_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Bxor => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + let pattern8_0 = C::value_type(ctx, pattern7_0); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_0) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 690. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_xor_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::uimm32shifted_from_value(ctx, pattern7_0) { + // Rule at src/isa/s390x/lower.isle line 684. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_xor_uimm32shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + let pattern8_0 = C::value_type(ctx, pattern7_1); + if let Some(pattern9_0) = C::ty_32_or_64(ctx, pattern8_0) { + if let Some(pattern10_0) = C::sinkable_inst(ctx, pattern7_1) { + let pattern11_0 = C::inst_data(ctx, pattern10_0); + if let &InstructionData::Load { + opcode: ref pattern12_0, + arg: pattern12_1, + flags: pattern12_2, + offset: pattern12_3, + } = &pattern11_0 + { + if let &Opcode::Load = &pattern12_0 { + if let Some(()) = C::bigendian(ctx, pattern12_2) { + // Rule at src/isa/s390x/lower.isle line 688. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = + constructor_sink_load(ctx, pattern10_0)?; + let expr2_0 = constructor_xor_mem( + ctx, pattern3_0, expr0_0, &expr1_0, + )?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern8_0) = C::uimm32shifted_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 682. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = constructor_xor_uimm32shifted( + ctx, pattern3_0, expr0_0, pattern8_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + // Rule at src/isa/s390x/lower.isle line 678. + let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_xor_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Ishl => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 477. + let expr0_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_0); + let expr2_0 = + constructor_lshl_imm(ctx, pattern3_0, expr1_0, expr0_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + // Rule at src/isa/s390x/lower.isle line 472. 
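+ // Shift amount in a register: mask it to the type width, then shift left.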
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_0); + let expr3_0 = constructor_lshl_reg(ctx, pattern3_0, expr2_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &Opcode::Ushr => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 493. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr2_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr3_0 = constructor_lshr_imm(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + // Rule at src/isa/s390x/lower.isle line 486. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr1_0)?; + let expr3_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr4_0 = constructor_lshr_reg(ctx, expr3_0, expr0_0, expr2_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + &Opcode::Sshr => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 510. + let expr0_0 = constructor_put_in_reg_sext32(ctx, pattern7_0)?; + let expr1_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr2_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr3_0 = constructor_ashr_imm(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + // Rule at src/isa/s390x/lower.isle line 503. + let expr0_0 = constructor_put_in_reg_sext32(ctx, pattern7_0)?; + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr1_0)?; + let expr3_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr4_0 = constructor_ashr_reg(ctx, expr3_0, expr0_0, expr2_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + _ => {} + } + } + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Ineg => { + if let Some(pattern7_0) = C::def_inst(ctx, pattern5_1) { + let pattern8_0 = C::inst_data(ctx, pattern7_0); + if let &InstructionData::Unary { + opcode: ref pattern9_0, + arg: pattern9_1, + } = &pattern8_0 + { + if let &Opcode::Sextend = &pattern9_0 { + let pattern11_0 = C::value_type(ctx, pattern9_1); + if pattern11_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 188. + let expr0_0 = C::put_in_reg(ctx, pattern9_1); + let expr1_0 = constructor_neg_reg_sext32( + ctx, pattern3_0, expr0_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 184. 
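+ // Base case: negate the operand in a register.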
+ let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = constructor_neg_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + &Opcode::Iabs => { + if let Some(pattern7_0) = C::def_inst(ctx, pattern5_1) { + let pattern8_0 = C::inst_data(ctx, pattern7_0); + if let &InstructionData::Unary { + opcode: ref pattern9_0, + arg: pattern9_1, + } = &pattern8_0 + { + if let &Opcode::Sextend = &pattern9_0 { + let pattern11_0 = C::value_type(ctx, pattern9_1); + if pattern11_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 136. + let expr0_0 = C::put_in_reg(ctx, pattern9_1); + let expr1_0 = constructor_abs_reg_sext32( + ctx, pattern3_0, expr0_0, + )?; + let expr2_0 = C::value_reg(ctx, expr1_0); + return Some(expr2_0); + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 132. + let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern5_1)?; + let expr2_0 = constructor_abs_reg(ctx, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Clz => { + // Rule at src/isa/s390x/lower.isle line 816. + let expr0_0 = constructor_put_in_reg_zext64(ctx, pattern5_1)?; + let expr1_0: i16 = 64; + let expr2_0 = constructor_clz_reg(ctx, expr1_0, expr0_0)?; + let expr3_0 = constructor_regpair_hi(ctx, &expr2_0)?; + let expr4_0 = constructor_clz_offset(ctx, pattern3_0, expr3_0)?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + &Opcode::Cls => { + // Rule at src/isa/s390x/lower.isle line 831. + let expr0_0 = constructor_put_in_reg_sext64(ctx, pattern5_1)?; + let expr1_0: Type = I64; + let expr2_0: u8 = 63; + let expr3_0 = constructor_ashr_imm(ctx, expr1_0, expr0_0, expr2_0)?; + let expr4_0: Type = I64; + let expr5_0 = constructor_xor_reg(ctx, expr4_0, expr0_0, expr3_0)?; + let expr6_0: i16 = 64; + let expr7_0 = constructor_clz_reg(ctx, expr6_0, expr5_0)?; + let expr8_0 = constructor_regpair_hi(ctx, &expr7_0)?; + let expr9_0 = constructor_clz_offset(ctx, pattern3_0, expr8_0)?; + let expr10_0 = C::value_reg(ctx, expr9_0); + return Some(expr10_0); + } + &Opcode::Bint => { + // Rule at src/isa/s390x/lower.isle line 799. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0: u64 = 1; + let expr2_0 = constructor_imm(ctx, pattern3_0, expr1_0)?; + let expr3_0 = constructor_and_reg(ctx, pattern3_0, expr0_0, expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + _ => {} + } + } + _ => {} + } + } + if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Binary { + opcode: ref pattern5_0, + args: ref pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Rotl => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 523. + let expr0_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_0); + let expr2_0 = + constructor_rot_imm(ctx, pattern3_0, expr1_0, expr0_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + // Rule at src/isa/s390x/lower.isle line 519. 
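+ // Base case: rotate left by an amount held in a register.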
+ let expr0_0 = C::put_in_reg(ctx, pattern7_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = constructor_rot_reg(ctx, pattern3_0, expr0_0, expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Rotr => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_negated_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 561. + let expr0_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_0); + let expr2_0 = + constructor_rot_imm(ctx, pattern3_0, expr1_0, expr0_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + // Rule at src/isa/s390x/lower.isle line 555. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_neg_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_0); + let expr3_0 = constructor_rot_reg(ctx, pattern3_0, expr2_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + _ => {} + } + } + &InstructionData::AtomicRmw { + opcode: ref pattern5_0, + args: ref pattern5_1, + flags: pattern5_2, + op: ref pattern5_3, + } => { + if let &Opcode::AtomicRmw = &pattern5_0 { + let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(()) = C::bigendian(ctx, pattern5_2) { + match &pattern5_3 { + &AtomicRmwOp::Add => { + // Rule at src/isa/s390x/lower.isle line 1514. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::zero_offset(ctx); + let expr2_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr1_0, + )?; + let expr3_0 = constructor_atomic_rmw_add( + ctx, pattern3_0, expr0_0, &expr2_0, + )?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &AtomicRmwOp::And => { + // Rule at src/isa/s390x/lower.isle line 1496. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::zero_offset(ctx); + let expr2_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr1_0, + )?; + let expr3_0 = constructor_atomic_rmw_and( + ctx, pattern3_0, expr0_0, &expr2_0, + )?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &AtomicRmwOp::Or => { + // Rule at src/isa/s390x/lower.isle line 1502. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::zero_offset(ctx); + let expr2_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr1_0, + )?; + let expr3_0 = constructor_atomic_rmw_or( + ctx, pattern3_0, expr0_0, &expr2_0, + )?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + &AtomicRmwOp::Sub => { + // Rule at src/isa/s390x/lower.isle line 1520. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_neg_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr2_0, + )?; + let expr4_0 = constructor_atomic_rmw_add( + ctx, pattern3_0, expr1_0, &expr3_0, + )?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } + &AtomicRmwOp::Xor => { + // Rule at src/isa/s390x/lower.isle line 1508. 
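+ // Big-endian atomic XOR on memory; the result is the previous memory contents.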
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::zero_offset(ctx); + let expr2_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr1_0, + )?; + let expr3_0 = constructor_atomic_rmw_xor( + ctx, pattern3_0, expr0_0, &expr2_0, + )?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + _ => {} + } + } + } + } + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + match &pattern5_0 { + &Opcode::FcvtToUint => { + let pattern7_0 = C::value_type(ctx, pattern5_1); + // Rule at src/isa/s390x/lower.isle line 1052. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = constructor_fcmp_reg(ctx, pattern7_0, expr0_0, expr0_0)?; + let expr2_0 = FloatCC::Unordered; + let expr3_0 = C::floatcc_as_cond(ctx, &expr2_0); + let expr4_0 = C::trap_code_bad_conversion_to_integer(ctx); + let expr5_0 = constructor_trap_if(ctx, &expr1_0, &expr3_0, &expr4_0)?; + let expr6_0 = constructor_fcvt_to_uint_reg_with_flags( + ctx, pattern3_0, pattern7_0, expr0_0, + )?; + let expr7_0 = FloatCC::Unordered; + let expr8_0 = C::floatcc_as_cond(ctx, &expr7_0); + let expr9_0 = C::trap_code_integer_overflow(ctx); + let expr10_0 = constructor_trap_if(ctx, &expr6_0, &expr8_0, &expr9_0)?; + let expr11_0 = C::value_reg(ctx, expr10_0); + return Some(expr11_0); + } + &Opcode::FcvtToUintSat => { + let pattern7_0 = C::value_type(ctx, pattern5_1); + // Rule at src/isa/s390x/lower.isle line 1093. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = + constructor_fcvt_to_uint_reg(ctx, pattern3_0, pattern7_0, expr0_0)?; + let expr2_0 = constructor_fcmp_reg(ctx, pattern7_0, expr0_0, expr0_0)?; + let expr3_0 = FloatCC::Unordered; + let expr4_0 = C::floatcc_as_cond(ctx, &expr3_0); + let expr5_0: i16 = 0; + let expr6_0 = + constructor_cmov_imm(ctx, pattern3_0, &expr4_0, expr5_0, expr1_0)?; + let expr7_0 = constructor_with_flags_1(ctx, &expr2_0, &expr6_0)?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); + } + &Opcode::FcvtToSint => { + let pattern7_0 = C::value_type(ctx, pattern5_1); + // Rule at src/isa/s390x/lower.isle line 1073. + let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = constructor_fcmp_reg(ctx, pattern7_0, expr0_0, expr0_0)?; + let expr2_0 = FloatCC::Unordered; + let expr3_0 = C::floatcc_as_cond(ctx, &expr2_0); + let expr4_0 = C::trap_code_bad_conversion_to_integer(ctx); + let expr5_0 = constructor_trap_if(ctx, &expr1_0, &expr3_0, &expr4_0)?; + let expr6_0 = constructor_fcvt_to_sint_reg_with_flags( + ctx, pattern3_0, pattern7_0, expr0_0, + )?; + let expr7_0 = FloatCC::Unordered; + let expr8_0 = C::floatcc_as_cond(ctx, &expr7_0); + let expr9_0 = C::trap_code_integer_overflow(ctx); + let expr10_0 = constructor_trap_if(ctx, &expr6_0, &expr8_0, &expr9_0)?; + let expr11_0 = C::value_reg(ctx, expr10_0); + return Some(expr11_0); + } + &Opcode::FcvtToSintSat => { + let pattern7_0 = C::value_type(ctx, pattern5_1); + // Rule at src/isa/s390x/lower.isle line 1110. 
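+ // Saturating float-to-signed conversion: convert, then conditionally move 0 into the
+ // result if the input is NaN (unordered compare of the input with itself).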
+ let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0 = + constructor_fcvt_to_sint_reg(ctx, pattern3_0, pattern7_0, expr0_0)?; + let expr2_0 = constructor_fcmp_reg(ctx, pattern7_0, expr0_0, expr0_0)?; + let expr3_0 = FloatCC::Unordered; + let expr4_0 = C::floatcc_as_cond(ctx, &expr3_0); + let expr5_0: i16 = 0; + let expr6_0 = + constructor_cmov_imm(ctx, pattern3_0, &expr4_0, expr5_0, expr1_0)?; + let expr7_0 = constructor_with_flags_1(ctx, &expr2_0, &expr6_0)?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); + } + _ => {} + } + } + _ => {} + } + } + if let Some(pattern3_0) = C::ty_8_or_16(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + if let &InstructionData::Binary { + opcode: ref pattern5_0, + args: ref pattern5_1, + } = &pattern4_0 + { + match &pattern5_0 { + &Opcode::Umulhi => { + let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 240. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern7_1)?; + let expr2_0: Type = I32; + let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0: Type = I32; + let expr5_0 = C::ty_bits(ctx, pattern3_0); + let expr6_0 = constructor_lshr_imm(ctx, expr4_0, expr3_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + &Opcode::Smulhi => { + let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 262. + let expr0_0 = constructor_put_in_reg_sext32(ctx, pattern7_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern7_1)?; + let expr2_0: Type = I32; + let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0: Type = I32; + let expr5_0 = C::ty_bits(ctx, pattern3_0); + let expr6_0 = constructor_ashr_imm(ctx, expr4_0, expr3_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + &Opcode::Rotl => { + let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + if let Some(pattern9_0) = C::i64_from_negated_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 541. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr3_0 = C::mask_amt_imm(ctx, pattern3_0, pattern9_0); + let expr4_0 = constructor_lshl_imm(ctx, expr1_0, expr0_0, expr2_0)?; + let expr5_0 = constructor_lshr_imm(ctx, expr1_0, expr0_0, expr3_0)?; + let expr6_0 = + constructor_or_reg(ctx, pattern3_0, expr4_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + } + // Rule at src/isa/s390x/lower.isle line 529. 
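+ // 8/16-bit rotate left by a register amount: zero-extend to 32 bits and combine a
+ // left shift by the amount with a right shift by its negation.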
+ let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_1); + let expr3_0 = constructor_neg_reg(ctx, pattern3_0, expr2_0)?; + let expr4_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr2_0)?; + let expr5_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr3_0)?; + let expr6_0 = constructor_lshl_reg(ctx, expr1_0, expr0_0, expr4_0)?; + let expr7_0 = constructor_lshr_reg(ctx, expr1_0, expr0_0, expr5_0)?; + let expr8_0 = constructor_or_reg(ctx, pattern3_0, expr6_0, expr7_0)?; + let expr9_0 = C::value_reg(ctx, expr8_0); + return Some(expr9_0); + } + &Opcode::Rotr => { + let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + if let Some(pattern9_0) = C::i64_from_negated_value(ctx, pattern7_1) { + // Rule at src/isa/s390x/lower.isle line 579. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr3_0 = C::mask_amt_imm(ctx, pattern3_0, pattern9_0); + let expr4_0 = constructor_lshl_imm(ctx, expr1_0, expr0_0, expr3_0)?; + let expr5_0 = constructor_lshr_imm(ctx, expr1_0, expr0_0, expr2_0)?; + let expr6_0 = + constructor_or_reg(ctx, pattern3_0, expr4_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + } + // Rule at src/isa/s390x/lower.isle line 567. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_1); + let expr3_0 = constructor_neg_reg(ctx, pattern3_0, expr2_0)?; + let expr4_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr2_0)?; + let expr5_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr3_0)?; + let expr6_0 = constructor_lshl_reg(ctx, expr1_0, expr0_0, expr5_0)?; + let expr7_0 = constructor_lshr_reg(ctx, expr1_0, expr0_0, expr4_0)?; + let expr8_0 = constructor_or_reg(ctx, pattern3_0, expr6_0, expr7_0)?; + let expr9_0 = C::value_reg(ctx, expr8_0); + return Some(expr9_0); + } + _ => {} + } + } + } + if let Some(pattern3_0) = C::gpr32_ty(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Ctz => { + // Rule at src/isa/s390x/lower.isle line 854. + let expr0_0: Type = I64; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0 = constructor_ctz_guardbit(ctx, pattern3_0)?; + let expr3_0 = + constructor_or_uimm16shifted(ctx, expr0_0, expr1_0, expr2_0)?; + let expr4_0: Type = I64; + let expr5_0: Type = I64; + let expr6_0 = constructor_neg_reg(ctx, expr5_0, expr3_0)?; + let expr7_0 = constructor_and_reg(ctx, expr4_0, expr3_0, expr6_0)?; + let expr8_0: i16 = 64; + let expr9_0 = constructor_clz_reg(ctx, expr8_0, expr7_0)?; + let expr10_0: u64 = 63; + let expr11_0 = constructor_imm(ctx, pattern3_0, expr10_0)?; + let expr12_0 = constructor_regpair_hi(ctx, &expr9_0)?; + let expr13_0 = + constructor_sub_reg(ctx, pattern3_0, expr11_0, expr12_0)?; + let expr14_0 = C::value_reg(ctx, expr13_0); + return Some(expr14_0); + } + &Opcode::Uextend => { + // Rule at src/isa/s390x/lower.isle line 598. 
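+ // uextend to a 32-bit-or-narrower result: zero-extend the source into a GPR.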
+ let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Sextend => { + // Rule at src/isa/s390x/lower.isle line 609. + let expr0_0 = constructor_put_in_reg_sext32(ctx, pattern5_1)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + _ => {} + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + match &pattern5_0 { + &Opcode::Uload8 => { + // Rule at src/isa/s390x/lower.isle line 1250. + let expr0_0: Type = I8; + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr2_0 = constructor_zext32_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Sload8 => { + // Rule at src/isa/s390x/lower.isle line 1261. + let expr0_0: Type = I8; + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr2_0 = constructor_sext32_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Uload16 => { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1277. + let expr0_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr1_0 = constructor_loadrev16(ctx, &expr0_0)?; + let expr2_0: Type = I16; + let expr3_0 = constructor_zext32_reg(ctx, expr2_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1272. + let expr0_0: Type = I16; + let expr1_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr2_0 = constructor_zext32_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload16 => { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1302. + let expr0_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr1_0 = constructor_loadrev16(ctx, &expr0_0)?; + let expr2_0: Type = I16; + let expr3_0 = constructor_sext32_reg(ctx, expr2_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1297. + let expr0_0: Type = I16; + let expr1_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr2_0 = constructor_sext32_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + _ => {} + } + } + _ => {} + } + } + if let Some(pattern3_0) = C::gpr64_ty(ctx, pattern2_0) { + let pattern4_0 = C::inst_data(ctx, pattern0_0); + match &pattern4_0 { + &InstructionData::Unary { + opcode: ref pattern5_0, + arg: pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Ctz => { + // Rule at src/isa/s390x/lower.isle line 869. 
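+ // 64-bit ctz: isolate the lowest set bit (x & -x), count its leading zeros, and subtract the count from 63.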
+ let expr0_0 = C::put_in_reg(ctx, pattern5_1); + let expr1_0: Type = I64; + let expr2_0: Type = I64; + let expr3_0 = constructor_neg_reg(ctx, expr2_0, expr0_0)?; + let expr4_0 = constructor_and_reg(ctx, expr1_0, expr0_0, expr3_0)?; + let expr5_0: i16 = -1; + let expr6_0 = constructor_clz_reg(ctx, expr5_0, expr4_0)?; + let expr7_0: Type = I64; + let expr8_0: Type = I64; + let expr9_0: u64 = 63; + let expr10_0 = constructor_imm(ctx, expr8_0, expr9_0)?; + let expr11_0 = constructor_regpair_hi(ctx, &expr6_0)?; + let expr12_0 = constructor_sub_reg(ctx, expr7_0, expr10_0, expr11_0)?; + let expr13_0 = C::value_reg(ctx, expr12_0); + return Some(expr13_0); + } + &Opcode::Uextend => { + // Rule at src/isa/s390x/lower.isle line 602. + let expr0_0 = constructor_put_in_reg_zext64(ctx, pattern5_1)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + &Opcode::Sextend => { + // Rule at src/isa/s390x/lower.isle line 613. + let expr0_0 = constructor_put_in_reg_sext64(ctx, pattern5_1)?; + let expr1_0 = C::value_reg(ctx, expr0_0); + return Some(expr1_0); + } + _ => {} + } + } + &InstructionData::Load { + opcode: ref pattern5_0, + arg: pattern5_1, + flags: pattern5_2, + offset: pattern5_3, + } => { + match &pattern5_0 { + &Opcode::Uload8 => { + // Rule at src/isa/s390x/lower.isle line 1254. + let expr0_0: Type = I8; + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr2_0 = constructor_zext64_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Sload8 => { + // Rule at src/isa/s390x/lower.isle line 1265. + let expr0_0: Type = I8; + let expr1_0 = + constructor_lower_address(ctx, pattern5_2, pattern5_1, pattern5_3)?; + let expr2_0 = constructor_sext64_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + &Opcode::Uload16 => { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1288. + let expr0_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr1_0 = constructor_loadrev16(ctx, &expr0_0)?; + let expr2_0: Type = I16; + let expr3_0 = constructor_zext64_reg(ctx, expr2_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1283. + let expr0_0: Type = I16; + let expr1_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr2_0 = constructor_zext64_mem(ctx, expr0_0, &expr1_0)?; + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); + } + } + &Opcode::Sload16 => { + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1313. + let expr0_0 = constructor_lower_address( + ctx, pattern5_2, pattern5_1, pattern5_3, + )?; + let expr1_0 = constructor_loadrev16(ctx, &expr0_0)?; + let expr2_0: Type = I16; + let expr3_0 = constructor_sext64_reg(ctx, expr2_0, expr1_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1308. 
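+ // Big-endian sload16 into a 64-bit result: sign-extending 16-bit load.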
+ let expr0_0: Type = I16;
+ let expr1_0 = constructor_lower_address(
+ ctx, pattern5_2, pattern5_1, pattern5_3,
+ )?;
+ let expr2_0 = constructor_sext64_mem(ctx, expr0_0, &expr1_0)?;
+ let expr3_0 = C::value_reg(ctx, expr2_0);
+ return Some(expr3_0);
+ }
+ }
+ &Opcode::Uload32 => {
+ if let Some(()) = C::littleendian(ctx, pattern5_2) {
+ // Rule at src/isa/s390x/lower.isle line 1327.
+ let expr0_0 = constructor_lower_address(
+ ctx, pattern5_2, pattern5_1, pattern5_3,
+ )?;
+ let expr1_0 = constructor_loadrev32(ctx, &expr0_0)?;
+ let expr2_0: Type = I32;
+ let expr3_0 = constructor_zext64_reg(ctx, expr2_0, expr1_0)?;
+ let expr4_0 = C::value_reg(ctx, expr3_0);
+ return Some(expr4_0);
+ }
+ if let Some(()) = C::bigendian(ctx, pattern5_2) {
+ // Rule at src/isa/s390x/lower.isle line 1322.
+ let expr0_0: Type = I32;
+ let expr1_0 = constructor_lower_address(
+ ctx, pattern5_2, pattern5_1, pattern5_3,
+ )?;
+ let expr2_0 = constructor_zext64_mem(ctx, expr0_0, &expr1_0)?;
+ let expr3_0 = C::value_reg(ctx, expr2_0);
+ return Some(expr3_0);
+ }
+ }
+ &Opcode::Sload32 => {
+ if let Some(()) = C::littleendian(ctx, pattern5_2) {
+ // Rule at src/isa/s390x/lower.isle line 1341.
+ let expr0_0 = constructor_lower_address(
+ ctx, pattern5_2, pattern5_1, pattern5_3,
+ )?;
+ let expr1_0 = constructor_loadrev32(ctx, &expr0_0)?;
+ let expr2_0: Type = I32;
+ let expr3_0 = constructor_sext64_reg(ctx, expr2_0, expr1_0)?;
+ let expr4_0 = C::value_reg(ctx, expr3_0);
+ return Some(expr4_0);
+ }
+ if let Some(()) = C::bigendian(ctx, pattern5_2) {
+ // Rule at src/isa/s390x/lower.isle line 1336.
+ let expr0_0: Type = I32;
+ let expr1_0 = constructor_lower_address(
+ ctx, pattern5_2, pattern5_1, pattern5_3,
+ )?;
+ let expr2_0 = constructor_sext64_mem(ctx, expr0_0, &expr1_0)?;
+ let expr3_0 = C::value_reg(ctx, expr2_0);
+ return Some(expr3_0);
+ }
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ return None;
+}
+
+// Generated as internal constructor for term value_regs_ifcout.
+pub fn constructor_value_regs_ifcout<C: Context>(ctx: &mut C, arg0: Reg) -> Option<ValueRegs> {
+ let pattern0_0 = arg0;
+ // Rule at src/isa/s390x/lower.isle line 149.
+ let expr0_0: Type = I64;
+ let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
+ let expr2_0 = C::writable_reg_to_reg(ctx, expr1_0);
+ let expr3_0 = C::value_regs(ctx, pattern0_0, expr2_0);
+ return Some(expr3_0);
+}
+
+// Generated as internal constructor for term zero_divisor_check_needed.
+pub fn constructor_zero_divisor_check_needed<C: Context>(ctx: &mut C, arg0: Value) -> Option<bool> {
+ let pattern0_0 = arg0;
+ if let Some(pattern1_0) = C::i64_from_value(ctx, pattern0_0) {
+ let pattern2_0 = 0;
+ if let Some(pattern3_0) = C::i64_nonequal(ctx, pattern1_0, pattern2_0) {
+ // Rule at src/isa/s390x/lower.isle line 339.
+ let expr0_0: bool = false;
+ return Some(expr0_0);
+ }
+ }
+ let pattern1_0 = C::value_type(ctx, pattern0_0);
+ if let Some(()) = C::allow_div_traps(ctx, pattern1_0) {
+ // Rule at src/isa/s390x/lower.isle line 340.
+ let expr0_0: bool = false;
+ return Some(expr0_0);
+ }
+ // Rule at src/isa/s390x/lower.isle line 341.
+ let expr0_0: bool = true;
+ return Some(expr0_0);
+}
+
+// Generated as internal constructor for term maybe_trap_if_zero_divisor.
+pub fn constructor_maybe_trap_if_zero_divisor<C: Context>(
+ ctx: &mut C,
+ arg0: bool,
+ arg1: Type,
+ arg2: Reg,
+) -> Option<Reg> {
+ let pattern0_0 = arg0;
+ if pattern0_0 == true {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ // Rule at src/isa/s390x/lower.isle line 347.
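+ // Compare the divisor against zero and trap with a division-by-zero code on equality.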
+ let expr0_0: i16 = 0;
+ let expr1_0 = IntCC::Equal;
+ let expr2_0 = C::intcc_as_cond(ctx, &expr1_0);
+ let expr3_0 = C::trap_code_division_by_zero(ctx);
+ let expr4_0 = constructor_icmps_simm16_and_trap(
+ ctx, pattern2_0, pattern3_0, expr0_0, &expr2_0, &expr3_0,
+ )?;
+ return Some(expr4_0);
+ }
+ if pattern0_0 == false {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ // Rule at src/isa/s390x/lower.isle line 346.
+ let expr0_0 = C::invalid_reg(ctx);
+ return Some(expr0_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term div_overflow_check_needed.
+pub fn constructor_div_overflow_check_needed<C: Context>(ctx: &mut C, arg0: Value) -> Option<bool> {
+ let pattern0_0 = arg0;
+ if let Some(pattern1_0) = C::i64_from_value(ctx, pattern0_0) {
+ let pattern2_0 = -1;
+ if let Some(pattern3_0) = C::i64_nonequal(ctx, pattern1_0, pattern2_0) {
+ // Rule at src/isa/s390x/lower.isle line 420.
+ let expr0_0: bool = false;
+ return Some(expr0_0);
+ }
+ }
+ // Rule at src/isa/s390x/lower.isle line 421.
+ let expr0_0: bool = true;
+ return Some(expr0_0);
+}
+
+// Generated as internal constructor for term maybe_trap_if_sdiv_overflow.
+pub fn constructor_maybe_trap_if_sdiv_overflow<C: Context>(
+ ctx: &mut C,
+ arg0: bool,
+ arg1: Type,
+ arg2: Type,
+ arg3: &RegPair,
+ arg4: Reg,
+) -> Option<Reg> {
+ let pattern0_0 = arg0;
+ if pattern0_0 == true {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ let pattern5_0 = arg4;
+ // Rule at src/isa/s390x/lower.isle line 434.
+ let expr0_0 = constructor_int_max(ctx, pattern3_0)?;
+ let expr1_0 = constructor_imm(ctx, pattern2_0, expr0_0)?;
+ let expr2_0 = constructor_regpair_lo(ctx, pattern4_0)?;
+ let expr3_0 = constructor_xor_reg(ctx, pattern2_0, expr1_0, expr2_0)?;
+ let expr4_0 = constructor_and_reg(ctx, pattern2_0, expr3_0, pattern5_0)?;
+ let expr5_0: i16 = -1;
+ let expr6_0 = IntCC::Equal;
+ let expr7_0 = C::intcc_as_cond(ctx, &expr6_0);
+ let expr8_0 = C::trap_code_integer_overflow(ctx);
+ let expr9_0 = constructor_icmps_simm16_and_trap(
+ ctx, pattern2_0, expr4_0, expr5_0, &expr7_0, &expr8_0,
+ )?;
+ return Some(expr9_0);
+ }
+ if pattern0_0 == false {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ let pattern5_0 = arg4;
+ // Rule at src/isa/s390x/lower.isle line 433.
+ let expr0_0 = C::invalid_reg(ctx);
+ return Some(expr0_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term int_max.
+pub fn constructor_int_max<C: Context>(ctx: &mut C, arg0: Type) -> Option<u64> {
+ let pattern0_0 = arg0;
+ if pattern0_0 == I8 {
+ // Rule at src/isa/s390x/lower.isle line 442.
+ let expr0_0: u64 = 127;
+ return Some(expr0_0);
+ }
+ if pattern0_0 == I16 {
+ // Rule at src/isa/s390x/lower.isle line 443.
+ let expr0_0: u64 = 32767;
+ return Some(expr0_0);
+ }
+ if pattern0_0 == I32 {
+ // Rule at src/isa/s390x/lower.isle line 444.
+ let expr0_0: u64 = 2147483647;
+ return Some(expr0_0);
+ }
+ if pattern0_0 == I64 {
+ // Rule at src/isa/s390x/lower.isle line 445.
+ let expr0_0: u64 = 9223372036854775807;
+ return Some(expr0_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term maybe_avoid_srem_overflow.
+pub fn constructor_maybe_avoid_srem_overflow<C: Context>(
+ ctx: &mut C,
+ arg0: bool,
+ arg1: Type,
+ arg2: &RegPair,
+ arg3: Reg,
+) -> Option<RegPair> {
+ let pattern0_0 = arg0;
+ if pattern0_0 == true {
+ let pattern2_0 = arg1;
+ if pattern2_0 == I32 {
+ let pattern4_0 = arg2;
+ let pattern5_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 463.
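+ // For a 32-bit srem the widened divide cannot overflow, so the dividend pair is passed through unchanged.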
+ return Some(pattern4_0.clone());
+ }
+ if pattern2_0 == I64 {
+ let pattern4_0 = arg2;
+ let pattern5_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 464.
+ let expr0_0: Type = I64;
+ let expr1_0: Type = I64;
+ let expr2_0: i16 = -1;
+ let expr3_0 = constructor_icmps_simm16(ctx, expr1_0, pattern5_0, expr2_0)?;
+ let expr4_0 = IntCC::Equal;
+ let expr5_0 = C::intcc_as_cond(ctx, &expr4_0);
+ let expr6_0: i16 = 0;
+ let expr7_0 = constructor_cmov_imm_regpair_lo(
+ ctx, expr0_0, &expr3_0, &expr5_0, expr6_0, pattern4_0,
+ )?;
+ return Some(expr7_0);
+ }
+ }
+ if pattern0_0 == false {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 462.
+ return Some(pattern3_0.clone());
+ }
+ return None;
+}
+
+// Generated as internal constructor for term cast_bool.
+pub fn constructor_cast_bool<C: Context>(ctx: &mut C, arg0: Type, arg1: Value) -> Option<Reg> {
+ let pattern0_0 = arg0;
+ if pattern0_0 == B1 {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B1 {
+ // Rule at src/isa/s390x/lower.isle line 761.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ if pattern3_0 == B8 {
+ // Rule at src/isa/s390x/lower.isle line 762.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ }
+ if pattern0_0 == B8 {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B8 {
+ // Rule at src/isa/s390x/lower.isle line 763.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ }
+ if pattern0_0 == I8 {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B8 {
+ // Rule at src/isa/s390x/lower.isle line 764.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ }
+ if let Some(pattern1_0) = C::fits_in_16(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B16 {
+ // Rule at src/isa/s390x/lower.isle line 765.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ }
+ if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B32 {
+ // Rule at src/isa/s390x/lower.isle line 766.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ }
+ if let Some(pattern1_0) = C::fits_in_64(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B64 {
+ // Rule at src/isa/s390x/lower.isle line 767.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ return Some(expr0_0);
+ }
+ }
+ if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ let pattern3_0 = C::value_type(ctx, pattern2_0);
+ if pattern3_0 == B1 {
+ // Rule at src/isa/s390x/lower.isle line 770.
+ let expr0_0: Type = I32;
+ let expr1_0: Type = I32;
+ let expr2_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr3_0: u8 = 31;
+ let expr4_0 = constructor_lshl_imm(ctx, expr1_0, expr2_0, expr3_0)?;
+ let expr5_0: u8 = 31;
+ let expr6_0 = constructor_ashr_imm(ctx, expr0_0, expr4_0, expr5_0)?;
+ return Some(expr6_0);
+ }
+ if pattern3_0 == B8 {
+ // Rule at src/isa/s390x/lower.isle line 776.
+ let expr0_0: Type = I8;
+ let expr1_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr2_0 = constructor_sext32_reg(ctx, expr0_0, expr1_0)?;
+ return Some(expr2_0);
+ }
+ if pattern3_0 == B16 {
+ // Rule at src/isa/s390x/lower.isle line 778.
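+ // Sign-extend a 16-bit boolean into a 32-bit GPR.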
+ let expr0_0: Type = I16; + let expr1_0 = C::put_in_reg(ctx, pattern2_0); + let expr2_0 = constructor_sext32_reg(ctx, expr0_0, expr1_0)?; + return Some(expr2_0); + } + } + if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { + let pattern2_0 = arg1; + let pattern3_0 = C::value_type(ctx, pattern2_0); + if pattern3_0 == B1 { + // Rule at src/isa/s390x/lower.isle line 772. + let expr0_0: Type = I64; + let expr1_0: Type = I64; + let expr2_0 = C::put_in_reg(ctx, pattern2_0); + let expr3_0: u8 = 63; + let expr4_0 = constructor_lshl_imm(ctx, expr1_0, expr2_0, expr3_0)?; + let expr5_0: u8 = 63; + let expr6_0 = constructor_ashr_imm(ctx, expr0_0, expr4_0, expr5_0)?; + return Some(expr6_0); + } + if pattern3_0 == B8 { + // Rule at src/isa/s390x/lower.isle line 780. + let expr0_0: Type = I8; + let expr1_0 = C::put_in_reg(ctx, pattern2_0); + let expr2_0 = constructor_sext64_reg(ctx, expr0_0, expr1_0)?; + return Some(expr2_0); + } + if pattern3_0 == B16 { + // Rule at src/isa/s390x/lower.isle line 782. + let expr0_0: Type = I16; + let expr1_0 = C::put_in_reg(ctx, pattern2_0); + let expr2_0 = constructor_sext64_reg(ctx, expr0_0, expr1_0)?; + return Some(expr2_0); + } + if pattern3_0 == B32 { + // Rule at src/isa/s390x/lower.isle line 784. + let expr0_0: Type = I32; + let expr1_0 = C::put_in_reg(ctx, pattern2_0); + let expr2_0 = constructor_sext64_reg(ctx, expr0_0, expr1_0)?; + return Some(expr2_0); + } + } + return None; +} + +// Generated as internal constructor for term clz_offset. +pub fn constructor_clz_offset(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/lower.isle line 809. + let expr0_0: Type = I8; + let expr1_0: i16 = -56; + let expr2_0 = constructor_add_simm16(ctx, expr0_0, pattern2_0, expr1_0)?; + return Some(expr2_0); + } + if pattern0_0 == I16 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/lower.isle line 810. + let expr0_0: Type = I16; + let expr1_0: i16 = -48; + let expr2_0 = constructor_add_simm16(ctx, expr0_0, pattern2_0, expr1_0)?; + return Some(expr2_0); + } + if pattern0_0 == I32 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/lower.isle line 811. + let expr0_0: Type = I32; + let expr1_0: i16 = -32; + let expr2_0 = constructor_add_simm16(ctx, expr0_0, pattern2_0, expr1_0)?; + return Some(expr2_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + // Rule at src/isa/s390x/lower.isle line 812. + let expr0_0: Type = I64; + let expr1_0 = constructor_copy_reg(ctx, expr0_0, pattern2_0)?; + return Some(expr1_0); + } + return None; +} + +// Generated as internal constructor for term ctz_guardbit. +pub fn constructor_ctz_guardbit(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + // Rule at src/isa/s390x/lower.isle line 861. + let expr0_0: u16 = 256; + let expr1_0: u8 = 0; + let expr2_0 = C::uimm16shifted(ctx, expr0_0, expr1_0); + return Some(expr2_0); + } + if pattern0_0 == I16 { + // Rule at src/isa/s390x/lower.isle line 862. + let expr0_0: u16 = 1; + let expr1_0: u8 = 16; + let expr2_0 = C::uimm16shifted(ctx, expr0_0, expr1_0); + return Some(expr2_0); + } + if pattern0_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 863. + let expr0_0: u16 = 1; + let expr1_0: u8 = 32; + let expr2_0 = C::uimm16shifted(ctx, expr0_0, expr1_0); + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term istore8_impl. 
+pub fn constructor_istore8_impl<C: Context>(
+ ctx: &mut C,
+ arg0: MemFlags,
+ arg1: Value,
+ arg2: Value,
+ arg3: Offset32,
+) -> Option<SideEffectNoResult> {
+ let pattern0_0 = arg0;
+ let pattern1_0 = arg1;
+ if let Some(pattern2_0) = C::u8_from_value(ctx, pattern1_0) {
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1423.
+ let expr0_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr1_0 = constructor_store8_imm(ctx, pattern2_0, &expr0_0)?;
+ return Some(expr1_0);
+ }
+ let pattern2_0 = arg2;
+ let pattern3_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1419.
+ let expr0_0 = C::put_in_reg(ctx, pattern1_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern2_0, pattern3_0)?;
+ let expr2_0 = constructor_store8(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+}
+
+// Generated as internal constructor for term istore16_impl.
+pub fn constructor_istore16_impl<C: Context>(
+ ctx: &mut C,
+ arg0: MemFlags,
+ arg1: Value,
+ arg2: Value,
+ arg3: Offset32,
+) -> Option<SideEffectNoResult> {
+ let pattern0_0 = arg0;
+ if let Some(()) = C::littleendian(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ if let Some(pattern3_0) = C::i16_from_swapped_value(ctx, pattern2_0) {
+ let pattern4_0 = arg2;
+ let pattern5_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1449.
+ let expr0_0 = constructor_lower_address(ctx, pattern0_0, pattern4_0, pattern5_0)?;
+ let expr1_0 = constructor_store16_imm(ctx, pattern3_0, &expr0_0)?;
+ return Some(expr1_0);
+ }
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1441.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr2_0 = constructor_storerev16(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ if let Some(()) = C::bigendian(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ if let Some(pattern3_0) = C::i16_from_value(ctx, pattern2_0) {
+ let pattern4_0 = arg2;
+ let pattern5_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1445.
+ let expr0_0 = constructor_lower_address(ctx, pattern0_0, pattern4_0, pattern5_0)?;
+ let expr1_0 = constructor_store16_imm(ctx, pattern3_0, &expr0_0)?;
+ return Some(expr1_0);
+ }
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1437.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr2_0 = constructor_store16(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term istore32_impl.
+pub fn constructor_istore32_impl<C: Context>(
+ ctx: &mut C,
+ arg0: MemFlags,
+ arg1: Value,
+ arg2: Value,
+ arg3: Offset32,
+) -> Option<SideEffectNoResult> {
+ let pattern0_0 = arg0;
+ if let Some(()) = C::littleendian(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1471.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr2_0 = constructor_storerev32(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ if let Some(()) = C::bigendian(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ if let Some(pattern3_0) = C::i16_from_value(ctx, pattern2_0) {
+ let pattern4_0 = arg2;
+ let pattern5_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1467.
+ let expr0_0 = constructor_lower_address(ctx, pattern0_0, pattern4_0, pattern5_0)?;
+ let expr1_0 = constructor_store32_simm16(ctx, pattern3_0, &expr0_0)?;
+ return Some(expr1_0);
+ }
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1463.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr2_0 = constructor_store32(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term istore64_impl.
+pub fn constructor_istore64_impl<C: Context>(
+ ctx: &mut C,
+ arg0: MemFlags,
+ arg1: Value,
+ arg2: Value,
+ arg3: Offset32,
+) -> Option<SideEffectNoResult> {
+ let pattern0_0 = arg0;
+ if let Some(()) = C::littleendian(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1489.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr2_0 = constructor_storerev64(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ if let Some(()) = C::bigendian(ctx, pattern0_0) {
+ let pattern2_0 = arg1;
+ if let Some(pattern3_0) = C::i16_from_value(ctx, pattern2_0) {
+ let pattern4_0 = arg2;
+ let pattern5_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1485.
+ let expr0_0 = constructor_lower_address(ctx, pattern0_0, pattern4_0, pattern5_0)?;
+ let expr1_0 = constructor_store64_simm16(ctx, pattern3_0, &expr0_0)?;
+ return Some(expr1_0);
+ }
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1481.
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0);
+ let expr1_0 = constructor_lower_address(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr2_0 = constructor_store64(ctx, expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term atomic_store_impl.
+pub fn constructor_atomic_store_impl<C: Context>(
+ ctx: &mut C,
+ arg0: &SideEffectNoResult,
+) -> Option<ValueRegs> {
+ let pattern0_0 = arg0;
+ // Rule at src/isa/s390x/lower.isle line 1576.
+ let expr0_0 = constructor_value_regs_none(ctx, pattern0_0)?;
+ let expr1_0 = constructor_fence_impl(ctx)?;
+ let expr2_0 = constructor_value_regs_none(ctx, &expr1_0)?;
+ return Some(expr2_0);
+}
+
+// Generated as internal constructor for term icmp_val.
+pub fn constructor_icmp_val<C: Context>(
+ ctx: &mut C,
+ arg0: bool,
+ arg1: &IntCC,
+ arg2: Value,
+ arg3: Value,
+) -> Option<ProducesBool> {
+ let pattern0_0 = arg0;
+ let pattern1_0 = arg1;
+ if let Some(()) = C::signed(ctx, pattern1_0) {
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1643.
+ let expr0_0 = constructor_icmps_val(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr1_0 = C::intcc_as_cond(ctx, pattern1_0);
+ let expr2_0 = constructor_bool(ctx, &expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ if let Some(()) = C::unsigned(ctx, pattern1_0) {
+ let pattern3_0 = arg2;
+ let pattern4_0 = arg3;
+ // Rule at src/isa/s390x/lower.isle line 1646.
+ let expr0_0 = constructor_icmpu_val(ctx, pattern0_0, pattern3_0, pattern4_0)?;
+ let expr1_0 = C::intcc_as_cond(ctx, pattern1_0);
+ let expr2_0 = constructor_bool(ctx, &expr0_0, &expr1_0)?;
+ return Some(expr2_0);
+ }
+ return None;
+}
+
+// Generated as internal constructor for term icmps_val.
+pub fn constructor_icmps_val( + ctx: &mut C, + arg0: bool, + arg1: Value, + arg2: Value, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == true { + let pattern2_0 = arg1; + let pattern3_0 = C::value_type(ctx, pattern2_0); + if let Some(pattern4_0) = C::fits_in_64(ctx, pattern3_0) { + let pattern5_0 = arg2; + if let Some(pattern6_0) = C::sinkable_inst(ctx, pattern5_0) { + let pattern7_0 = C::inst_data(ctx, pattern6_0); + if let &InstructionData::Load { + opcode: ref pattern8_0, + arg: pattern8_1, + flags: pattern8_2, + offset: pattern8_3, + } = &pattern7_0 + { + match &pattern8_0 { + &Opcode::Sload16 => { + if let Some(()) = C::bigendian(ctx, pattern8_2) { + // Rule at src/isa/s390x/lower.isle line 1676. + let expr0_0 = C::put_in_reg(ctx, pattern2_0); + let expr1_0 = constructor_sink_sload16(ctx, pattern6_0)?; + let expr2_0 = constructor_icmps_mem_sext16( + ctx, pattern4_0, expr0_0, &expr1_0, + )?; + return Some(expr2_0); + } + } + &Opcode::Sload32 => { + if let Some(()) = C::bigendian(ctx, pattern8_2) { + // Rule at src/isa/s390x/lower.isle line 1678. + let expr0_0 = C::put_in_reg(ctx, pattern2_0); + let expr1_0 = constructor_sink_sload32(ctx, pattern6_0)?; + let expr2_0 = constructor_icmps_mem_sext32( + ctx, pattern4_0, expr0_0, &expr1_0, + )?; + return Some(expr2_0); + } + } + _ => {} + } + } + } + let pattern6_0 = C::value_type(ctx, pattern5_0); + if pattern6_0 == I16 { + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern5_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + if let &Opcode::Load = &pattern10_0 { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 1672. + let expr0_0 = constructor_ty_ext32(ctx, pattern4_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern2_0)?; + let expr2_0 = constructor_sink_load(ctx, pattern8_0)?; + let expr3_0 = + constructor_icmps_mem_sext16(ctx, expr0_0, expr1_0, &expr2_0)?; + return Some(expr3_0); + } + } + } + } + } + if let Some(pattern7_0) = C::ty_32_or_64(ctx, pattern6_0) { + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern5_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + if let &Opcode::Load = &pattern10_0 { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 1668. + let expr0_0 = C::put_in_reg(ctx, pattern2_0); + let expr1_0 = constructor_sink_load(ctx, pattern8_0)?; + let expr2_0 = + constructor_icmps_mem(ctx, pattern4_0, expr0_0, &expr1_0)?; + return Some(expr2_0); + } + } + } + } + } + } + } + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + if let Some(pattern3_0) = C::fits_in_64(ctx, pattern2_0) { + let pattern4_0 = arg2; + if let Some(pattern5_0) = C::i16_from_value(ctx, pattern4_0) { + // Rule at src/isa/s390x/lower.isle line 1662. + let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern1_0)?; + let expr2_0 = constructor_icmps_simm16(ctx, expr0_0, expr1_0, pattern5_0)?; + return Some(expr2_0); + } + if let Some(pattern5_0) = C::i32_from_value(ctx, pattern4_0) { + // Rule at src/isa/s390x/lower.isle line 1664. 
+ let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern1_0)?; + let expr2_0 = constructor_icmps_simm32(ctx, expr0_0, expr1_0, pattern5_0)?; + return Some(expr2_0); + } + if let Some(pattern5_0) = C::def_inst(ctx, pattern4_0) { + let pattern6_0 = C::inst_data(ctx, pattern5_0); + if let &InstructionData::Unary { + opcode: ref pattern7_0, + arg: pattern7_1, + } = &pattern6_0 + { + if let &Opcode::Sextend = &pattern7_0 { + let pattern9_0 = C::value_type(ctx, pattern7_1); + if pattern9_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 1658. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = + constructor_icmps_reg_sext32(ctx, pattern3_0, expr0_0, expr1_0)?; + return Some(expr2_0); + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 1654. + let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern1_0)?; + let expr2_0 = constructor_put_in_reg_sext32(ctx, pattern4_0)?; + let expr3_0 = constructor_icmps_reg(ctx, expr0_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + return None; +} + +// Generated as internal constructor for term icmpu_val. +pub fn constructor_icmpu_val( + ctx: &mut C, + arg0: bool, + arg1: Value, + arg2: Value, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == true { + let pattern2_0 = arg1; + let pattern3_0 = C::value_type(ctx, pattern2_0); + if let Some(pattern4_0) = C::fits_in_64(ctx, pattern3_0) { + let pattern5_0 = arg2; + if let Some(pattern6_0) = C::sinkable_inst(ctx, pattern5_0) { + let pattern7_0 = C::inst_data(ctx, pattern6_0); + if let &InstructionData::Load { + opcode: ref pattern8_0, + arg: pattern8_1, + flags: pattern8_2, + offset: pattern8_3, + } = &pattern7_0 + { + match &pattern8_0 { + &Opcode::Uload16 => { + if let Some(pattern10_0) = C::def_inst(ctx, pattern8_1) { + if let Some((pattern11_0, pattern11_1, pattern11_2)) = + C::symbol_value_data(ctx, pattern10_0) + { + if let Some(()) = C::reloc_distance_near(ctx, &pattern11_1) { + let pattern13_0 = C::i64_from_offset(ctx, pattern8_3); + let closure14 = || { + return Some(pattern11_2); + }; + if let Some(pattern14_0) = closure14() { + if let Some(pattern15_0) = C::memarg_symbol_offset_sum( + ctx, + pattern13_0, + pattern14_0, + ) { + let pattern16_0 = C::inst_data(ctx, pattern6_0); + if let &InstructionData::Load { + opcode: ref pattern17_0, + arg: pattern17_1, + flags: pattern17_2, + offset: pattern17_3, + } = &pattern16_0 + { + if let &Opcode::Uload16 = &pattern17_0 { + if let Some(()) = + C::bigendian(ctx, pattern17_2) + { + // Rule at src/isa/s390x/lower.isle line 1711. + let expr0_0 = + C::put_in_reg(ctx, pattern2_0); + let expr1_0 = constructor_sink_uload16( + ctx, pattern6_0, + )?; + let expr2_0 = + constructor_icmpu_mem_zext16( + ctx, pattern4_0, expr0_0, + &expr1_0, + )?; + return Some(expr2_0); + } + } + } + } + } + } + } + } + } + &Opcode::Uload32 => { + if let Some(()) = C::bigendian(ctx, pattern8_2) { + // Rule at src/isa/s390x/lower.isle line 1714. 
+ let expr0_0 = C::put_in_reg(ctx, pattern2_0); + let expr1_0 = constructor_sink_uload32(ctx, pattern6_0)?; + let expr2_0 = constructor_icmpu_mem_zext32( + ctx, pattern4_0, expr0_0, &expr1_0, + )?; + return Some(expr2_0); + } + } + _ => {} + } + } + } + let pattern6_0 = C::value_type(ctx, pattern5_0); + if pattern6_0 == I16 { + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern5_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + if let &Opcode::Load = &pattern10_0 { + if let Some(pattern12_0) = C::def_inst(ctx, pattern10_1) { + if let Some((pattern13_0, pattern13_1, pattern13_2)) = + C::symbol_value_data(ctx, pattern12_0) + { + if let Some(()) = C::reloc_distance_near(ctx, &pattern13_1) { + let pattern15_0 = C::i64_from_offset(ctx, pattern10_3); + let closure16 = || { + return Some(pattern13_2); + }; + if let Some(pattern16_0) = closure16() { + if let Some(pattern17_0) = C::memarg_symbol_offset_sum( + ctx, + pattern15_0, + pattern16_0, + ) { + let pattern18_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern19_0, + arg: pattern19_1, + flags: pattern19_2, + offset: pattern19_3, + } = &pattern18_0 + { + if let &Opcode::Load = &pattern19_0 { + if let Some(()) = + C::bigendian(ctx, pattern19_2) + { + // Rule at src/isa/s390x/lower.isle line 1704. + let expr0_0 = constructor_ty_ext32( + ctx, pattern4_0, + )?; + let expr1_0 = + constructor_put_in_reg_zext32( + ctx, pattern2_0, + )?; + let expr2_0 = constructor_sink_load( + ctx, pattern8_0, + )?; + let expr3_0 = + constructor_icmpu_mem_zext16( + ctx, expr0_0, expr1_0, &expr2_0, + )?; + return Some(expr3_0); + } + } + } + } + } + } + } + } + } + } + } + } + if let Some(pattern7_0) = C::ty_32_or_64(ctx, pattern6_0) { + if let Some(pattern8_0) = C::sinkable_inst(ctx, pattern5_0) { + let pattern9_0 = C::inst_data(ctx, pattern8_0); + if let &InstructionData::Load { + opcode: ref pattern10_0, + arg: pattern10_1, + flags: pattern10_2, + offset: pattern10_3, + } = &pattern9_0 + { + if let &Opcode::Load = &pattern10_0 { + if let Some(()) = C::bigendian(ctx, pattern10_2) { + // Rule at src/isa/s390x/lower.isle line 1698. + let expr0_0 = C::put_in_reg(ctx, pattern2_0); + let expr1_0 = constructor_sink_load(ctx, pattern8_0)?; + let expr2_0 = + constructor_icmpu_mem(ctx, pattern4_0, expr0_0, &expr1_0)?; + return Some(expr2_0); + } + } + } + } + } + } + } + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + if let Some(pattern3_0) = C::fits_in_64(ctx, pattern2_0) { + let pattern4_0 = arg2; + if let Some(pattern5_0) = C::u32_from_value(ctx, pattern4_0) { + // Rule at src/isa/s390x/lower.isle line 1694. + let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern1_0)?; + let expr2_0 = constructor_icmpu_uimm32(ctx, expr0_0, expr1_0, pattern5_0)?; + return Some(expr2_0); + } + if let Some(pattern5_0) = C::def_inst(ctx, pattern4_0) { + let pattern6_0 = C::inst_data(ctx, pattern5_0); + if let &InstructionData::Unary { + opcode: ref pattern7_0, + arg: pattern7_1, + } = &pattern6_0 + { + if let &Opcode::Uextend = &pattern7_0 { + let pattern9_0 = C::value_type(ctx, pattern7_1); + if pattern9_0 == I32 { + // Rule at src/isa/s390x/lower.isle line 1690. 
+ let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = C::put_in_reg(ctx, pattern7_1); + let expr2_0 = + constructor_icmpu_reg_zext32(ctx, pattern3_0, expr0_0, expr1_0)?; + return Some(expr2_0); + } + } + } + } + // Rule at src/isa/s390x/lower.isle line 1686. + let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern1_0)?; + let expr2_0 = constructor_put_in_reg_zext32(ctx, pattern4_0)?; + let expr3_0 = constructor_icmpu_reg(ctx, expr0_0, expr1_0, expr2_0)?; + return Some(expr3_0); + } + return None; +} + +// Generated as internal constructor for term fcmp_val. +pub fn constructor_fcmp_val( + ctx: &mut C, + arg0: &FloatCC, + arg1: Value, + arg2: Value, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = C::value_type(ctx, pattern1_0); + let pattern3_0 = arg2; + // Rule at src/isa/s390x/lower.isle line 1727. + let expr0_0 = C::put_in_reg(ctx, pattern1_0); + let expr1_0 = C::put_in_reg(ctx, pattern3_0); + let expr2_0 = constructor_fcmp_reg(ctx, pattern2_0, expr0_0, expr1_0)?; + let expr3_0 = C::floatcc_as_cond(ctx, pattern0_0); + let expr4_0 = constructor_bool(ctx, &expr2_0, &expr3_0)?; + return Some(expr4_0); +} + +// Generated as internal constructor for term value_nonzero. +pub fn constructor_value_nonzero(ctx: &mut C, arg0: Value) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::def_inst(ctx, pattern0_0) { + let pattern2_0 = C::inst_data(ctx, pattern1_0); + match &pattern2_0 { + &InstructionData::FloatCompare { + opcode: ref pattern3_0, + args: ref pattern3_1, + cond: ref pattern3_2, + } => { + if let &Opcode::Fcmp = &pattern3_0 { + let (pattern5_0, pattern5_1) = C::unpack_value_array_2(ctx, &pattern3_1); + // Rule at src/isa/s390x/lower.isle line 1755. + let expr0_0 = constructor_fcmp_val(ctx, &pattern3_2, pattern5_0, pattern5_1)?; + return Some(expr0_0); + } + } + &InstructionData::IntCompare { + opcode: ref pattern3_0, + args: ref pattern3_1, + cond: ref pattern3_2, + } => { + if let &Opcode::Icmp = &pattern3_0 { + let (pattern5_0, pattern5_1) = C::unpack_value_array_2(ctx, &pattern3_1); + // Rule at src/isa/s390x/lower.isle line 1754. + let expr0_0: bool = false; + let expr1_0 = + constructor_icmp_val(ctx, expr0_0, &pattern3_2, pattern5_0, pattern5_1)?; + return Some(expr1_0); + } + } + &InstructionData::Unary { + opcode: ref pattern3_0, + arg: pattern3_1, + } => { + if let &Opcode::Bint = &pattern3_0 { + // Rule at src/isa/s390x/lower.isle line 1753. + let expr0_0 = constructor_value_nonzero(ctx, pattern3_1)?; + return Some(expr0_0); + } + } + _ => {} + } + } + let pattern1_0 = C::value_type(ctx, pattern0_0); + if let Some(pattern2_0) = C::gpr32_ty(ctx, pattern1_0) { + // Rule at src/isa/s390x/lower.isle line 1756. + let expr0_0: Type = I32; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern0_0)?; + let expr2_0: i16 = 0; + let expr3_0 = constructor_icmps_simm16(ctx, expr0_0, expr1_0, expr2_0)?; + let expr4_0 = IntCC::NotEqual; + let expr5_0 = C::intcc_as_cond(ctx, &expr4_0); + let expr6_0 = constructor_bool(ctx, &expr3_0, &expr5_0)?; + return Some(expr6_0); + } + if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { + // Rule at src/isa/s390x/lower.isle line 1759. 
+ let expr0_0: Type = I64; + let expr1_0 = C::put_in_reg(ctx, pattern0_0); + let expr2_0: i16 = 0; + let expr3_0 = constructor_icmps_simm16(ctx, expr0_0, expr1_0, expr2_0)?; + let expr4_0 = IntCC::NotEqual; + let expr5_0 = C::intcc_as_cond(ctx, &expr4_0); + let expr6_0 = constructor_bool(ctx, &expr3_0, &expr5_0)?; + return Some(expr6_0); + } + return None; +} diff --git a/cranelift/filetests/filetests/isa/s390x/bitops.clif b/cranelift/filetests/filetests/isa/s390x/bitops.clif index a6bb3308d0a7..8939a946af65 100644 --- a/cranelift/filetests/filetests/isa/s390x/bitops.clif +++ b/cranelift/filetests/filetests/isa/s390x/bitops.clif @@ -54,8 +54,7 @@ block0(v0: i32): ; check: llgfr %r2, %r2 ; nextln: flogr %r0, %r2 -; nextln: lr %r2, %r0 -; nextln: ahi %r2, -32 +; nextln: ahik %r2, %r0, -32 ; nextln: br %r14 function %clz_i16(i16) -> i16 { @@ -66,8 +65,7 @@ block0(v0: i16): ; check: llghr %r2, %r2 ; nextln: flogr %r0, %r2 -; nextln: lr %r2, %r0 -; nextln: ahi %r2, -48 +; nextln: ahik %r2, %r0, -48 ; nextln: br %r14 function %clz_i8(i8) -> i8 { @@ -78,8 +76,7 @@ block0(v0: i8): ; check: llgcr %r2, %r2 ; nextln: flogr %r0, %r2 -; nextln: lr %r2, %r0 -; nextln: ahi %r2, -56 +; nextln: ahik %r2, %r0, -56 ; nextln: br %r14 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -93,7 +90,7 @@ block0(v0: i64): } ; check: srag %r3, %r2, 63 -; nextln: xgrk %r2, %r3, %r2 +; nextln: xgr %r2, %r3 ; nextln: flogr %r0, %r2 ; nextln: lgr %r2, %r0 ; nextln: br %r14 @@ -106,10 +103,9 @@ block0(v0: i32): ; check: lgfr %r2, %r2 ; nextln: srag %r3, %r2, 63 -; nextln: xgrk %r2, %r3, %r2 +; nextln: xgr %r2, %r3 ; nextln: flogr %r0, %r2 -; nextln: lr %r2, %r0 -; nextln: ahi %r2, -32 +; nextln: ahik %r2, %r0, -32 ; nextln: br %r14 function %cls_i16(i16) -> i16 { @@ -120,10 +116,9 @@ block0(v0: i16): ; check: lghr %r2, %r2 ; nextln: srag %r3, %r2, 63 -; nextln: xgrk %r2, %r3, %r2 +; nextln: xgr %r2, %r3 ; nextln: flogr %r0, %r2 -; nextln: lr %r2, %r0 -; nextln: ahi %r2, -48 +; nextln: ahik %r2, %r0, -48 ; nextln: br %r14 function %cls_i8(i8) -> i8 { @@ -134,10 +129,9 @@ block0(v0: i8): ; check: lgbr %r2, %r2 ; nextln: srag %r3, %r2, 63 -; nextln: xgrk %r2, %r3, %r2 +; nextln: xgr %r2, %r3 ; nextln: flogr %r0, %r2 -; nextln: lr %r2, %r0 -; nextln: ahi %r2, -56 +; nextln: ahik %r2, %r0, -56 ; nextln: br %r14 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -151,7 +145,7 @@ block0(v0: i64): } ; check: lcgr %r3, %r2 -; nextln: ngrk %r2, %r3, %r2 +; nextln: ngr %r2, %r3 ; nextln: flogr %r0, %r2 ; nextln: locghie %r0, -1 ; nextln: lghi %r2, 63 @@ -166,7 +160,7 @@ block0(v0: i32): ; check: oihl %r2, 1 ; nextln: lcgr %r3, %r2 -; nextln: ngrk %r2, %r3, %r2 +; nextln: ngr %r2, %r3 ; nextln: flogr %r0, %r2 ; nextln: lhi %r2, 63 ; nextln: sr %r2, %r0 @@ -180,7 +174,7 @@ block0(v0: i16): ; check: oilh %r2, 1 ; nextln: lcgr %r3, %r2 -; nextln: ngrk %r2, %r3, %r2 +; nextln: ngr %r2, %r3 ; nextln: flogr %r0, %r2 ; nextln: lhi %r2, 63 ; nextln: sr %r2, %r0 @@ -194,7 +188,7 @@ block0(v0: i8): ; check: oill %r2, 256 ; nextln: lcgr %r3, %r2 -; nextln: ngrk %r2, %r3, %r2 +; nextln: ngr %r2, %r3 ; nextln: flogr %r0, %r2 ; nextln: lhi %r2, 63 ; nextln: sr %r2, %r0 diff --git a/cranelift/filetests/filetests/isa/s390x/heap_addr.clif b/cranelift/filetests/filetests/isa/s390x/heap_addr.clif index 1eda3470cdd6..c07d5f66a75b 100644 --- a/cranelift/filetests/filetests/isa/s390x/heap_addr.clif +++ b/cranelift/filetests/filetests/isa/s390x/heap_addr.clif @@ -13,8 +13,8 @@ block0(v0: i64, v1: i32): ; check: Block 0: ; check: llgfr %r3, %r3 -; nextln: lg %r4, 
0(%r2) -; nextln: aghi %r4, 0 +; nextln: lghi %r4, 0 +; nextln: ag %r4, 0(%r2) ; nextln: clgr %r3, %r4 ; nextln: jgnh label1 ; jg label2 ; check: Block 1: diff --git a/cranelift/filetests/filetests/isa/s390x/shift-rotate.clif b/cranelift/filetests/filetests/isa/s390x/shift-rotate.clif index 9c682fe77842..0066bef49832 100644 --- a/cranelift/filetests/filetests/isa/s390x/shift-rotate.clif +++ b/cranelift/filetests/filetests/isa/s390x/shift-rotate.clif @@ -52,13 +52,12 @@ block0(v0: i16, v1: i16): } ; check: llhr %r2, %r2 -; nextln: lr %r3, %r4 -; nextln: lcr %r4, %r4 +; nextln: lcr %r4, %r3 ; nextln: nill %r3, 15 ; nextln: nill %r4, 15 -; nextln: sllk %r3, %r2, 0(%r3) -; nextln: srlk %r2, %r2, 0(%r4) -; nextln: ork %r2, %r3, %r2 +; nextln: sllk %r4, %r2, 0(%r4) +; nextln: srlk %r2, %r2, 0(%r3) +; nextln: ork %r2, %r4, %r2 ; nextln: br %r14 function %rotr_i16_imm(i16) -> i16 { @@ -81,13 +80,12 @@ block0(v0: i8, v1: i8): } ; check: llcr %r2, %r2 -; nextln: lr %r3, %r4 -; nextln: lcr %r4, %r4 +; nextln: lcr %r4, %r3 ; nextln: nill %r3, 7 ; nextln: nill %r4, 7 -; nextln: sllk %r3, %r2, 0(%r3) -; nextln: srlk %r2, %r2, 0(%r4) -; nextln: ork %r2, %r3, %r2 +; nextln: sllk %r4, %r2, 0(%r4) +; nextln: srlk %r2, %r2, 0(%r3) +; nextln: ork %r2, %r4, %r2 ; nextln: br %r14 function %rotr_i8_imm(i8) -> i8 { @@ -152,10 +150,9 @@ block0(v0: i16, v1: i16): } ; check: llhr %r2, %r2 -; nextln: lr %r4, %r3 -; nextln: lcr %r3, %r3 -; nextln: nill %r4, 15 +; nextln: lcr %r4, %r3 ; nextln: nill %r3, 15 +; nextln: nill %r4, 15 ; nextln: sllk %r3, %r2, 0(%r3) ; nextln: srlk %r2, %r2, 0(%r4) ; nextln: ork %r2, %r3, %r2 @@ -181,10 +178,9 @@ block0(v0: i8, v1: i8): } ; check: llcr %r2, %r2 -; nextln: lr %r4, %r3 -; nextln: lcr %r3, %r3 -; nextln: nill %r4, 7 +; nextln: lcr %r4, %r3 ; nextln: nill %r3, 7 +; nextln: nill %r4, 7 ; nextln: sllk %r3, %r2, 0(%r3) ; nextln: srlk %r2, %r2, 0(%r4) ; nextln: ork %r2, %r3, %r2