From 32b1b8fc68d22c3080fc7300dd84ece73073dac2 Mon Sep 17 00:00:00 2001 From: Xinlai Wan Date: Tue, 27 Dec 2022 20:23:10 +0800 Subject: [PATCH 01/11] V extension general framework and configuration setting instructions (#191) * V extension general framework and configuration setting instructions * Update model/riscv_insts_vext_utils.sail fix a typo Co-authored-by: Nicolas Brunie Signed-off-by: BrighterW * Update model/riscv_insts_vext_vset.sail * Revisions after Nov 22 meeting * Update effect matching for functions in riscv_vlen.sail * Fix code formatting issues * Update model/riscv_insts_vext_utils.sail Co-authored-by: Jessica Clarke Signed-off-by: Xinlai Wan * Fix coding style issues * Update vset instructions Signed-off-by: BrighterW Signed-off-by: Xinlai Wan Co-authored-by: Nicolas Brunie Co-authored-by: Jessica Clarke --- .gitignore | 3 + Makefile | 7 +- c_emulator/riscv_platform.c | 3 + c_emulator/riscv_platform.h | 1 + c_emulator/riscv_platform_impl.c | 1 + c_emulator/riscv_platform_impl.h | 1 + c_emulator/riscv_sim.c | 6 + model/prelude.sail | 42 ++- model/riscv_csr_map.sail | 8 + model/riscv_insts_vext_utils.sail | 524 ++++++++++++++++++++++++++++++ model/riscv_insts_vext_vset.sail | 148 +++++++++ model/riscv_insts_zicsr.sail | 18 + model/riscv_sys_control.sail | 16 + model/riscv_sys_regs.sail | 65 ++++ model/riscv_vext_control.sail | 20 ++ model/riscv_vext_regs.sail | 474 +++++++++++++++++++++++++++ model/riscv_vlen.sail | 64 ++++ model/riscv_vreg_type.sail | 141 ++++++++ ocaml_emulator/platform.ml | 2 + ocaml_emulator/riscv_ocaml_sim.ml | 3 + 20 files changed, 1545 insertions(+), 2 deletions(-) create mode 100755 model/riscv_insts_vext_utils.sail create mode 100644 model/riscv_insts_vext_vset.sail create mode 100755 model/riscv_vext_control.sail create mode 100755 model/riscv_vext_regs.sail create mode 100644 model/riscv_vlen.sail create mode 100755 model/riscv_vreg_type.sail diff --git a/.gitignore b/.gitignore index 224b1595b..7442c9d3f 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,6 @@ _build/ _sbuild/ *.o *.a +c_emulator/riscv_sim_* +ocaml_emulator/riscv_ocaml_sim_* +z3_problems diff --git a/Makefile b/Makefile index 0f66cbaf1..7d1562c74 100644 --- a/Makefile +++ b/Makefile @@ -16,6 +16,7 @@ else endif SAIL_FLEN := riscv_flen_D.sail +SAIL_VLEN := riscv_vlen.sail # Instruction sources, depending on target SAIL_CHECK_SRCS = riscv_addr_checks_common.sail riscv_addr_checks.sail riscv_misa_ext.sail @@ -36,6 +37,8 @@ SAIL_DEFAULT_INST += riscv_insts_zks.sail SAIL_DEFAULT_INST += riscv_insts_zbkb.sail SAIL_DEFAULT_INST += riscv_insts_zbkx.sail +SAIL_DEFAULT_INST += riscv_insts_vext_utils.sail +SAIL_DEFAULT_INST += riscv_insts_vext_vset.sail SAIL_SEQ_INST = $(SAIL_DEFAULT_INST) riscv_jalr_seq.sail SAIL_RMEM_INST = $(SAIL_DEFAULT_INST) riscv_jalr_rmem.sail riscv_insts_rmem.sail @@ -44,6 +47,7 @@ SAIL_RMEM_INST_SRCS = riscv_insts_begin.sail $(SAIL_RMEM_INST) riscv_insts_end.s # System and platform sources SAIL_SYS_SRCS = riscv_csr_map.sail +SAIL_SYS_SRCS += riscv_vext_control.sail # helpers for the 'V' extension SAIL_SYS_SRCS += riscv_next_regs.sail SAIL_SYS_SRCS += riscv_sys_exceptions.sail # default basic helpers for exception handling SAIL_SYS_SRCS += riscv_sync_exception.sail # define the exception structure used in the model @@ -63,11 +67,12 @@ SAIL_VM_SRCS += $(SAIL_RV64_VM_SRCS) endif # Non-instruction sources -PRELUDE = prelude.sail prelude_mapping.sail $(SAIL_XLEN) $(SAIL_FLEN) prelude_mem_metadata.sail prelude_mem.sail +PRELUDE = prelude.sail prelude_mapping.sail 
$(SAIL_XLEN) $(SAIL_FLEN) $(SAIL_VLEN) prelude_mem_metadata.sail prelude_mem.sail SAIL_REGS_SRCS = riscv_reg_type.sail riscv_freg_type.sail riscv_regs.sail riscv_pc_access.sail riscv_sys_regs.sail SAIL_REGS_SRCS += riscv_pmp_regs.sail riscv_pmp_control.sail SAIL_REGS_SRCS += riscv_ext_regs.sail $(SAIL_CHECK_SRCS) +SAIL_REGS_SRCS += riscv_vreg_type.sail riscv_vext_regs.sail SAIL_ARCH_SRCS = $(PRELUDE) SAIL_ARCH_SRCS += riscv_types_common.sail riscv_types_ext.sail riscv_types.sail diff --git a/c_emulator/riscv_platform.c b/c_emulator/riscv_platform.c index 2572dbcd4..bbc6020b5 100644 --- a/c_emulator/riscv_platform.c +++ b/c_emulator/riscv_platform.c @@ -21,6 +21,9 @@ bool sys_enable_fdext(unit u) bool sys_enable_zfinx(unit u) { return rv_enable_zfinx; } +bool sys_enable_rvv(unit u) +{ return rv_enable_rvv; } + bool sys_enable_writable_misa(unit u) { return rv_enable_writable_misa; } diff --git a/c_emulator/riscv_platform.h b/c_emulator/riscv_platform.h index 5335a90bb..a284cbaa4 100644 --- a/c_emulator/riscv_platform.h +++ b/c_emulator/riscv_platform.h @@ -6,6 +6,7 @@ bool sys_enable_next(unit); bool sys_enable_fdext(unit); bool sys_enable_zfinx(unit); bool sys_enable_writable_misa(unit); +bool sys_enable_rvv(unit); bool plat_enable_dirty_update(unit); bool plat_enable_misaligned_access(unit); diff --git a/c_emulator/riscv_platform_impl.c b/c_emulator/riscv_platform_impl.c index b1504a727..d7aed4182 100644 --- a/c_emulator/riscv_platform_impl.c +++ b/c_emulator/riscv_platform_impl.c @@ -9,6 +9,7 @@ bool rv_enable_rvc = true; bool rv_enable_next = false; bool rv_enable_writable_misa = true; bool rv_enable_fdext = true; +bool rv_enable_rvv = true; bool rv_enable_dirty_update = false; bool rv_enable_misaligned = false; diff --git a/c_emulator/riscv_platform_impl.h b/c_emulator/riscv_platform_impl.h index 165fb94d7..569a8e80d 100644 --- a/c_emulator/riscv_platform_impl.h +++ b/c_emulator/riscv_platform_impl.h @@ -12,6 +12,7 @@ extern bool rv_enable_zfinx; extern bool rv_enable_rvc; extern bool rv_enable_next; extern bool rv_enable_fdext; +extern bool rv_enable_rvv; extern bool rv_enable_writable_misa; extern bool rv_enable_dirty_update; extern bool rv_enable_misaligned; diff --git a/c_emulator/riscv_sim.c b/c_emulator/riscv_sim.c index 5ad83e95b..3ea632885 100644 --- a/c_emulator/riscv_sim.c +++ b/c_emulator/riscv_sim.c @@ -116,6 +116,7 @@ static struct option options[] = { {"disable-compressed", no_argument, 0, 'C'}, {"disable-writable-misa", no_argument, 0, 'I'}, {"disable-fdext", no_argument, 0, 'F'}, + {"disable-vector", no_argument, 0, 'W'}, {"mtval-has-illegal-inst-bits", no_argument, 0, 'i'}, {"device-tree-blob", required_argument, 0, 'b'}, {"terminal-log", required_argument, 0, 't'}, @@ -226,6 +227,7 @@ char *process_args(int argc, char **argv) "N" "I" "F" + "W" "i" "s" "p" @@ -279,6 +281,10 @@ char *process_args(int argc, char **argv) fprintf(stderr, "disabling floating point (F and D extensions).\n"); rv_enable_fdext = false; break; + case 'W': + fprintf(stderr, "disabling RVV vector instructions.\n"); + rv_enable_rvv = false; + break; case 'i': fprintf(stderr, "enabling storing illegal instruction bits in mtval.\n"); rv_mtval_has_illegal_inst_bits = true; diff --git a/model/prelude.sail b/model/prelude.sail index fbf30ff7b..754f53315 100644 --- a/model/prelude.sail +++ b/model/prelude.sail @@ -74,6 +74,7 @@ $include $include $include $include +$include val string_startswith = "string_startswith" : (string, string) -> bool val string_drop = "string_drop" : (string, nat) -> string @@ 
-140,7 +141,17 @@ val xor_vec = {c: "xor_bits", _: "xor_vec"} : forall 'n. (bits('n), bits('n)) -> val int_power = {ocaml: "int_power", interpreter: "int_power", lem: "pow", coq: "pow", c: "pow_int"} : (int, int) -> int -overload operator ^ = {xor_vec, int_power, concat_str} +val xor_bool : (bool, bool) -> bool +function xor_bool(b1, b2) = { + match (b1, b2) { + (false, false) => false, + (false, true) => true, + (true, false) => true, + (true, true) => false + } +} + +overload operator ^ = {xor_vec, int_power, concat_str, xor_bool} val sub_vec = {c: "sub_bits", _: "sub_vec"} : forall 'n. (bits('n), bits('n)) -> bits('n) @@ -207,6 +218,12 @@ function bit_to_bool b = match b { bitzero => false } +val bool_to_bit : bool -> bit +function bool_to_bit b = match b { + true => bitone, + false => bitzero +} + val to_bits : forall 'l, 'l >= 0.(atom('l), int) -> bits('l) function to_bits (l, n) = get_slice_int(l, n, 0) @@ -327,3 +344,26 @@ val def_spc_backwards : string -> unit function def_spc_backwards s = () val def_spc_matches_prefix : string -> option((unit, nat)) function def_spc_matches_prefix s = opt_spc_matches_prefix(s) + +val div_int : (int, int) -> int +function div_int(x, y) = { + floor(div_real(to_real(x), to_real(y))) +} + +val "print_real" : (string, real) -> unit +val "print_int" : (string, int) -> unit + +overload operator / = {div_int, div_real} +overload operator * = {mult_atom, mult_int, mult_real} + +/* helper for vector extension where the element width is between 8 and 64 */ +val log2 : forall 'n, 'n in {8, 16, 32, 64}. int('n) -> int +function log2(n) = { + let result : int = match n { + 8 => 3, + 16 => 4, + 32 => 5, + 64 => 6 + }; + result +} diff --git a/model/riscv_csr_map.sail b/model/riscv_csr_map.sail index 31872d3f1..24ee89f1e 100644 --- a/model/riscv_csr_map.sail +++ b/model/riscv_csr_map.sail @@ -163,6 +163,14 @@ mapping clause csr_name_map = 0x7a0 <-> "tselect" mapping clause csr_name_map = 0x7a1 <-> "tdata1" mapping clause csr_name_map = 0x7a2 <-> "tdata2" mapping clause csr_name_map = 0x7a3 <-> "tdata3" +/* vector csrs */ +mapping clause csr_name_map = 0x008 <-> "vstart" +mapping clause csr_name_map = 0x009 <-> "vxsat" +mapping clause csr_name_map = 0x00A <-> "vxrm" +mapping clause csr_name_map = 0x00F <-> "vcsr" +mapping clause csr_name_map = 0xC20 <-> "vl" +mapping clause csr_name_map = 0xC21 <-> "vtype" +mapping clause csr_name_map = 0xC22 <-> "vlenb" val csr_name : csreg -> string overload to_str = {csr_name} diff --git a/model/riscv_insts_vext_utils.sail b/model/riscv_insts_vext_utils.sail new file mode 100755 index 000000000..342387f38 --- /dev/null +++ b/model/riscv_insts_vext_utils.sail @@ -0,0 +1,524 @@ +/* ************************************************************************** */ +/* This file implements functions used by vector instructions. */ + +/* ************************************************************************** */ + + +/* Vector mask mapping */ +mapping maybe_vmask : string <-> bits(1) = { + "" <-> 0b1, /* unmasked by default */ + sep() ^ "v0.t" <-> 0b0 +} + +/* Check for valid vsew and lmul values */ +val vcheck_vsew_lmul : (int, real) -> bool +function vcheck_vsew_lmul(vsew_bits, lmul) = { + vsew_bits >= 8 & vsew_bits <= 64 & + lmul >= 0.125 & lmul <= 8.0; +} + +/* Check for vstart value */ +val assert_vstart : int -> bool effect {rreg} +function assert_vstart(i) = { + unsigned(vstart) == i; +} + +/* Scalar register shaping */ +val get_scalar : forall 'n, 'n >= 8. 
(regidx, int('n)) -> bits('n) effect {escape, rreg} +function get_scalar(rs1, vsew_bits) = { + if sizeof(xlen) > vsew_bits then { + /* Least significant SEW bits */ + X(rs1)[vsew_bits - 1 .. 0] + } else if sizeof(xlen) < vsew_bits then { + /* Sign extend to SEW */ + EXTS(vsew_bits, X(rs1)) + } else { + X(rs1) + } +} + +/* Get the starting element index from csr vtype */ +val get_start_element : unit -> int effect {escape, rreg, wreg} +function get_start_element() = { + let start_element = unsigned(vstart); + let vsew_bits = get_vtype_vsew(); + /* The use of vstart values greater than the largest element + index for the current SEW setting is reserved. + It is recommended that implementations trap if vstart is out of bounds. + It is not required to trap, as a possible future use of upper vstart bits + is to store imprecise trap information. */ + if start_element > ((8 * get_vlen() / vsew_bits) - 1) then -1 + else start_element +} + +/* Get the ending element index from csr vl */ +val get_end_element : unit -> int effect {escape, rreg, wreg} +function get_end_element() = { + let end_element : int = unsigned(vl) - 1; + end_element +} + +/* Mask handling; creates a pre-masked result vector for vstart, vl, vta/vma, and vm */ +/* vm should be baked into vm_val from doing read_vmask */ +/* tail masking when lmul < 1 is handled in write_vreg */ +/* Returns two vectors: + * vector1 is the result vector with values applied to masked elements + * vector2 is a "mask" vector that is true for an element if the corresponding element + * in the result vector should be updated by the calling instruction + */ +val init_masked_result : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), real, vector('n, dec, bits('m)), vector('n, dec, bool)) -> (vector('n, dec, bits('m)), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} +function init_masked_result(num_elem, vsew_bits, lmul, vd_val, vm_val) = { + let start_element : int = get_start_element(); + let end_element : int = get_end_element(); + let tail_ag : agtype = get_vtype_vta(); + let mask_ag : agtype = get_vtype_vma(); + mask_helper : vector('n, dec, bool) = undefined; + result : vector('n, dec, bits('m)) = undefined; + + if start_element < 0 then { + /* start element is not valid */ + result = undefined; + mask_helper = undefined; + } else { + /* Determine the actual number of elements when lmul < 1 */ + let real_num_elem = if lmul >= 1.0 then num_elem else floor(lmul * to_real(num_elem)); + assert(num_elem >= real_num_elem); + + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + /* Prestart elements defined by vstart */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else if i > end_element then { + /* Tail elements defined by vl */ + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i]; + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i]; + }; + mask_helper[i] = false + } else if i >= real_num_elem then { + /* Tail elements defined by lmul < 1 */ + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i]; + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i]; + }; + mask_helper[i] = false + } else if vm_val[i] == false then { + /* Inactive body elements defined by vm */ + if mask_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if mask_ag == AGNOSTIC then { + result[i] = vd_val[i] + }; + mask_helper[i] = false + } else { + /* Active body elements */ + mask_helper[i] = true; + } + }; + }; + + (result, mask_helper) +} + +/* Mask handling for carry functions that use masks as input/output */ 
+/* Only prestart and tail elements are masked in a mask value */ +val init_masked_result_carry : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), real, vector('n, dec, bool)) -> (vector('n, dec, bool), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} +function init_masked_result_carry(num_elem, vsew_bits, lmul, vd_val) = { + let start_element : int = get_start_element(); + let end_element : int = get_end_element(); + mask_helper : vector('n, dec, bool) = undefined; + result : vector('n, dec, bool) = undefined; + + /* Determine the actual number of elements when lmul < 1 */ + let real_num_elem = if lmul >= 1.0 then num_elem else floor(lmul * to_real(num_elem)); + assert(num_elem >= real_num_elem); + + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + /* Prestart elements defined by vstart */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else if i > end_element then { + /* Tail elements defined by vl */ + /* Mask tail is always agnostic */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else if i >= real_num_elem then { + /* Tail elements defined by lmul < 1 */ + /* Mask tail is always agnostic */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else { + /* Active body elements */ + mask_helper[i] = true + } + }; + + (result, mask_helper) +} + +/* Mask handling for cmp functions that use masks as output */ +val init_masked_result_cmp : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), real, vector('n, dec, bool), vector('n, dec, bool)) -> (vector('n, dec, bool), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} +function init_masked_result_cmp(num_elem, vsew_bits, lmul, vd_val, vm_val) = { + let start_element : int = get_start_element(); + let end_element : int = get_end_element(); + let mask_ag : agtype = get_vtype_vma(); + mask_helper : vector('n, dec, bool) = undefined; + result : vector('n, dec, bool) = undefined; + + /* Determine the actual number of elements when lmul < 1 */ + let real_num_elem = if lmul >= 1.0 then num_elem else floor(lmul * to_real(num_elem)); + assert(num_elem >= real_num_elem); + + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + /* Prestart elements defined by vstart */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else if i > end_element then { + /* Tail elements defined by vl */ + /* Mask tail is always agnostic */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else if i >= real_num_elem then { + /* Tail elements defined by lmul < 1 */ + /* Mask tail is always agnostic */ + result[i] = vd_val[i]; + mask_helper[i] = false + } else if vm_val[i] == false then { + /* Inactive body elements defined by vm */ + if mask_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if mask_ag == AGNOSTIC then { + result[i] = vd_val[i] + }; + mask_helper[i] = false + } else { + /* Active body elements */ + mask_helper[i] = true + } + }; + + (result, mask_helper) +} + +/* Floating point canonical NaN for 16-bit, 32-bit, 64-bit and 128-bit types */ +val canonical_NaN : forall 'm, 'm in {16, 32, 64, 128}. int('m) -> bits('m) +function canonical_NaN('m) = { + match 'm { + 16 => 0x_7e00, + 32 => 0x_7fc0_0000, + 64 => 0x_7ff8_0000_0000_0000, + 128 => 0x_7fff_8000_0000_0000_0000_0000_0000_0000 + } +} + +/* Floating point NaN boxing / unboxing that support 16-bit to 128-bit types */ +val NaN_box : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). 
bits('m) -> flenbits +function NaN_box unboxed = { + if sizeof(flen) == 'm then unboxed + else ones(sizeof(flen) - 'm) @ unboxed +} + +val NaN_unbox : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). (flenbits, int('m)) -> bits('m) +function NaN_unbox(regval, 'm) = { + if sizeof(flen) == 'm then regval + else if regval[sizeof(flen) - 1 .. 'm] == ones() then regval['m - 1 .. 0] + else canonical_NaN('m) +} + +/* Check if the floating point number is a signaling NaN */ +val f_is_SNaN : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +function f_is_SNaN xf = { + match 'm { + 16 => (xf[14..10] == ones()) & (xf[9..9] == zeros()) & (xf[8..0] != zeros()), + 32 => (xf[30..23] == ones()) & (xf[22..22] == zeros()) & (xf[21..0] != zeros()), + 64 => (xf[62..52] == ones()) & (xf[51..51] == zeros()) & (xf[50..0] != zeros()), + 128 => (xf[126..112] == ones()) & (xf[111..111] == zeros()) & (xf[110..0] != zeros()) + } +} + +/* Either QNaN or SNan */ +val f_is_NaN : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +function f_is_NaN xf = { + match 'm { + 16 => (xf[14..10] == ones()) & (xf[9..0] != zeros()), + 32 => (xf[30..23] == ones()) & (xf[22..0] != zeros()), + 64 => (xf[62..52] == ones()) & (xf[51..0] != zeros()), + 128 => (xf[126..112] == ones()) & (xf[111..0] != zeros()) + } +} + +val f_is_neg_zero : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +function f_is_neg_zero xf = { + match 'm { + 16 => (xf[15..15] == ones()) & (xf[14..0] == zeros()), + 32 => (xf[31..31] == ones()) & (xf[30..0] == zeros()), + 64 => (xf[63..63] == ones()) & (xf[62..0] == zeros()), + 128 => (xf[127..127] == ones()) & (xf[126..0] == zeros()) + } +} + +val f_is_pos_zero : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +function f_is_pos_zero xf = { + match 'm { + 16 => (xf == zeros()), + 32 => (xf == zeros()), + 64 => (xf == zeros()), + 128 => (xf == zeros()) + } +} + +/* Scalar register shaping for floating point operations */ +val get_scalar_fp : forall 'n, 'n in {16, 32, 64, 128}. (regidx, int('n)) -> bits('n) effect {escape, rreg} +function get_scalar_fp(rs1, vsew_bits) = { + if sizeof(flen) > vsew_bits then { + /* Least significant SEW bits */ + NaN_unbox(F(rs1), vsew_bits) + } else { + canonical_NaN(vsew_bits) + } +} + +/* Shift amounts */ +val get_shift_amount : forall 'n 'm, 0 <= 'n & 'm in {8, 16, 32, 64}. (bits('n), int('m)) -> int effect {escape} +function get_shift_amount(bit_val, vsew_bits) = { + let lowlog2bits : int = log2(vsew_bits); + assert(0 < lowlog2bits & lowlog2bits < 'n); + unsigned(bit_val[lowlog2bits - 1 .. 0]); +} + +/* Fixed point rounding increment */ +val get_fixed_rounding_incr : forall ('m 'n : Int), ('m > 0 & 'n >= 0). (bits('m), int('n)) -> bits(1) effect {rreg, undef} +function get_fixed_rounding_incr(vec_elem, shift_amount) = { + if shift_amount == 0 then 0b0 + else { + let rounding_mode = vxrm[1 .. 0]; + match rounding_mode { + 0b00 => slice(vec_elem, shift_amount - 1, 1), + 0b01 => bool_to_bits( + (slice(vec_elem, shift_amount - 1, 1) == 0b1) & (slice(vec_elem, 0, shift_amount - 1) != zeros() | slice(vec_elem, shift_amount, 1) == 0b1)), + 0b10 => 0b0, + 0b11 => bool_to_bits( + ~(slice(vec_elem, shift_amount, 1) == 0b1) & (slice(vec_elem, 0, shift_amount) != zeros())) + } + } +} + +/* Fixed point unsigned saturation */ +val unsigned_saturation : forall ('m 'n: Int), ('n >= 'm > 0). 
(int('m), bits('n)) -> bits('m) effect {escape, rreg, undef, wreg} +function unsigned_saturation(len, elem) = { + if unsigned(elem) > unsigned(ones('m)) then { + vxsat = 0b1; + EXTZ('m, ones('m)) + } else { + vxsat = 0b0; + elem['m - 1 .. 0] + } +} + +/* Fixed point signed saturation */ +val signed_saturation : forall ('m 'n: Int), ('n >= 'm > 0). (int('m), bits('n)) -> bits('m) effect {escape, rreg, undef, wreg} +function signed_saturation(len, elem) = { + if signed(elem) > signed(EXTZ('m, ones('m - 1))) then { + vxsat = 0b1; + EXTZ('m, ones('m - 1)) + } else if signed(elem) < signed(EXTZ('m, 0b1) << ('m - 1)) then { + vxsat = 0b1; + to_bits('m, signed(EXTZ('m, 0b1) << ('m - 1))) + } else { + vxsat = 0b0; + elem['m - 1 .. 0] + }; +} + +/* Get the floating point rounding mode from csr fcsr */ +val get_fp_rounding_mode : unit -> rounding_mode effect {rreg} +function get_fp_rounding_mode() = encdec_rounding_mode(fcsr.FRM()) + +/* Split sign and the remain of floating point number */ +val fsplitsign : forall 'n, 'n in {16, 32, 64}. bits('n) -> (bits(1), bits('n - 1)) +function fsplitsign (xf) = { + match 'n { + 16 => (xf[15..15], xf[14..0]), + 32 => (xf[31..31], xf[30..0]), + 64 => (xf[63..63], xf[62..0]) + } +} + +/* Make a floating point number by sign and the remains bits */ +val fmakesign : forall 'n, 'n in {16, 32, 64}. (bits(1), bits('n - 1)) -> bits('n) +function fmakesign (sign, remain) = sign @ remain + +/* Negate a floating point number */ +val negate_fp : forall 'n, 'n in {16, 32, 64}. bits('n) -> bits('n) +function negate_fp (xf) = { + let (sign, remain) = fsplitsign(xf); + let new_sign = if (sign == 0b0) then 0b1 else 0b0; + fmakesign (new_sign, remain) +} + + +/* Floating point functions */ +val fp_add: forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_add(rm_3b, op1, op2) = { + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16Add(rm_3b, op1, op2), + 32 => riscv_f32Add(rm_3b, op1, op2), + 64 => riscv_f64Add(rm_3b, op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_sub: forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_sub(rm_3b, op1, op2) = { + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16Sub(rm_3b, op1, op2), + 32 => riscv_f32Sub(rm_3b, op1, op2), + 64 => riscv_f64Sub(rm_3b, op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_eq : forall 'n, 'n in {16, 32, 64}. (bits('n), bits('n)) -> bool effect {escape, rreg, undef, wreg} +function fp_eq(op1, op2) = { + let (fflags, result_val) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Eq(op1, op2), + 32 => riscv_f32Eq(op1, op2), + 64 => riscv_f64Eq(op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_gt : forall 'n, 'n in {16, 32, 64}. (bits('n), bits('n)) -> bool effect {escape, rreg, undef, wreg} +function fp_gt(op1, op2) = { + let (fflags, temp_val) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Le(op1, op2), + 32 => riscv_f32Le(op1, op2), + 64 => riscv_f64Le(op1, op2) + }; + let result_val = (if fflags == 0b10000 then false else ~(temp_val)); + write_fflags(fflags); + result_val +} + +val fp_ge : forall 'n, 'n in {16, 32, 64}. 
(bits('n), bits('n)) -> bool effect {escape, rreg, undef, wreg} +function fp_ge(op1, op2) = { + let (fflags, temp_val) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Lt(op1, op2), + 32 => riscv_f32Lt(op1, op2), + 64 => riscv_f64Lt(op1, op2) + }; + let result_val = (if fflags == 0b10000 then false else ~(temp_val)); + write_fflags(fflags); + result_val +} + +val fp_lt : forall 'n, 'n in {16, 32, 64}. (bits('n), bits('n)) -> bool effect {escape, rreg, undef, wreg} +function fp_lt(op1, op2) = { + let (fflags, result_val) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Lt(op1, op2), + 32 => riscv_f32Lt(op1, op2), + 64 => riscv_f64Lt(op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_le : forall 'n, 'n in {16, 32, 64}. (bits('n), bits('n)) -> bool effect {escape, rreg, undef, wreg} +function fp_le(op1, op2) = { + let (fflags, result_val) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Le(op1, op2), + 32 => riscv_f32Le(op1, op2), + 64 => riscv_f64Le(op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_mul : forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_mul(rm_3b, op1, op2) = { + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16Mul(rm_3b, op1, op2), + 32 => riscv_f32Mul(rm_3b, op1, op2), + 64 => riscv_f64Mul(rm_3b, op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_div : forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_div(rm_3b, op1, op2) = { + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16Div(rm_3b, op1, op2), + 32 => riscv_f32Div(rm_3b, op1, op2), + 64 => riscv_f64Div(rm_3b, op1, op2) + }; + write_fflags(fflags); + result_val +} + +val fp_muladd : forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_muladd(rm_3b, op1, op2, opadd) = { + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16MulAdd(rm_3b, op1, op2, opadd), + 32 => riscv_f32MulAdd(rm_3b, op1, op2, opadd), + 64 => riscv_f64MulAdd(rm_3b, op1, op2, opadd) + }; + write_fflags(fflags); + result_val +} + +val fp_nmuladd : forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_nmuladd(rm_3b, op1, op2, opadd) = { + let op1 = negate_fp(op1); + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16MulAdd(rm_3b, op1, op2, opadd), + 32 => riscv_f32MulAdd(rm_3b, op1, op2, opadd), + 64 => riscv_f64MulAdd(rm_3b, op1, op2, opadd) + }; + write_fflags(fflags); + result_val +} + +val fp_mulsub : forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_mulsub(rm_3b, op1, op2, opsub) = { + let opsub = negate_fp(opsub); + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16MulAdd(rm_3b, op1, op2, opsub), + 32 => riscv_f32MulAdd(rm_3b, op1, op2, opsub), + 64 => riscv_f64MulAdd(rm_3b, op1, op2, opsub) + }; + write_fflags(fflags); + result_val +} + +val fp_nmulsub : forall 'n, 'n in {16, 32, 64}. 
(bits(3), bits('n), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_nmulsub(rm_3b, op1, op2, opsub) = { + let opsub = negate_fp(opsub); + let op1 = negate_fp(op1); + let (fflags, result_val) : (bits_fflags, bits('n)) = match 'n { + 16 => riscv_f16MulAdd(rm_3b, op1, op2, opsub), + 32 => riscv_f32MulAdd(rm_3b, op1, op2, opsub), + 64 => riscv_f64MulAdd(rm_3b, op1, op2, opsub) + }; + write_fflags(fflags); + result_val +} + +val fp_widen : forall 'm, ('m in {16, 32} & 'm <= flen). bits('m) -> bits('m * 2) effect {escape, rreg, undef, wreg} +function fp_widen(nval) = { + let rm_3b = fcsr.FRM(); + let (fflags, wval) : (bits_fflags, bits('m * 2)) = match 'm { + 16 => riscv_f16ToF32(rm_3b, nval), + 32 => riscv_f32ToF64(rm_3b, nval) + }; + accrue_fflags(fflags); + wval +} diff --git a/model/riscv_insts_vext_vset.sail b/model/riscv_insts_vext_vset.sail new file mode 100644 index 000000000..edbbe7055 --- /dev/null +++ b/model/riscv_insts_vext_vset.sail @@ -0,0 +1,148 @@ +/* ************************************************************************ */ +/* This file implements part of the vector extension. */ +/* Chapter 6: configuration setting instructions */ + +/* ************************************************************************ */ + +mapping sew_flag : string <-> bits(3) = { + "e8" <-> 0b000, + "e16" <-> 0b001, + "e32" <-> 0b010, + "e64" <-> 0b011, + "e128" <-> 0b100, + "e256" <-> 0b101, + "e512" <-> 0b110, + "e1024" <-> 0b111 +} + +mapping maybe_lmul_flag : string <-> bits(3) = { + "" <-> 0b000, /* m1 by default */ + sep() ^ "mf8" <-> 0b101, + sep() ^ "mf4" <-> 0b110, + sep() ^ "mf2" <-> 0b111, + sep() ^ "m1" <-> 0b000, + sep() ^ "m2" <-> 0b001, + sep() ^ "m4" <-> 0b010, + sep() ^ "m8" <-> 0b011 +} + +mapping maybe_ta_flag : string <-> bits(1) = { + "" <-> 0b0, /* tu by default */ + sep() ^ "ta" <-> 0b1, + sep() ^ "tu" <-> 0b0 +} + +mapping maybe_ma_flag : string <-> bits(1) = { + "" <-> 0b0, /* mu by default */ + sep() ^ "ma" <-> 0b1, + sep() ^ "mu" <-> 0b0 +} + +/* ******************** vsetvli & vsetvl *********************** */ +union clause ast = VSET_TYPE : (vsetop, bits(1), bits(1), bits(3), bits(3), regidx, regidx) + +mapping encdec_vsetop : vsetop <-> bits(4) ={ + VSETVLI <-> 0b0000, + VSETVL <-> 0b1000 +} + +mapping clause encdec = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) + <-> encdec_vsetop(op) @ ma @ ta @ sew @ lmul @ rs1 @ 0b111 @ rd @ 0b1010111 + +function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { + let vlen : int = get_vlen(); + let LMUL_ori : real = get_vtype_LMUL(); + let SEW_ori : int = get_vtype_vsew(); + let ratio_ori : real = to_real(SEW_ori) / LMUL_ori; + + /* set vtype and calculate VLMAX */ + match op { + VSETVLI => { + vtype->bits() = 0b0 @ zeros(sizeof(xlen) - 9) @ ma @ ta @ sew @ lmul + }, + VSETVL => { + let rs2 : regidx = sew[1 .. 
0] @ lmul; + vtype->bits() = X(rs2) + } + }; + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + let LMUL_new : real = get_vtype_LMUL(); + let SEW_new : int = get_vtype_vsew(); + let VLMAX = floor(LMUL_new) * vlen / SEW_new; + + /* set vl according to VLMAX and AVL */ + if (rs1 != 0b00000) then { /* normal stripmining */ + let rs1_val = X(rs1); + let AVL = unsigned(rs1_val); + vl = if AVL <= VLMAX then to_bits(sizeof(xlen), AVL) + else if AVL < 2 * VLMAX then to_bits(sizeof(xlen), (AVL + 1) / 2) /* ceil(AVL / 2) ≤ vl ≤ VLMAX */ + else to_bits(sizeof(xlen), VLMAX); + X(rd) = vl; + print_reg("CSR vl <- " ^ BitStr(vl)) + } else if (rd != 0b00000) then { /* set vl to VLMAX */ + let AVL = unsigned(ones(sizeof(xlen))); + vl = to_bits(sizeof(xlen), VLMAX); + X(rd) = vl; + print_reg("CSR vl <- " ^ BitStr(vl)) + } else { /* keep existing vl */ + let AVL = unsigned(vl); + let ratio_new : real = to_real(SEW_new) / LMUL_new; + if (ratio_new != ratio_ori) then { + vtype->bits() = 0b1 @ zeros(sizeof(xlen) - 1); /* set vtype.vill */ + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + } + }; + + /* reset vstart to 0 */ + vstart = EXTZ(0b0); + print_reg("CSR vstart <- " ^ BitStr(vstart)); + + RETIRE_SUCCESS +} + +mapping vsettype_mnemonic : vsetop <-> string ={ + VSETVLI <-> "vsetvli", + VSETVL <-> "vsetvli" +} + +mapping clause assembly = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) + <-> vsettype_mnemonic(op) ^ spc() ^ reg_name(rd) ^ sep() ^ reg_name(rs1) ^ sep() ^ sew_flag(sew) ^ maybe_lmul_flag(lmul) ^ maybe_ta_flag(ta) ^ maybe_ma_flag(ma) + + +/* ******************** vsetivli *********************** */ +union clause ast = VSETI_TYPE : ( bits(1), bits(1), bits(3), bits(3), regidx, regidx) + +mapping clause encdec = VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) + <-> 0b1100 @ ma @ ta @ sew @ lmul @ uimm @ 0b111 @ rd @ 0b1010111 + +function clause execute VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) = { + let vlen : int = get_vlen(); + let LMUL_ori : real = get_vtype_LMUL(); + let SEW_ori : int = get_vtype_vsew(); + let ratio_ori : real = to_real(SEW_ori) / LMUL_ori; + + /* set vtype and calculate VLMAX */ + vtype->bits() = 0b0 @ zeros(sizeof(xlen) - 9) @ ma @ ta @ sew @ lmul; + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + + let LMUL_new : real = get_vtype_LMUL(); + let SEW_new : int = get_vtype_vsew(); + let VLMAX : int = floor(LMUL_new) * vlen / SEW_new; + let AVL : int = unsigned(uimm); /* AVL is encoded as 5-bit zero-extended imm in the rs1 field */ + + /* set vl according to VLMAX and AVL */ + vl = if AVL <= VLMAX then to_bits(sizeof(xlen), AVL) + else if AVL < 2 * VLMAX then to_bits(sizeof(xlen), (AVL + 1) / 2) /* ceil(AVL / 2) ≤ vl ≤ VLMAX */ + else to_bits(sizeof(xlen), VLMAX); + X(rd) = vl; + print_reg("CSR vl <- " ^ BitStr(vl)); + + /* reset vstart to 0 */ + vstart = EXTZ(0b0); + print_reg("CSR vstart <- " ^ BitStr(vstart)); + + RETIRE_SUCCESS +} + +mapping clause assembly = VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) + <-> "vsetivli" ^ spc() ^ reg_name(rd) ^ sep() ^ hex_bits_5(uimm) ^ sep() ^ sew_flag(sew) ^ maybe_lmul_flag(lmul) ^ maybe_ta_flag(ta) ^ maybe_ma_flag(ma) diff --git a/model/riscv_insts_zicsr.sail b/model/riscv_insts_zicsr.sail index 08c7a19a0..5b0a5e358 100644 --- a/model/riscv_insts_zicsr.sail +++ b/model/riscv_insts_zicsr.sail @@ -133,6 +133,15 @@ function readCSR csr : csreg -> xlenbits = { (0xB80, 32) => mcycle[63 .. 32], (0xB82, 32) => minstret[63 .. 
32], + /* vector csr*/ + (0x008, _) => EXTZ(vstart), + (0x009, _) => EXTZ(vxsat), + (0x00A, _) => EXTZ(vxrm), + (0x00F, _) => EXTZ(vcsr.bits()), + (0xC20, _) => vl, + (0xC21, _) => vtype.bits(), + (0xC22, _) => vlenb, + /* trigger/debug */ (0x7a0, _) => ~(tselect), /* this indicates we don't have any trigger support */ @@ -241,6 +250,15 @@ function writeCSR (csr : csreg, value : xlenbits) -> unit = { /* user mode: seed (entropy source). writes are ignored */ (0x015, _) => write_seed_csr(), + /* vector csr */ + (0x008, _) => { let vstart_length = get_vstart_length(); vstart = EXTZ(16, value[(vstart_length - 1) .. 0]); Some(EXTZ(vstart)) }, + (0x009, _) => { vxsat = value[0 .. 0]; Some(EXTZ(vxsat)) }, + (0x00A, _) => { vxrm = value[1 .. 0]; Some(EXTZ(vxrm)) }, + (0x00F, _) => { vcsr->bits() = value[2 ..0]; Some(EXTZ(vcsr.bits())) }, + (0xC20, _) => { vl = value; Some(vl) }, + (0xC21, _) => { vtype->bits() = value; Some(vtype.bits()) }, + (0xC22, _) => { vlenb = value; Some(vlenb) }, + _ => ext_write_CSR(csr, value) }; match res { diff --git a/model/riscv_sys_control.sail b/model/riscv_sys_control.sail index 668136776..6f95e9c5e 100644 --- a/model/riscv_sys_control.sail +++ b/model/riscv_sys_control.sail @@ -558,6 +558,7 @@ function init_sys() -> unit = { misa->M() = 0b1; /* integer multiply/divide */ misa->U() = 0b1; /* user-mode */ misa->S() = 0b1; /* supervisor-mode */ + misa->V() = bool_to_bits(sys_enable_rvv()); /* RVV */ if sys_enable_fdext() & sys_enable_zfinx() then internal_error(__FILE__, __LINE__, "F and Zfinx cannot both be enabled!"); @@ -596,6 +597,21 @@ function init_sys() -> unit = { minstret = EXTZ(0b0); minstret_written = false; + /* initialize vector csrs */ + vstart = EXTZ(0b0); + vxsat = 0b0; + vxrm = 0b00; + vcsr->vxrm() = vxrm; + vcsr->vxsat() = vxsat; + vl = EXTZ(0b10000); /* the default value is 16 */ + vtype->vill() = 0b0; + vtype->reserved() = EXTZ(0b0); + vtype->vma() = 0b0; + vtype->vta() = 0b0; + vtype->vsew() = 0b000; + vtype->vlmul() = 0b000; + vlenb = EXTZ(0b0); + init_pmp(); // log compatibility with spike diff --git a/model/riscv_sys_regs.sail b/model/riscv_sys_regs.sail index e1576ed6f..67d0d63ca 100644 --- a/model/riscv_sys_regs.sail +++ b/model/riscv_sys_regs.sail @@ -148,6 +148,7 @@ val sys_enable_fdext = {c: "sys_enable_fdext", ocaml: "Platform.enable_fdext", _ val sys_enable_zfinx = {c: "sys_enable_zfinx", ocaml: "Platform.enable_zfinx", _: "sys_enable_zfinx"} : unit -> bool /* whether the N extension was enabled at boot */ val sys_enable_next = {c: "sys_enable_next", ocaml: "Platform.enable_next", _: "sys_enable_next"} : unit -> bool +val sys_enable_rvv = {c: "sys_enable_rvv", ocaml: "Platform.enable_rvv", _: "sys_enable_rvv"} : unit -> bool /* This function allows an extension to veto a write to Misa if it would violate an alignment restriction on @@ -207,6 +208,8 @@ function haveZkne() -> bool = true function haveZknd() -> bool = true function haveZmmul() -> bool = true +function haveRVV() -> bool = misa.V() == 0b1 +/* see below for F and D extension tests */ bitfield Mstatush : bits(32) = { MBE : 5, @@ -824,3 +827,65 @@ function read_seed_csr() -> xlenbits = { /* Writes to the seed CSR are ignored */ function write_seed_csr () -> option(xlenbits) = None() + +/* vector csrs */ +register vstart : bits(16) /* use the largest possible length of vstart */ +register vxsat : bits(1) +register vxrm : bits(2) +register vl : xlenbits +register vlenb : xlenbits + +bitfield Vtype : xlenbits = { + vill : xlen - 1, + reserved : xlen - 2 .. 
8, + vma : 7, + vta : 6, + vsew : 5 .. 3, + vlmul : 2 .. 0 +} +register vtype : Vtype + +/* the dynamic selected element width (SEW) */ +val get_vtype_vsew : unit -> int effect {escape, rreg} +function get_vtype_vsew() = { + match vtype.vsew() { + 0b000 => 8, + 0b001 => 16, + 0b010 => 32, + 0b011 => 64, + _ => {assert(false, "invalid vsew field in vtype"); 0} + } +} + +/* the vector register group multiplier (LMUL) */ +val get_vtype_LMUL : unit -> real effect {escape, rreg} +function get_vtype_LMUL() = { + match vtype.vlmul() { + 0b101 => 0.125, /* 1/8 */ + 0b110 => 0.25, /* 1/4 */ + 0b111 => 0.5, /* 1/2 */ + 0b000 => 1.0, + 0b001 => 2.0, + 0b010 => 4.0, + 0b011 => 8.0, + _ => {assert(false, "invalid vlmul field in vtype"); 0.0} + } +} + +enum agtype = { UNDISTURBED, AGNOSTIC } + +val get_vtype_vma : unit -> agtype effect {rreg} +function get_vtype_vma() = { + match vtype.vma() { + 0b0 => UNDISTURBED, + 0b1 => AGNOSTIC + } +} + +val get_vtype_vta : unit -> agtype effect {rreg} +function get_vtype_vta() = { + match vtype.vta() { + 0b0 => UNDISTURBED, + 0b1 => AGNOSTIC + } +} diff --git a/model/riscv_vext_control.sail b/model/riscv_vext_control.sail new file mode 100755 index 000000000..b2a68c2ee --- /dev/null +++ b/model/riscv_vext_control.sail @@ -0,0 +1,20 @@ +function clause ext_is_CSR_defined (0x008, _) = true +function clause ext_is_CSR_defined (0xC20, _) = true +function clause ext_is_CSR_defined (0xC21, _) = true +function clause ext_is_CSR_defined (0xC22, _) = true + +function clause ext_is_CSR_defined (0x009, _) = true +function clause ext_is_CSR_defined (0x00A, _) = true +function clause ext_is_CSR_defined (0x00F, _) = true + +function clause ext_read_CSR (0x009) = Some (EXTZ (vcsr.vxsat())) +function clause ext_read_CSR (0x00A) = Some (EXTZ (vcsr.vxrm())) +function clause ext_read_CSR (0x00F) = Some (EXTZ (vcsr.bits())) + +function clause ext_read_CSR (0x009) = Some (EXTZ (vcsr.vxsat())) +function clause ext_read_CSR (0x00A) = Some (EXTZ (vcsr.vxrm())) +function clause ext_read_CSR (0x00F) = Some (EXTZ (vcsr.bits())) + +function clause ext_write_CSR (0x009, value) = { ext_write_vcsr (vcsr.vxrm(), value[0 .. 0]); Some(EXTZ(vcsr.vxsat())) } +function clause ext_write_CSR (0x00A, value) = { ext_write_vcsr (value[1 .. 0], vcsr.vxsat()); Some(EXTZ(vcsr.vxrm())) } +function clause ext_write_CSR (0x00F, value) = { ext_write_vcsr (value [2 .. 1], value [0 .. 
0]); Some(EXTZ(vcsr.bits())) } diff --git a/model/riscv_vext_regs.sail b/model/riscv_vext_regs.sail new file mode 100755 index 000000000..85cd30821 --- /dev/null +++ b/model/riscv_vext_regs.sail @@ -0,0 +1,474 @@ +/* vector registers */ +register vr0 : vregtype +register vr1 : vregtype +register vr2 : vregtype +register vr3 : vregtype +register vr4 : vregtype +register vr5 : vregtype +register vr6 : vregtype +register vr7 : vregtype +register vr8 : vregtype +register vr9 : vregtype +register vr10 : vregtype +register vr11 : vregtype +register vr12 : vregtype +register vr13 : vregtype +register vr14 : vregtype +register vr15 : vregtype +register vr16 : vregtype +register vr17 : vregtype +register vr18 : vregtype +register vr19 : vregtype +register vr20 : vregtype +register vr21 : vregtype +register vr22 : vregtype +register vr23 : vregtype +register vr24 : vregtype +register vr25 : vregtype +register vr26 : vregtype +register vr27 : vregtype +register vr28 : vregtype +register vr29 : vregtype +register vr30 : vregtype +register vr31 : vregtype + +val vreg_name : bits(5) <-> string +mapping vreg_name = { + 0b00000 <-> "v0", + 0b00001 <-> "v1", + 0b00010 <-> "v2", + 0b00011 <-> "v3", + 0b00100 <-> "v4", + 0b00101 <-> "v5", + 0b00110 <-> "v6", + 0b00111 <-> "v7", + 0b01000 <-> "v8", + 0b01001 <-> "v9", + 0b01010 <-> "v10", + 0b01011 <-> "v11", + 0b01100 <-> "v12", + 0b01101 <-> "v13", + 0b01110 <-> "v14", + 0b01111 <-> "v15", + 0b10000 <-> "v16", + 0b10001 <-> "v17", + 0b10010 <-> "v18", + 0b10011 <-> "v19", + 0b10100 <-> "v20", + 0b10101 <-> "v21", + 0b10110 <-> "v22", + 0b10111 <-> "v23", + 0b11000 <-> "v24", + 0b11001 <-> "v25", + 0b11010 <-> "v26", + 0b11011 <-> "v27", + 0b11100 <-> "v28", + 0b11101 <-> "v29", + 0b11110 <-> "v30", + 0b11111 <-> "v31" +} + +val rV : forall 'n, 0 <= 'n < 32. regno('n) -> vregtype effect {rreg, escape} +function rV r = { + let zero_vreg : vregtype = EXTZ(0x0); + let v : vregtype = + match r { + 0 => vr0, + 1 => vr1, + 2 => vr2, + 3 => vr3, + 4 => vr4, + 5 => vr5, + 6 => vr6, + 7 => vr7, + 8 => vr8, + 9 => vr9, + 10 => vr10, + 11 => vr11, + 12 => vr12, + 13 => vr13, + 14 => vr14, + 15 => vr15, + 16 => vr16, + 17 => vr17, + 18 => vr18, + 19 => vr19, + 20 => vr20, + 21 => vr21, + 22 => vr22, + 23 => vr23, + 24 => vr24, + 25 => vr25, + 26 => vr26, + 27 => vr27, + 28 => vr28, + 29 => vr29, + 30 => vr30, + 31 => vr31, + _ => {assert(false, "invalid vector register number"); zero_vreg} + }; + v +} + +val wV : forall 'n, 0 <= 'n < 32. (regno('n), vregtype) -> unit effect {rreg, wreg, escape} +function wV (r, in_v) = { + let v = in_v; + match r { + 0 => vr0 = v, + 1 => vr1 = v, + 2 => vr2 = v, + 3 => vr3 = v, + 4 => vr4 = v, + 5 => vr5 = v, + 6 => vr6 = v, + 7 => vr7 = v, + 8 => vr8 = v, + 9 => vr9 = v, + 10 => vr10 = v, + 11 => vr11 = v, + 12 => vr12 = v, + 13 => vr13 = v, + 14 => vr14 = v, + 15 => vr15 = v, + 16 => vr16 = v, + 17 => vr17 = v, + 18 => vr18 = v, + 19 => vr19 = v, + 20 => vr20 = v, + 21 => vr21 = v, + 22 => vr22 = v, + 23 => vr23 = v, + 24 => vr24 = v, + 25 => vr25 = v, + 26 => vr26 = v, + 27 => vr27 = v, + 28 => vr28 = v, + 29 => vr29 = v, + 30 => vr30 = v, + 31 => vr31 = v, + _ => assert(false, "invalid vector register number") + }; + + let vlen : int = get_vlen(); + assert(0 < vlen & vlen <= sizeof(vlenmax)); + if get_config_print_reg() + then { + print_reg("v" ^ string_of_int(r) ^ " <- " ^ BitStr(v[vlen - 1 .. 
0])); + } +} + +function rV_bits(i: bits(5)) -> vregtype = rV(unsigned(i)) + +function wV_bits(i: bits(5), data: vregtype) -> unit = { + wV(unsigned(i)) = data +} + +overload V = {rV_bits, wV_bits, rV, wV} + +val init_vregs : unit -> unit effect {wreg} +function init_vregs () = { + let zero_vreg : vregtype = EXTZ(0x0); + vr0 = zero_vreg; + vr1 = zero_vreg; + vr2 = zero_vreg; + vr3 = zero_vreg; + vr4 = zero_vreg; + vr5 = zero_vreg; + vr6 = zero_vreg; + vr7 = zero_vreg; + vr8 = zero_vreg; + vr9 = zero_vreg; + vr10 = zero_vreg; + vr11 = zero_vreg; + vr12 = zero_vreg; + vr13 = zero_vreg; + vr14 = zero_vreg; + vr15 = zero_vreg; + vr16 = zero_vreg; + vr17 = zero_vreg; + vr18 = zero_vreg; + vr19 = zero_vreg; + vr20 = zero_vreg; + vr21 = zero_vreg; + vr22 = zero_vreg; + vr23 = zero_vreg; + vr24 = zero_vreg; + vr25 = zero_vreg; + vr26 = zero_vreg; + vr27 = zero_vreg; + vr28 = zero_vreg; + vr29 = zero_vreg; + vr30 = zero_vreg; + vr31 = zero_vreg +} + +/* Vector CSR */ +bitfield Vcsr : bits(3) = { + vxrm : 2 .. 1, + vxsat : 0 +} +register vcsr : Vcsr + +val ext_write_vcsr : (bits(2), bits(1)) -> unit effect {rreg, wreg} +function ext_write_vcsr (vxrm_val, vxsat_val) = { + vcsr->vxrm() = vxrm_val; /* Note: frm can be an illegal value, 101, 110, 111 */ + vcsr->vxsat() = vxsat_val; +} + +/* num_elem means max(VLMAX,VLEN/SEW)) according to Section 5.4 of RVV spec */ +val get_num_elem : (real, int) -> int effect {rreg, undef} +function get_num_elem(lmul, vsew_bits) = { + let vlen : int = get_vlen(); + var num_elem : int = undefined; + if lmul >= 1.0 then { + num_elem = floor(lmul) * vlen / vsew_bits; + } else { + /* Ignore lmul < 1 so that the entire vreg is read, allowing all masking to + * be handled in init_masked_result */ + num_elem = vlen / vsew_bits; + }; + + num_elem +} + +/* Reads a single vreg into multiple elements */ +val read_single_vreg : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), regidx) -> vector('n, dec, bits('m)) effect {escape, rreg, undef} +function read_single_vreg(num_elem, sew, vrid) = { + let bv : vregtype = V(vrid); + var result : vector('n, dec, bits('m)) = undefined; + + foreach (i from 0 to (num_elem - 1)) { + let start_index : int = i * sew; + result[i] = slice(bv, start_index, sew); + }; + + result +} + +/* Writes multiple elements into a single vreg */ +val write_single_vreg : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), regidx, vector('n, dec, bits('m))) -> unit effect {escape, rreg, wreg} +function write_single_vreg(num_elem, sew, vrid, v) = { + r : vregtype = zeros(); + + foreach (i from (num_elem - 1) downto 0) { + r = r << sew; + r = r | EXTZ(v[i]); + }; + + V(vrid) = r +} + +/* Reads multiple vregs into a single element */ +val read_mult_vreg : forall 'n 'm, 'n >= 0. (int('n), int('m), regidx) -> bits('m) effect {escape, rreg} +function read_mult_vreg(num_vreg, num_bits, vrid) = { + let vlen : int = get_vlen(); + assert(0 < vlen & vlen <= sizeof(vlenmax)); + assert('m >= vlen); + var result : bits('m) = zeros(num_bits); + + foreach (i from (num_vreg - 1) downto 0) { + let vrid_lmul : regidx = vrid + to_bits(5, i); + let bv : vregtype = V(vrid_lmul); + + result = (result << vlen); + result = result | sail_zero_extend(bv[vlen - 1 .. 0], num_bits); + }; + + result +} + +/* Writes a single element into multiple vregs */ +val write_mult_vreg : forall 'n 'm, 'n >= 0. 
(int('n), int('m), regidx, bits('m)) -> unit effect {escape, rreg, wreg} +function write_mult_vreg(num_vreg, num_bits, vrid, bv) = { + let vlen : int = get_vlen(); + assert(0 < vlen & vlen <= sizeof(vlenmax)); + assert('m >= vlen); + foreach (i from (num_vreg - 1) downto 0) { + let vrid_lmul : regidx = vrid + to_bits(5, i); + let single_bv : vregtype = sail_zero_extend(slice(bv >> (vlen * i), 0, vlen), sizeof(vlenmax)); + V(vrid_lmul) = single_bv + } +} + +/* The general vreg reading operation with num_elem as max(VLMAX,VLEN/SEW)) */ +val read_vreg : forall 'n 'm, 8 <= 'm <= 64. (int('n), int('m), real, regidx) -> vector('n, dec, bits('m)) effect {escape, rreg, undef} +function read_vreg(num_elem, vsew_bits, lmul, vrid) = { + var result : vector('n, dec, bits('m)) = undefined; + let vlen : int = get_vlen(); + let lmul_int : int = if lmul < 1.0 then 1 else floor(lmul); + + /* Check for valid vrid */ + if lmul > 1.0 & (unsigned(vrid) + floor(lmul)) > 31 then { + /* vrid would read past largest vreg (v31) */ + result = undefined + } else if lmul > 1.0 & (unsigned(vrid) % floor(lmul) != 0) then { + /* vrid must be a multiple of lmul */ + result = undefined + } else if (vsew_bits > vlen) & (vsew_bits % vlen != 0) then { + /* vsew_bits must be a multiple of vlen */ + result = undefined + } else { + if vsew_bits > vlen then { + /* Multiple vregs per element */ + let 'num_reg_per_elem : int = vsew_bits / vlen; + assert('num_reg_per_elem >= 0); + foreach (i from 0 to (num_elem - 1)) { + let vrid_lmul : regidx = vrid + to_bits(5, i * 'num_reg_per_elem); + result[i] = read_mult_vreg('num_reg_per_elem, vsew_bits, vrid_lmul) + } + } else { + let 'num_elem_single : int = vlen / vsew_bits; + foreach (i_lmul from 0 to (lmul_int - 1)) { + let r_start_i : int = i_lmul * 'num_elem_single; + let r_end_i : int = r_start_i + 'num_elem_single - 1; + let vrid_lmul : regidx = vrid + to_bits(5, i_lmul); + let single_result : vector('num_elem_single, dec, bits('m)) = read_single_vreg('num_elem_single, vsew_bits, vrid_lmul); + foreach (r_i from r_start_i to r_end_i) { + let s_i : int = r_i - r_start_i; + assert(0 <= r_i & r_i < num_elem); + assert(0 <= s_i & s_i < 'num_elem_single); + result[r_i] = single_result[s_i]; + } + } + } + }; + + result +} + +/* Single element reading operation */ +val read_single_element : forall 'm 'x, 8 <= 'm <= 128. (int('m), int('x), real, regidx) -> bits('m) effect {escape, rreg, undef} +function read_single_element(elem_width_bits, index, emul, vrid) = { + real_vrid : regidx = vrid; + real_index : int = index; + let vlen : int = get_vlen(); + let 'elem_per_reg : int = vlen / elem_width_bits; + if emul > 1.0 then { + real_vrid = vrid + to_bits(5, index / 'elem_per_reg); + real_index = index % 'elem_per_reg; + }; + let vrid_val : vector('elem_per_reg, dec, bits('m)) = read_single_vreg('elem_per_reg, elem_width_bits, real_vrid); + + let 'real_index = real_index; + assert( 0 <= 'real_index & 'real_index < 'elem_per_reg ); + vrid_val['real_index] +} + +/* The general vreg writing operation with num_elem as max(VLMAX,VLEN/SEW)) */ +val write_vreg : forall 'n 'm, 8 <= 'm <= 128. 
(int('n), int('m), real, regidx, vector('n, dec, bits('m))) -> unit effect {escape, rreg, undef, wreg} +function write_vreg(num_elem, vsew_bits, lmul, vrid, vec) = { + let vlen : int = get_vlen(); + let lmul_int : int = if lmul < 1.0 then 1 else floor(lmul); + + if vsew_bits > vlen then { + /* Multiple vregs per element */ + let 'num_reg_per_elem : int = vsew_bits / vlen; + assert('num_reg_per_elem >= 0); + foreach (i from 0 to (num_elem - 1)) { + let vrid_lmul : regidx = vrid + to_bits(5, i * 'num_reg_per_elem); + write_mult_vreg('num_reg_per_elem, vsew_bits, vrid_lmul, vec[i]) + } + } else { + let 'num_elem_single : int = vlen / vsew_bits; + foreach (i_lmul from 0 to (lmul_int - 1)) { + var single_vec : vector('num_elem_single, dec, bits('m)) = undefined; + let vrid_lmul : regidx = vrid + to_bits(5, i_lmul); + let r_start_i : int = i_lmul * 'num_elem_single; + let r_end_i : int = r_start_i + 'num_elem_single - 1; + foreach (r_i from r_start_i to r_end_i) { + let s_i : int = r_i - r_start_i; + assert(0 <= r_i & r_i < num_elem); + assert(0 <= s_i & s_i < 'num_elem_single); + single_vec[s_i] = vec[r_i] + }; + write_single_vreg('num_elem_single, vsew_bits, vrid_lmul, single_vec) + } + } +} + +/* Single element writing operation */ +val write_single_element : forall 'm 'x, 8 <= 'm <= 128. (int('m), int('x), real, regidx, bits('m)) -> unit effect {escape, rreg, undef, wreg} +function write_single_element(elem_width_bits, index, emul, vrid, value) = { + real_vrid : regidx = vrid; + real_index : int = index; + let vlen : int = get_vlen(); + let 'elem_per_reg : int = vlen / elem_width_bits; + if emul > 1.0 then { + real_vrid = vrid + to_bits(5, index / 'elem_per_reg); + real_index = index % 'elem_per_reg; + }; + let vrid_val : vector('elem_per_reg, dec, bits('m)) = read_single_vreg('elem_per_reg, elem_width_bits, real_vrid); + r : vregtype = zeros(); + foreach (i from ('elem_per_reg - 1) downto 0) { + r = r << elem_width_bits; + if i == real_index then { + r = r | EXTZ(value); + } else { + r = r | EXTZ(vrid_val[i]); + } + }; + V(real_vrid) = r; +} + +/* Mask register reading operation with num_elem as max(VLMAX,VLEN/SEW)) */ +val read_vmask : forall 'n, 'n >= 0. (int('n), bits(1), regidx) -> vector('n, dec, bool) effect {escape, rreg, undef} +function read_vmask(num_elem, vm, vrid) = { + let vlen : int = get_vlen(); + assert('n <= vlen); + assert(0 < num_elem & num_elem <= sizeof(vlenmax)); + let vreg_val : vregtype = V(vrid); + var result : vector('n, dec, bool) = undefined; + + foreach (i from 0 to (num_elem - 1)) { + if vm == 0b1 then { + result[i] = true + } else { + result[i] = bit_to_bool(vreg_val[i]) + } + }; + + result +} + +/* This is a special version of read_vmask for carry/borrow instructions, where vm=1 means no carry */ +val read_vmask_carry : forall 'n, 'n >= 0. (int('n), bits(1), regidx) -> vector('n, dec, bool) effect {escape, rreg, undef} +function read_vmask_carry(num_elem, vm, vrid) = { + let vlen : int = get_vlen(); + assert('n <= vlen); + assert(0 < num_elem & num_elem <= sizeof(vlenmax)); + let vreg_val : vregtype = V(vrid); + var result : vector('n, dec, bool) = undefined; + + foreach (i from 0 to (num_elem - 1)) { + if vm == 0b1 then { + result[i] = false + } else { + result[i] = bit_to_bool(vreg_val[i]) + } + }; + + result +} + +/* Mask register writing operation with num_elem as max(VLMAX,VLEN/SEW)) */ +val write_vmask : forall 'n, 'n >= 0. 
(int('n), regidx, vector('n, dec, bool)) -> unit effect {escape, rreg, undef, wreg} +function write_vmask(num_elem, vrid, v) = { + let vlen : int = get_vlen(); + assert('n <= vlen); + assert(0 < vlen & vlen <= sizeof(vlenmax)); + assert(0 < num_elem & num_elem <= sizeof(vlenmax)); + let vreg_val : vregtype = V(vrid); + var result : vregtype = undefined; + + foreach (i from 0 to (num_elem - 1)) { + result[i] = bool_to_bit(v[i]) + }; + foreach (i from num_elem to (vlen - 1)) { + /* Mask tail is always agnostic */ + result[i] = vreg_val[i] + }; + + V(vrid) = result +} + +/* end vector register */ diff --git a/model/riscv_vlen.sail b/model/riscv_vlen.sail new file mode 100644 index 000000000..aadd36e29 --- /dev/null +++ b/model/riscv_vlen.sail @@ -0,0 +1,64 @@ +register ELEN : bits(1) + +val get_elen : unit -> {|32, 64|} effect {rreg} + +function get_elen() = match ELEN { + 0b0 => 32, + 0b1 => 64 +} + +register VLEN : bits(4) + +val get_vlen : unit -> {|32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536|} effect {rreg} + +function get_vlen() = match VLEN { + 0b0000 => 32, + 0b0001 => 64, + 0b0010 => 128, + 0b0011 => 256, + 0b0100 => 512, + 0b0101 => 1024, + 0b0110 => 2048, + 0b0111 => 4096, + 0b1000 => 8192, + 0b1001 => 16384, + 0b1010 => 32768, + _ => 65536 +} + +val vlen_bytes : unit -> {|4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192|} effect {rreg} + +function vlen_bytes() = match VLEN { + 0b0000 => 4, + 0b0001 => 8, + 0b0010 => 16, + 0b0011 => 32, + 0b0100 => 64, + 0b0101 => 128, + 0b0110 => 256, + 0b0111 => 512, + 0b1000 => 1024, + 0b1001 => 2048, + 0b1010 => 4096, + _ => 8192 +} + +/* to determine the length of vstart csr */ +val get_vstart_length : unit -> {|5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16|} effect {rreg} + +function get_vstart_length() = match VLEN { + 0b0000 => 5, + 0b0001 => 6, + 0b0010 => 7, + 0b0011 => 8, + 0b0100 => 9, + 0b0101 => 10, + 0b0110 => 11, + 0b0111 => 12, + 0b1000 => 13, + 0b1001 => 14, + 0b1010 => 15, + _ => 16 +} + +type vlenmax : Int = 65536 diff --git a/model/riscv_vreg_type.sail b/model/riscv_vreg_type.sail new file mode 100755 index 000000000..025664656 --- /dev/null +++ b/model/riscv_vreg_type.sail @@ -0,0 +1,141 @@ +/* Definitions for vector registers (V extension) */ + +type vreglenbits = bits(vlenmax) /* use the largest possible register length */ + +/* default vector register type */ +type vregtype = vreglenbits + +/* vector instruction types */ +enum vsetop = { VSETVLI, VSETVL } + +enum vvfunct6 = { VV_VADD, VV_VSUB, VV_VMINU, VV_VMIN, VV_VMAXU, VV_VMAX, VV_VAND, VV_VOR, VV_VXOR, + VV_VRGATHER, VV_VRGATHEREI16, VV_VSADDU, VV_VSADD, VV_VSSUBU, VV_VSSUB, VV_VSLL, VV_VSMUL, + VV_VSRL, VV_VSRA, VV_VSSRL, VV_VSSRA } + +enum vvcmpfunct6 = { VVCMP_VMSEQ, VVCMP_VMSNE, VVCMP_VMSLTU, VVCMP_VMSLT, VVCMP_VMSLEU, VVCMP_VMSLE } + +enum vvmfunct6 = { VVM_VMADC, VVM_VMSBC } + +enum vvmcfunct6 = { VVMC_VMADC, VVMC_VMSBC } + +enum vvmsfunct6 = { VVMS_VADC, VVMS_VSBC } + +enum vxmfunct6 = { VXM_VMADC, VXM_VMSBC } + +enum vxmcfunct6 = { VXMC_VMADC, VXMC_VMSBC } + +enum vxmsfunct6 = { VXMS_VADC, VXMS_VSBC } + +enum vimfunct6 = { VIM_VMADC } + +enum vimcfunct6 = { VIMC_VMADC } + +enum vimsfunct6 = { VIMS_VADC } + +enum vxcmpfunct6 = { VXCMP_VMSEQ, VXCMP_VMSNE, VXCMP_VMSLTU, VXCMP_VMSLT, VXCMP_VMSLEU, VXCMP_VMSLE, + VXCMP_VMSGTU, VXCMP_VMSGT } + +enum vicmpfunct6 = { VICMP_VMSEQ, VICMP_VMSNE, VICMP_VMSLEU, VICMP_VMSLE, VICMP_VMSGTU, VICMP_VMSGT } + +enum nvfunct6 = { NV_VNCLIPU, NV_VNCLIP } + +enum nvsfunct6 = { NVS_VNSRL, NVS_VNSRA } + +enum 
nxfunct6 = { NX_VNCLIPU, NX_VNCLIP} + +enum nxsfunct6 = { NXS_VNSRL, NXS_VNSRA } + +enum mmfunct6 = { MM_VMAND, MM_VMNAND, MM_VMANDNOT, MM_VMXOR, MM_VMOR, MM_VMNOR, MM_VMORNOT, MM_VMXNOR } + +enum nifunct6 = { NI_VNCLIPU, NI_VNCLIP } + +enum nisfunct6 = { NIS_VNSRL, NIS_VNSRA } + +enum wvvfunct6 = { WVV_VADD, WVV_VSUB, WVV_VADDU, WVV_VSUBU, WVV_VWMUL, WVV_VWMULU, WVV_VWMULSU } + +enum wvfunct6 = { WV_VADD, WV_VSUB, WV_VADDU, WV_VSUBU } + +enum wvxfunct6 = { WVX_VADD, WVX_VSUB, WVX_VADDU, WVX_VSUBU, WVX_VWMUL, WVX_VWMULU, WVX_VWMULSU } + +enum wxfunct6 = { WX_VADD, WX_VSUB, WX_VADDU, WX_VSUBU } + +enum vext2funct6 = { VEXT2_ZVF2, VEXT2_SVF2 } + +enum vext4funct6 = { VEXT4_ZVF4, VEXT4_SVF4 } + +enum vext8funct6 = { VEXT8_ZVF8, VEXT8_SVF8 } + +enum vxfunct6 = { VX_VADD, VX_VSUB, VX_VRSUB, VX_VMINU, VX_VMIN, VX_VMAXU, VX_VMAX, + VX_VAND, VX_VOR, VX_VXOR, VX_VSADDU, VX_VSADD, VX_VSSUBU, VX_VSSUB, + VX_VSLL, VX_VSMUL, VX_VSRL, VX_VSRA, VX_VSSRL, VX_VSSRA } + +enum vifunct6 = { VI_VADD, VI_VRSUB, VI_VAND, VI_VOR, VI_VXOR, VI_VSADDU, VI_VSADD, + VI_VSLL, VI_VSRL, VI_VSRA, VI_VSSRL, VI_VSSRA } + +enum vxsgfunct6 = { VX_VSLIDEUP, VX_VSLIDEDOWN, VX_VRGATHER } + +enum visgfunct6 = { VI_VSLIDEUP, VI_VSLIDEDOWN, VI_VRGATHER } + +enum mvvfunct6 = { MVV_VAADDU, MVV_VAADD, MVV_VASUBU, MVV_VASUB, MVV_VMUL, MVV_VMULH, + MVV_VMULHU, MVV_VMULHSU, MVV_VDIVU, MVV_VDIV, MVV_VREMU, MVV_VREM } + +enum mvvmafunct6 = { MVV_VMACC, MVV_VNMSAC, MVV_VMADD, MVV_VNMSUB } + +enum rmvvfunct6 = { MVV_VREDSUM, MVV_VREDAND, MVV_VREDOR, MVV_VREDXOR, + MVV_VREDMINU, MVV_VREDMIN, MVV_VREDMAXU, MVV_VREDMAX } + +enum rivvfunct6 = { IVV_VWREDSUMU, IVV_VWREDSUM } + +enum rfvvfunct6 = { FVV_VFREDOSUM, FVV_VFREDUSUM, FVV_VFREDMAX, FVV_VFREDMIN, + FVV_VFWREDOSUM, FVV_VFWREDUSUM } + +enum wmvvfunct6 = { WMVV_VWMACCU, WMVV_VWMACC, WMVV_VWMACCSU } + +enum mvxfunct6 = { MVX_VAADDU, MVX_VAADD, MVX_VASUBU, MVX_VASUB, MVX_VSLIDE1UP, MVX_VSLIDE1DOWN, + MVX_VMUL, MVX_VMULH, MVX_VMULHU, MVX_VMULHSU, MVX_VDIVU, MVX_VDIV, MVX_VREMU, MVX_VREM } + +enum mvxmafunct6 = { MVX_VMACC, MVX_VNMSAC, MVX_VMADD, MVX_VNMSUB } + +enum wmvxfunct6 = { WMVX_VWMACCU, WMVX_VWMACC, WMVX_VWMACCUS, WMVX_VWMACCSU } + +enum maskfunct3 = { VV_VMERGE, VI_VMERGE, VX_VMERGE } + +enum vlewidth = { VLE8, VLE16, VLE32, VLE64 } + +enum fvvfunct6 = { FVV_VADD, FVV_VSUB, FVV_VMIN, FVV_VMAX, FVV_VSGNJ, FVV_VSGNJN, FVV_VSGNJX, + FVV_VDIV, FVV_VMUL } + +enum fvvmafunct6 = { FVV_VMADD, FVV_VNMADD, FVV_VMSUB, FVV_VNMSUB, FVV_VMACC, FVV_VNMACC, FVV_VMSAC, FVV_VNMSAC } + +enum fwvvfunct6 = { FWVV_VADD, FWVV_VSUB, FWVV_VMUL } + +enum fwvvmafunct6 = { FWVV_VMACC, FWVV_VNMACC, FWVV_VMSAC, FWVV_VNMSAC } + +enum fwvfunct6 = { FWV_VADD, FWV_VSUB } + +enum fvvmfunct6 = { FVVM_VMFEQ, FVVM_VMFLE, FVVM_VMFLT, FVVM_VMFNE } + +enum vfunary0 = { FV_CVT_XU_F, FV_CVT_X_F, FV_CVT_F_XU, FV_CVT_F_X, FV_CVT_RTZ_XU_F, FV_CVT_RTZ_X_F } + +enum vfwunary0 = { FWV_CVT_XU_F, FWV_CVT_X_F, FWV_CVT_F_XU, FWV_CVT_F_X, FWV_CVT_F_F, + FWV_CVT_RTZ_XU_F, FWV_CVT_RTZ_X_F } + +enum vfnunary0 = { FNV_CVT_XU_F, FNV_CVT_X_F, FNV_CVT_F_XU, FNV_CVT_F_X, FNV_CVT_F_F, + FNV_CVT_ROD_F_F, FNV_CVT_RTZ_XU_F, FNV_CVT_RTZ_X_F} + +enum vfunary1 = { FVV_VSQRT, FVV_VRSQRT7, FVV_VREC7, FVV_VCLASS } + +enum fvffunct6 = { VF_VADD, VF_VSUB, VF_VMIN, VF_VMAX, VF_VSGNJ, VF_VSGNJN, VF_VSGNJX, + VF_VDIV, VF_VRDIV, VF_VMUL, VF_VRSUB, VF_VSLIDE1UP, VF_VSLIDE1DOWN } + +enum fvfmafunct6 = { VF_VMADD, VF_VNMADD, VF_VMSUB, VF_VNMSUB, VF_VMACC, VF_VNMACC, VF_VMSAC, VF_VNMSAC } + +enum fwvffunct6 = { FWVF_VADD, FWVF_VSUB, FWVF_VMUL } + +enum fwvfmafunct6 
= { FWVF_VMACC, FWVF_VNMACC, FWVF_VMSAC, FWVF_VNMSAC } + +enum fwffunct6 = { FWF_VADD, FWF_VSUB } + +enum fvfmfunct6 = { VFM_VMFEQ, VFM_VMFLE, VFM_VMFLT, VFM_VMFNE, VFM_VMFGT, VFM_VMFGE } + +enum vmlsop = { VLM, VSM } diff --git a/ocaml_emulator/platform.ml b/ocaml_emulator/platform.ml index ccf487589..4a72bceea 100644 --- a/ocaml_emulator/platform.ml +++ b/ocaml_emulator/platform.ml @@ -11,6 +11,7 @@ let config_enable_dirty_update = ref false let config_enable_misaligned_access = ref false let config_mtval_has_illegal_inst_bits = ref false let config_enable_pmp = ref false +let config_enable_rvv = ref true let platform_arch = ref P.RV64 @@ -78,6 +79,7 @@ let enable_writable_misa () = !config_enable_writable_misa let enable_rvc () = !config_enable_rvc let enable_next () = !config_enable_next let enable_fdext () = false +let enable_rvv () = !config_enable_rvv let enable_dirty_update () = !config_enable_dirty_update let enable_misaligned_access () = !config_enable_misaligned_access let mtval_has_illegal_inst_bits () = !config_mtval_has_illegal_inst_bits diff --git a/ocaml_emulator/riscv_ocaml_sim.ml b/ocaml_emulator/riscv_ocaml_sim.ml index 6e612ad26..7ea5d4113 100644 --- a/ocaml_emulator/riscv_ocaml_sim.ml +++ b/ocaml_emulator/riscv_ocaml_sim.ml @@ -53,6 +53,9 @@ let options = Arg.align ([("-dump-dts", ("-disable-rvc", Arg.Clear P.config_enable_rvc, " disable the RVC extension on boot"); + ("-disable-rvv", + Arg.Clear P.config_enable_rvv, + " disable the RVV extension on boot"); ("-disable-writable-misa-c", Arg.Clear P.config_enable_writable_misa, " leave misa hardwired to its initial value"); From c673d33412d0c019d1ca4025bbfc41dfbea59883 Mon Sep 17 00:00:00 2001 From: Xinlai Wan Date: Wed, 22 Feb 2023 03:55:44 +0800 Subject: [PATCH 02/11] Vector load / store instructions (#198) * Add vector load / store instructions * Modify the implementation of SEW, LMUL, VLEN and avoid real numbers in the code * Update vstart setting in vector load / store instructions * Remove unnecessary assert statements in vector instructions * Fix bugs in vleff instructions and revise coding styles * Add guards for vector encdec clauses, Avoid redundant memory access after vector load/store failure --- Makefile | 3 + model/prelude.sail | 21 +- model/riscv_insts_vext_arith.sail | 496 ++++++++++ model/riscv_insts_vext_mem.sail | 1460 +++++++++++++++++++++++++++++ model/riscv_insts_vext_utils.sail | 183 ++-- model/riscv_insts_vext_vset.sail | 45 +- model/riscv_insts_zicsr.sail | 2 +- model/riscv_sys_regs.sail | 54 +- model/riscv_vext_regs.sail | 224 +++-- model/riscv_vlen.sail | 45 +- 10 files changed, 2224 insertions(+), 309 deletions(-) create mode 100644 model/riscv_insts_vext_arith.sail create mode 100644 model/riscv_insts_vext_mem.sail mode change 100755 => 100644 model/riscv_vext_regs.sail diff --git a/Makefile b/Makefile index 7d1562c74..6b305ddb4 100644 --- a/Makefile +++ b/Makefile @@ -39,6 +39,9 @@ SAIL_DEFAULT_INST += riscv_insts_zbkx.sail SAIL_DEFAULT_INST += riscv_insts_vext_utils.sail SAIL_DEFAULT_INST += riscv_insts_vext_vset.sail +SAIL_DEFAULT_INST += riscv_insts_vext_arith.sail +SAIL_DEFAULT_INST += riscv_insts_vext_mem.sail + SAIL_SEQ_INST = $(SAIL_DEFAULT_INST) riscv_jalr_seq.sail SAIL_RMEM_INST = $(SAIL_DEFAULT_INST) riscv_jalr_rmem.sail riscv_insts_rmem.sail diff --git a/model/prelude.sail b/model/prelude.sail index 754f53315..8eb5efaf9 100644 --- a/model/prelude.sail +++ b/model/prelude.sail @@ -74,7 +74,6 @@ $include $include $include $include -$include val string_startswith = 
"string_startswith" : (string, string) -> bool val string_drop = "string_drop" : (string, nat) -> string @@ -345,21 +344,21 @@ function def_spc_backwards s = () val def_spc_matches_prefix : string -> option((unit, nat)) function def_spc_matches_prefix s = opt_spc_matches_prefix(s) -val div_int : (int, int) -> int -function div_int(x, y) = { - floor(div_real(to_real(x), to_real(y))) -} - -val "print_real" : (string, real) -> unit val "print_int" : (string, int) -> unit -overload operator / = {div_int, div_real} -overload operator * = {mult_atom, mult_int, mult_real} +overload operator / = {quot_round_zero} +overload operator * = {mult_atom, mult_int} -/* helper for vector extension where the element width is between 8 and 64 */ -val log2 : forall 'n, 'n in {8, 16, 32, 64}. int('n) -> int +/* helper for vector extension + * 1. EEW between 8 and 64 + * 2. EMUL in vmvr.v instructions between 1 and 8 + */ +val log2 : forall 'n, 'n in {1, 2, 4, 8, 16, 32, 64}. int('n) -> int function log2(n) = { let result : int = match n { + 1 => 0, + 2 => 1, + 4 => 2, 8 => 3, 16 => 4, 32 => 5, diff --git a/model/riscv_insts_vext_arith.sail b/model/riscv_insts_vext_arith.sail new file mode 100644 index 000000000..782e6e8b4 --- /dev/null +++ b/model/riscv_insts_vext_arith.sail @@ -0,0 +1,496 @@ +/* ******************************************************************************* */ +/* This file implements part of the vector extension. */ +/* Chapter 11: Vector Integer Arithmetic Instructions */ +/* Chapter 12: Vector Fixed-Point Arithmetic Instructions */ +/* Chapter 16: Vector Permutation Instructions */ +/* ******************************************************************************* */ + +/* ******************************* OPIVV (VVTYPE) ******************************** */ +union clause ast = VVTYPE : (vvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_vvfunct6 : vvfunct6 <-> bits(6) = { + VV_VADD <-> 0b000000, + VV_VSUB <-> 0b000010, + VV_VMINU <-> 0b000100, + VV_VMIN <-> 0b000101, + VV_VMAXU <-> 0b000110, + VV_VMAX <-> 0b000111, + VV_VAND <-> 0b001001, + VV_VOR <-> 0b001010, + VV_VXOR <-> 0b001011, + VV_VRGATHER <-> 0b001100, + VV_VRGATHEREI16 <-> 0b001110, + VV_VSADDU <-> 0b100000, + VV_VSADD <-> 0b100001, + VV_VSSUBU <-> 0b100010, + VV_VSSUB <-> 0b100011, + VV_VSLL <-> 0b100101, + VV_VSMUL <-> 0b100111, + VV_VSRL <-> 0b101000, + VV_VSRA <-> 0b101001, + VV_VSSRL <-> 0b101010, + VV_VSSRA <-> 0b101011 +} + +mapping clause encdec = VVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_vvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW_pow = get_sew_pow(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let VLEN_pow = get_vlen_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VV_VADD => vs2_val[i] + vs1_val[i], + VV_VSUB => vs2_val[i] - vs1_val[i], + 
VV_VAND => vs2_val[i] & vs1_val[i], + VV_VOR => vs2_val[i] | vs1_val[i], + VV_VXOR => vs2_val[i] ^ vs1_val[i], + VV_VSADDU => unsigned_saturation('m, EXTZ('m + 1, vs2_val[i]) + EXTZ('m + 1, vs1_val[i])), + VV_VSADD => signed_saturation('m, EXTS('m + 1, vs2_val[i]) + EXTS('m + 1, vs1_val[i])), + VV_VSSUBU => { + if unsigned(vs2_val[i]) < unsigned(vs1_val[i]) then zeros() + else unsigned_saturation('m, EXTZ('m + 1, vs2_val[i]) - EXTZ('m + 1, vs1_val[i])) + }, + VV_VSSUB => signed_saturation('m, EXTS('m + 1, vs2_val[i]) - EXTS('m + 1, vs1_val[i])), + VV_VSMUL => { + let result_mul = to_bits('m * 2, signed(vs2_val[i]) * signed(vs1_val[i])); + let rounding_incr = get_fixed_rounding_incr(result_mul, 'm - 1); + let result_wide = (result_mul >> ('m - 1)) + EXTZ('m * 2, rounding_incr); + signed_saturation('m, result_wide['m..0]) + }, + VV_VSLL => { + let shift_amount = get_shift_amount(vs1_val[i], SEW); + vs2_val[i] << shift_amount + }, + VV_VSRL => { + let shift_amount = get_shift_amount(vs1_val[i], SEW); + vs2_val[i] >> shift_amount + }, + VV_VSRA => { + let shift_amount = get_shift_amount(vs1_val[i], SEW); + let v_double : bits('m * 2) = EXTS(vs2_val[i]); + slice(v_double >> shift_amount, 0, SEW) + }, + VV_VSSRL => { + let shift_amount = get_shift_amount(vs1_val[i], SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + (vs2_val[i] >> shift_amount) + EXTZ('m, rounding_incr) + }, + VV_VSSRA => { + let shift_amount = get_shift_amount(vs1_val[i], SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + let v_double : bits('m * 2) = EXTS(vs2_val[i]); + slice(v_double >> shift_amount, 0, SEW) + EXTZ('m, rounding_incr) + }, + VV_VMINU => to_bits(SEW, min(unsigned(vs2_val[i]), unsigned(vs1_val[i]))), + VV_VMIN => to_bits(SEW, min(signed(vs2_val[i]), signed(vs1_val[i]))), + VV_VMAXU => to_bits(SEW, max(unsigned(vs2_val[i]), unsigned(vs1_val[i]))), + VV_VMAX => to_bits(SEW, max(signed(vs2_val[i]), signed(vs1_val[i]))), + VV_VRGATHER => { + assert(vs1 != vd & vs2 != vd); + let idx = unsigned(vs1_val[i]); + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX <= 'n); + if idx < VLMAX then vs2_val[idx] else zeros() + }, + VV_VRGATHEREI16 => { + assert(vs1 != vd & vs2 != vd); + /* vrgatherei16.vv uses SEW/LMUL for the data in vs2 but EEW=16 and EMUL = (16/SEW)*LMUL for the indices in vs1 */ + let vs1_new : vector('n, dec, bits(16)) = read_vreg(num_elem, 16, 4 + LMUL_pow - SEW_pow, vs1); + let idx = unsigned(vs1_new[i]); + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX <= 'n); + if idx < VLMAX then vs2_val[idx] else zeros() + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vvtype_mnemonic : vvfunct6 <-> string = { + VV_VADD <-> "vadd.vv", + VV_VSUB <-> "vsub.vv", + VV_VAND <-> "vand.vv", + VV_VOR <-> "vor.vv", + VV_VXOR <-> "vxor.vv", + VV_VRGATHER <-> "vrgather.vv", + VV_VRGATHEREI16 <-> "vrgatherei16.vv", + VV_VSADDU <-> "vsaddu.vv", + VV_VSADD <-> "vsadd.vv", + VV_VSSUBU <-> "vssubu.vv", + VV_VSSUB <-> "vssub.vv", + VV_VSLL <-> "vsll.vv", + VV_VSMUL <-> "vsmul.vv", + VV_VSRL <-> "vsrl.vv", + VV_VSRA <-> "vsra.vv", + VV_VSSRL <-> "vssrl.vv", + VV_VSSRA <-> "vssra.vv", + VV_VMINU <-> "vminu.vv", + VV_VMIN <-> "vmin.vv", + VV_VMAXU <-> "vmaxu.vv", + VV_VMAX <-> "vmax.vv" +} + +mapping clause assembly = VVTYPE(funct6, vm, vs2, vs1, vd) + <-> vvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ 
maybe_vmask(vm) + +/* ***************** OPIVX (Vector Slide & Gather Instructions) ****************** */ +/* Slide and gather instructions extend rs1/imm to XLEN intead of SEW bits */ +union clause ast = VXSG : (vxsgfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_vxsgfunct6 : vxsgfunct6 <-> bits(6) = { + VX_VSLIDEUP <-> 0b001110, + VX_VSLIDEDOWN <-> 0b001111, + VX_VRGATHER <-> 0b001100 +} + +mapping clause encdec = VXSG(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_vxsgfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { + let SEW_pow = get_sew_pow(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let VLEN_pow = get_vlen_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : nat = unsigned(X(rs1)); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VX_VSLIDEUP => { + assert(vs2 != vd); + if i >= rs1_val then vs2_val[i - rs1_val] else vd_val[i] + }, + VX_VSLIDEDOWN => { + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if i + rs1_val < VLMAX then vs2_val[i + rs1_val] else zeros() + }, + VX_VRGATHER => { + assert(vs2 != vd); + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if rs1_val < VLMAX then vs2_val[rs1_val] else zeros() + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vxsg_mnemonic : vxsgfunct6 <-> string = { + VX_VSLIDEUP <-> "vslideup.vx", + VX_VSLIDEDOWN <-> "vslidedown.vx", + VX_VRGATHER <-> "vrgather.vx" +} + +mapping clause assembly = VXSG(funct6, vm, vs2, rs1, vd) + <-> vxsg_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ***************** OPIVI (Vector Slide & Gather Instructions) ****************** */ +/* Slide and gather instructions extend rs1/imm to XLEN intead of SEW bits */ +union clause ast = VISG : (visgfunct6, bits(1), regidx, bits(5), regidx) + +mapping encdec_visgfunct6 : visgfunct6 <-> bits(6) = { + VI_VSLIDEUP <-> 0b001110, + VI_VSLIDEDOWN <-> 0b001111, + VI_VRGATHER <-> 0b001100 +} + +mapping clause encdec = VISG(funct6, vm, vs2, simm, vd) if haveRVV() + <-> encdec_visgfunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { + let SEW_pow = get_sew_pow(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let VLEN_pow = get_vlen_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let imm_val : nat = unsigned(EXTZ(sizeof(xlen), simm)); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, 
mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VI_VSLIDEUP => { + assert(vs2 != vd); + if i >= imm_val then vs2_val[i - imm_val] else vd_val[i] + }, + VI_VSLIDEDOWN => { + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if i + imm_val < VLMAX then vs2_val[i + imm_val] else zeros() + }, + VI_VRGATHER => { + assert(vs2 != vd); + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if imm_val < VLMAX then vs2_val[imm_val] else zeros() + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping visg_mnemonic : visgfunct6 <-> string = { + VI_VSLIDEUP <-> "vslideup.vi", + VI_VSLIDEDOWN <-> "vslidedown.vi", + VI_VRGATHER <-> "vrgather.vi" +} + +mapping clause assembly = VISG(funct6, vm, vs2, simm, vd) + <-> visg_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(simm) ^ maybe_vmask(vm) + +/* ********************* Whole Vector Register Move (OPIVI) ********************** */ +union clause ast = VMVRTYPE : (regidx, bits(5), regidx) + +mapping clause encdec = VMVRTYPE(vs2, simm, vd) if haveRVV() + <-> 0b100111 @ 0b1 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMVRTYPE(vs2, simm, vd)) = { + let SEW = get_sew(); + let imm_val = unsigned(EXTZ(sizeof(xlen), simm)); + let EMUL = imm_val + 1; + + assert(EMUL == 1 | EMUL == 2 | EMUL == 4 | EMUL == 8); + let EMUL_pow = log2(EMUL); + let num_elem = get_num_elem(EMUL_pow, SEW); + let 'n = num_elem; + let 'm = SEW; + + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, EMUL_pow, vs2); + write_vreg(num_elem, SEW, EMUL_pow, vd, vs2_val); + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping simm_string : bits(5) <-> string = { + 0b00000 <-> "1", + 0b00001 <-> "2", + 0b00011 <-> "4", + 0b00111 <-> "8" +} + +mapping clause assembly = VMVRTYPE(vs2, simm, vd) + <-> "vmv" ^ simm_string(simm) ^ "r.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) + +/* ****************************** OPMVV (VWXUNARY0) ****************************** */ +union clause ast = VMVXS : (regidx, regidx) + +mapping clause encdec = VMVXS(vs2, rd) if haveRVV() + <-> 0b010000 @ 0b1 @ vs2 @ 0b00000 @ 0b010 @ rd @ 0b1010111 if haveRVV() + +function clause execute(VMVXS(vs2, rd)) = { + let SEW = get_sew(); + let num_elem = get_num_elem(0, SEW); + + assert(num_elem > 0); + let 'n = num_elem; + let 'm = SEW; + + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vs2); + X(rd) = if sizeof(xlen) < SEW then slice(vs2_val[0], 0, sizeof(xlen)) + else if sizeof(xlen) > SEW then EXTS(vs2_val[0]) + else vs2_val[0]; + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = VMVXS(vs2, rd) + <-> "vmv.x.s" ^ spc() ^ reg_name(rd) ^ sep() ^ vreg_name(vs2) + +/* ****************************** OPMVX (VRXUNARY0) ****************************** */ +union clause ast = VMVSX : (regidx, regidx) + +mapping clause encdec = VMVSX(rs1, vd) if haveRVV() + <-> 0b010000 @ 0b1 @ 0b00000 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMVSX(rs1, vd)) = { + let SEW = get_sew(); + let num_elem = get_num_elem(0, SEW); + + assert(num_elem > 0); + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, 'm); + let vd_val : 
vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, 0, vd_val, vm_val); + + /* one body element */ + if mask[0] == true then result[0] = rs1_val; + + /* others treated as tail elements */ + let tail_ag : agtype = get_vtype_vta(); + if tail_ag == UNDISTURBED then { + foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] /* TODO: configuration support */ + }; + + write_vreg(num_elem, SEW, 0, vd, result); + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = VMVSX(rs1, vd) + <-> "vmv.s.x" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) + +/* ********************** Integer Move Instruction (OPIVV) *********************** */ +union clause ast = MOVETYPEV : (regidx, regidx) + +mapping clause encdec = MOVETYPEV (vs1, vd) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MOVETYPEV(vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = vs1_val[i] + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = MOVETYPEV(vs1, vd) + <-> "vmv.v.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) + +/* ********************** Integer Move Instruction (OPIVX) *********************** */ +union clause ast = MOVETYPEX : (regidx, regidx) + +mapping clause encdec = MOVETYPEX (rs1, vd) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MOVETYPEX(rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let rs1_val : bits('m) = get_scalar(rs1, 'm); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = rs1_val + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = MOVETYPEX(rs1, vd) + <-> "vmv.v.x" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) + +/* ********************** Integer Move Instruction (OPIVI) *********************** */ +union clause ast = MOVETYPEI : (regidx, bits(5)) + +mapping clause encdec = MOVETYPEI (vd, simm) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MOVETYPEI(vd, simm)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let 
num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = imm_val + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = MOVETYPEI(vd, simm) + <-> "vmv.v.i" ^ spc() ^ vreg_name(vd) ^ sep() ^ hex_bits_5(simm) diff --git a/model/riscv_insts_vext_mem.sail b/model/riscv_insts_vext_mem.sail new file mode 100644 index 000000000..81ff53092 --- /dev/null +++ b/model/riscv_insts_vext_mem.sail @@ -0,0 +1,1460 @@ +/* ************************************************************************ */ +/* This file implements part of the vector extension. */ +/* Chapter 7: Vector Loads and Stores */ +/* ************************************************************************ */ + +mapping nfields_int : bits(3) <-> {|1, 2, 3, 4, 5, 6, 7, 8|} = { + 0b000 <-> 1, + 0b001 <-> 2, + 0b010 <-> 3, + 0b011 <-> 4, + 0b100 <-> 5, + 0b101 <-> 6, + 0b110 <-> 7, + 0b111 <-> 8 +} + +mapping nfields_string : bits(3) <-> string = { + 0b000 <-> "1", + 0b001 <-> "2", + 0b010 <-> "3", + 0b011 <-> "4", + 0b100 <-> "5", + 0b101 <-> "6", + 0b110 <-> "7", + 0b111 <-> "8" +} + +mapping vlewidth_bitsnumberstr : vlewidth <-> string = { + VLE8 <-> "8", + VLE16 <-> "16", + VLE32 <-> "32", + VLE64 <-> "64" +} + +mapping encdec_vlewidth : vlewidth <-> bits(3) = { + VLE8 <-> 0b000, + VLE16 <-> 0b101, + VLE32 <-> 0b110, + VLE64 <-> 0b111 +} + +mapping vlewidth_bytesnumber : vlewidth <-> {|1, 2, 4, 8|} = { + VLE8 <-> 1, + VLE16 <-> 2, + VLE32 <-> 4, + VLE64 <-> 8 +} + +mapping vlewidth_pow : vlewidth <-> {|3, 4, 5, 6|} = { + VLE8 <-> 3, + VLE16 <-> 4, + VLE32 <-> 5, + VLE64 <-> 6 +} + +mapping bytes_wordwidth : {|1, 2, 4, 8|} <-> word_width = { + 1 <-> BYTE, + 2 <-> HALF, + 4 <-> WORD, + 8 <-> DOUBLE +} + +/* ******************** Vector Load Unit-Stride Normal (nf=0, mop=0, lumop=0) ******************** */ +union clause ast = VLETYPE : (bits(1), regidx, vlewidth, regidx) + +mapping clause encdec = VLETYPE(vm, rs1, width, vd) if haveRVV() + <-> 0b000 @ 0b0 @ 0b00 @ vm @ 0b00000 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vle : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
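/* unit-stride load: element i is read from the base address in rs1 plus i * EEW/8 bytes; vstart is set to i before each access so a faulting element is reported precisely */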
(bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vle (vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); + total : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + status : Retired = RETIRE_SUCCESS; + + (total, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = i * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => total[i] = result, + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + }; + + if status == RETIRE_SUCCESS then write_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd, total); + vstart = EXTZ(0b0); + status +} + +function clause execute(VLETYPE(vm, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + + process_vle(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) +} + +mapping vletype_mnemonic : vlewidth <-> string = { + VLE8 <-> "vle8.v", + VLE16 <-> "vle16.v", + VLE32 <-> "vle32.v", + VLE64 <-> "vle64.v" +} + +mapping clause assembly = VLETYPE(vm, rs1, width, vd) + <-> vletype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + +/* ******************** Vector Store Unit-Stride Normal (nf=0, mop=0, sumop=0) ******************* */ +union clause ast = VSETYPE : (bits(1), regidx, vlewidth, regidx) + +mapping clause encdec = VSETYPE(vm, rs1, width, vs3) if haveRVV() + <-> 0b000 @ 0b0 @ 0b00 @ vm @ 0b00000 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +val process_vse : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
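/* unit-stride store: mirrors process_vle, but announces the write effective address (mem_write_ea) before each mem_write_value */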
(bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vse (vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) = { + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs3_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vs3); + total : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + status : Retired = RETIRE_SUCCESS; + + (total, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vs3_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = i * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, vs3_val[i], false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VSETYPE(vm, rs1, width, vs3)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + + process_vse(vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) +} + +mapping vsetype_mnemonic : vlewidth <-> string = { + VLE8 <-> "vse8.v", + VLE16 <-> "vse16.v", + VLE32 <-> "vse32.v", + VLE64 <-> "vse64.v" +} + +mapping clause assembly = VSETYPE(vm, rs1, width, vs3) + <-> vsetype_mnemonic(width) ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + +/* ************************** Vector Load Strided Normal (nf=0, mop=10) ************************** */ +union clause ast = VLSETYPE : (bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VLSETYPE(vm, rs2, rs1, width, vd) if haveRVV() + <-> 0b000 @ 0b0 @ 0b10 @ vm @ rs2 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vlse : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
(bits(1), regidx, int('b), regidx, regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlse (vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); + let rs2_val : int = signed(get_scalar(rs2, sizeof(xlen))); + total : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + status : Retired = RETIRE_SUCCESS; + + (total, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = i * rs2_val; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => total[i] = result, + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + }; + + if status == RETIRE_SUCCESS then write_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd, total); + vstart = EXTZ(0b0); + status +} + +function clause execute(VLSETYPE(vm, rs2, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + + process_vlse(vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) +} + +mapping vlsetype_mnemonic : vlewidth <-> string = { + VLE8 <-> "vlse8.v", + VLE16 <-> "vlse16.v", + VLE32 <-> "vlse32.v", + VLE64 <-> "vlse64.v" +} + +mapping clause assembly = VLSETYPE(vm, rs2, rs1, width, vd) + <-> vlsetype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2)^ sep() ^ maybe_vmask(vm) + +/* ************************** Vector Store Strided Normal (nf=0, mop=10) ************************* */ +union clause ast = VSSETYPE : (bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VSSETYPE(vm, rs2, rs1, width, vs3) if haveRVV() + <-> 0b000 @ 0b0 @ 0b10 @ vm @ rs2 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +val process_vsse : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
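/* strided store: same addressing as process_vlse, writing vs3[i] to the base address plus i * X(rs2) */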
(bits(1), regidx, int('b), regidx, regidx, int('p), int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vsse (vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs3_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vs3); + let rs2_val : int = signed(get_scalar(rs2, sizeof(xlen))); + total : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + status : Retired = RETIRE_SUCCESS; + + (total, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vs3_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = i * rs2_val; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, vs3_val[i], false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VSSETYPE(vm, rs2, rs1, width, vs3)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + + process_vsse(vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) +} + +mapping vssetype_mnemonic : vlewidth <-> string = { + VLE8 <-> "vsse8.v", + VLE16 <-> "vsse16.v", + VLE32 <-> "vsse32.v", + VLE64 <-> "vsse64.v" +} + +mapping clause assembly = VSSETYPE(vm, rs2, rs1, width, vs3) + <-> vssetype_mnemonic(width) ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2)^ sep() ^ maybe_vmask(vm) + +/* ************************ Vector Load Indexed Unordered (nf=0, mop=01) ************************* */ +union clause ast = VLUXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VLUXEITYPE(vm, vs2, rs1, width, vd) if haveRVV() + <-> 0b000 @ 0b0 @ 0b01 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vlxei : forall 'ib 'db 'ip 'dp 'n, ('ib in {1, 2, 4, 8}) & ('db in {1, 2, 4, 8}) & ('n >= 0). 
(bits(1), regidx, int('ib), int('db), int('ip), int('dp), regidx, regidx, int('n), int) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlxei (vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { + let width_type : word_width = bytes_wordwidth(EEW_data_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd); + let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); + total : vector('n, dec, bits('db * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (total, mask) = init_masked_result(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd_val, vm_val); + status : Retired = RETIRE_SUCCESS; + + /* Currently mop = 1(unordered) or 3(ordered) do the same operations */ + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = signed(vs2_val[i]); + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) then + { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, EEW_data_bytes, false, false, false) { + MemValue(result) => total[i] = result, + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + }; + + if status == RETIRE_SUCCESS then write_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd, total); + vstart = EXTZ(0b0); + status +} + +function clause execute(VLUXEITYPE(vm, vs2, rs1, width, vd)) = { + let EEW_index_pow = vlewidth_pow(width); + let EEW_index_bytes = vlewidth_bytesnumber(width); + let EEW_data_pow = get_sew_pow(); + let EEW_data_bytes = get_sew_bytes(); + let EMUL_data_pow = get_lmul_pow(); + let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + + process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) +} + +mapping clause assembly = VLUXEITYPE(vm, vs2, rs1, width, vd) + <-> "vluxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + +/* ************************* Vector Load Indexed Ordered (nf=0, mop=11) ************************** */ +union clause ast = VLOXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VLOXEITYPE(vm, vs2, rs1, width, vd) if haveRVV() + <-> 0b000 @ 0b0 @ 0b11 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +function clause execute(VLOXEITYPE(vm, vs2, rs1, width, vd)) = { + let EEW_index_pow = vlewidth_pow(width); + let EEW_index_bytes = vlewidth_bytesnumber(width); + let EEW_data_pow = get_sew_pow(); + let EEW_data_bytes = get_sew_bytes(); + let EMUL_data_pow = get_lmul_pow(); + let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and 
indices are the same */ + + process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) +} + +mapping clause assembly = VLOXEITYPE(vm, vs2, rs1, width, vd) + <-> "vloxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + +/* ************************ Vector Store Indexed Unordered (nf=0, mop=01) ************************ */ +union clause ast = VSUXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VSUXEITYPE(vm, vs2, rs1, width, vs3) if haveRVV() + <-> 0b000 @ 0b0 @ 0b01 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +val process_vsxei : forall 'ib 'db 'ip 'dp 'n, ('ib in {1, 2, 4, 8}) & ('db in {1, 2, 4, 8}) & ('n >= 0). (bits(1), regidx, int('ib), int('db), int('ip), int('dp), regidx, regidx, int('n), int) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vsxei (vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { + let width_type : word_width = bytes_wordwidth(EEW_data_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs3_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vs3); + let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); + total : vector('n, dec, bits('db * 8)) = undefined; /* just used to generate mask */ + mask : vector('n, dec, bool) = undefined; + (total, mask) = init_masked_result(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vs3_val, vm_val); + status : Retired = RETIRE_SUCCESS; + + /* Currently mop = 1(unordered) or 3(ordered) do the same operations */ + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = signed(vs2_val[i]); + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, EEW_data_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let res : MemoryOpResult(bool) = mem_write_value(paddr, EEW_data_bytes, vs3_val[i], false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VSUXEITYPE(vm, vs2, rs1, width, vs3)) = { + let EEW_index_pow = vlewidth_pow(width); + let EEW_index_bytes = vlewidth_bytesnumber(width); + let EEW_data_pow = get_sew_pow(); + let EEW_data_bytes = get_sew_bytes(); + let EMUL_data_pow = get_lmul_pow(); + let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + + 
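/* the model performs the element accesses in order, so the unordered (mop=1) and ordered (mop=3) indexed stores share process_vsxei */ +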
process_vsxei(vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) +} + +mapping clause assembly = VSUXEITYPE(vm, vs2, rs1, width, vs3) + <-> "vsuxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + +/* ************************* Vector Store Indexed Ordered (nf=0, mop=11) ************************* */ +union clause ast = VSOXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VSOXEITYPE(vm, vs2, rs1, width, vs3) if haveRVV() + <-> 0b000 @ 0b0 @ 0b11 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +function clause execute(VSOXEITYPE(vm, vs2, rs1, width, vs3)) = { + let EEW_index_pow = vlewidth_pow(width); + let EEW_index_bytes = vlewidth_bytesnumber(width); + let EEW_data_pow = get_sew_pow(); + let EEW_data_bytes = get_sew_bytes(); + let EMUL_data_pow = get_lmul_pow(); + let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + + process_vsxei(vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) +} + +mapping clause assembly = VSOXEITYPE(vm, vs2, rs1, width, vs3) + <-> "vsoxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + +/* ************* Vector Load Unit-Stride Fault-Only-First (nf=0, mop=0, lumop=10000) ************* */ +union clause ast = VLEFFTYPE : (bits(1), regidx, vlewidth, regidx) + +mapping clause encdec = VLEFFTYPE(vm, rs1, width, vd) if haveRVV() + <-> 0b000 @ 0b0 @ 0b00 @ vm @ 0b10000 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vleff : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
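/* fault-only-first load: only a fault on element 0 raises an exception; a fault on a later element i reduces vl to i and the instruction completes */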
(bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vleff (vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); + total : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (total, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vd_val, vm_val); + status : Retired = RETIRE_SUCCESS; + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + let elem_offset = i * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { + if i == 0 then { + ext_handle_data_check_error(e); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + }, + Ext_DataAddr_OK(vaddr) => { + if check_misaligned(vaddr, width_type) then { + if i == 0 then { + handle_mem_exception(vaddr, E_Load_Addr_Align()); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + } else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { + if i == 0 then { + handle_mem_exception(vaddr, e); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => total[i] = result, + MemException(e) => { + if i == 0 then { + handle_mem_exception(vaddr, e); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + } + } + } + } + } + } + } + } + }; + + if status == RETIRE_SUCCESS then write_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd, total); + vstart = EXTZ(0b0); + status +} + +function clause execute(VLEFFTYPE(vm, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + + process_vleff(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) +} + +mapping vlefftype_mnemonic : vlewidth <-> string = { + VLE8 <-> "vle8ff.v", + VLE16 <-> "vle16ff.v", + VLE32 <-> "vle32ff.v", + VLE64 <-> "vle64ff.v" +} + +mapping clause assembly = VLEFFTYPE(vm, rs1, width, vd) + <-> vlefftype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + +/* ******************** Vector Load Unit-Stride Segment (mop=0, lumop=00000) ********************* */ +union clause ast = VLSEGTYPE : (bits(3), bits(1), regidx, vlewidth, regidx) + +mapping clause encdec = VLSEGTYPE(nf, vm, rs1, width, vd) if haveRVV() + <-> nf @ 0b0 @ 0b00 @ vm @ 0b00000 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vlseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8}) & ('n >= 0). 
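/* unit-stride segment load: field j of segment i is read from the base address plus (i * nf + j) * EEW/8 bytes; each field goes to the next destination register group */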
(int('f), bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlseg (nf, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { + let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + status : Retired = RETIRE_SUCCESS; + vd_a = vd; + vd_t = vd; + + foreach(a from 0 to (nf - 1)) { + if vd_t == vd_a & a != 0 then vd_t = vd_t + 1; /* EMUL < 1 */ + let vd_t_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd_t); + total : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (total, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vd_t_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if mask[i] then { + vstart = to_bits(16, i); + let elem_offset = load_width_bytes * (a + i * nf); + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => total[i] = result, + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + }; + + if status == RETIRE_SUCCESS then write_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd_t, total); + vd_a = vd_t; + vd_t = vd_t + to_bits(5, EMUL_reg) + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VLSEGTYPE(nf, vm, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); /* # of element of each register group */ + let nf_int = nfields_int(nf); + + process_vlseg(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) +} + +mapping clause assembly = VLSEGTYPE(nf, vm, rs1, width, vd) + <-> "vlseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ maybe_vmask(vm) + +/* ************ Vector Load Unit-Stride Segment Fault-Only-First (mop=0, lumop=10000) ************ */ +union clause ast = VLSEGFFTYPE : (bits(3), bits(1), regidx, vlewidth, regidx) + +mapping clause encdec = VLSEGFFTYPE(nf, vm, rs1, width, vd) if haveRVV() + <-> nf @ 0b0 @ 0b00 @ vm @ 0b10000 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vlsegff : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8}) & ('n >= 0). 
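/* segment fault-only-first load: as with the vle<nn>ff.v forms, only element 0 may trap; a fault on a later element reduces vl instead */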
(int('f), bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlsegff (nf, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { + let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let start_element = get_start_element(); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + status : Retired = RETIRE_SUCCESS; + if start_element >= num_elem then return status; + + foreach (i from start_element to (num_elem - 1)) { + if status != RETIRE_FAIL then { + if vm_val[i] then { + foreach (j from 0 to (nf - 1)) { + let elem_offset = (i * nf + j) * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { + if i == 0 then { + ext_handle_data_check_error(e); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + }, + Ext_DataAddr_OK(vaddr) => { + if check_misaligned(vaddr, width_type) then { + if i == 0 then { + handle_mem_exception(vaddr, E_Load_Addr_Align()); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + } else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { + if i == 0 then { + handle_mem_exception(vaddr, e); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => write_single_element(load_width_bytes * 8, i, EMUL_pow, vd + to_bits(5, j * EMUL_reg), result), + MemException(e) => { + if i == 0 then { + handle_mem_exception(vaddr, e); + status = RETIRE_FAIL + } else { + vl = to_bits(sizeof(xlen), i); + print_reg("CSR vl <- " ^ BitStr(vl)) + } + } + } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VLSEGFFTYPE(nf, vm, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + let nf_int = nfields_int(nf); + + process_vlsegff(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) +} + +mapping clause assembly = VLSEGTYPE(nf, vm, rs1, width, vd) + <-> "vlseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ "ff.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + +/* ******************** Vector Store Unit-Stride Segment (mop=0, sumop=00000) ******************** */ +union clause ast = VSSEGTYPE : (bits(3), bits(1), regidx, vlewidth, regidx) + +mapping clause encdec = VSSEGTYPE(nf, vm, rs1, width, vs3) if haveRVV() + <-> nf @ 0b0 @ 0b00 @ vm @ 0b00000 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +val process_vsseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8}) & ('n >= 0). 
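/* unit-stride segment store: field j of segment i is read from register group vs3 + j * EMUL_reg and written to the base address plus (i * nf + j) * EEW/8 bytes */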
(int('f), bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vsseg (nf, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) = { + let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let start_element = get_start_element(); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + status : Retired = RETIRE_SUCCESS; + if start_element >= num_elem then return status; + + foreach (i from start_element to (num_elem - 1)) { + if vm_val[i] then { + vstart = to_bits(16, i); + foreach (j from 0 to (nf - 1)) { + if status != RETIRE_FAIL then { + let elem_offset = (i * nf + j) * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let one_elem_val : bits('b * 8) = read_single_element(load_width_bytes * 8, i, EMUL_pow, vs3 + to_bits(5, j * EMUL_reg)); + let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, one_elem_val, false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VSSEGTYPE(nf, vm, rs1, width, vs3)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + let nf_int = nfields_int(nf); + + process_vsseg(nf_int, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) +} + +mapping clause assembly = VSSEGTYPE(nf, vm, rs1, width, vs3) + <-> "vsseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + +/* **************************** Vector Load Strided Segment (mop=10) ***************************** */ +union clause ast = VLSSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VLSSEGTYPE(nf, vm, rs2, rs1, width, vd) if haveRVV() + <-> nf @ 0b0 @ 0b10 @ vm @ rs2 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vlsseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8}) & ('n >= 0). 
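/* strided segment load: segment i starts at the base address plus i * X(rs2); the nf fields of a segment are laid out contiguously, EEW/8 bytes apart */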
(int('f), bits(1), regidx, int('b), regidx, regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlsseg (nf, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { + let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); /* only to generate mask */ + result : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (result, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vd_val, vm_val); + + let rs2_val : int = signed(get_scalar(rs2, sizeof(xlen))); + status : Retired = RETIRE_SUCCESS; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + vstart = to_bits(16, i); + foreach (j from 0 to (nf - 1)) { + if status != RETIRE_FAIL then { + let elem_offset = i * rs2_val + j * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => write_single_element(load_width_bytes * 8, i, EMUL_pow, vd + to_bits(5, j * EMUL_reg) , result), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VLSSEGTYPE(nf, vm, rs2, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + let nf_int = nfields_int(nf); + + process_vlsseg(nf_int, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) +} + +mapping clause assembly = VLSSEGTYPE(nf, vm, rs2, rs1, width, vd) + <-> "vlsseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2) ^ sep() ^ maybe_vmask(vm) + +/* **************************** Vector Store Strided Segment (mop=10) **************************** */ +union clause ast = VSSSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3) if haveRVV() + <-> nf @ 0b0 @ 0b10 @ vm @ rs2 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +val process_vssseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8}) & ('n >= 0). 
(int('f), bits(1), regidx, int('b), regidx, regidx, int('p), int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vssseg (nf, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { + let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); + let width_type : word_width = bytes_wordwidth(load_width_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs3_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vs3); /* only to generate mask */ + result : vector('n, dec, bits('b * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (result, mask) = init_masked_result(num_elem, load_width_bytes * 8, EMUL_pow, vs3_val, vm_val); + + let rs2_val : int = signed(get_scalar(rs2, sizeof(xlen))); + status : Retired = RETIRE_SUCCESS; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + vstart = to_bits(16, i); + foreach (j from 0 to (nf - 1)) { + if status != RETIRE_FAIL then { + let elem_offset = i * rs2_val + j * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let one_elem_val : bits('b * 8) = read_single_element(load_width_bytes * 8, i, EMUL_pow, vs3 + to_bits(5, j * EMUL_reg)); + let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, one_elem_val, false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let EEW_pow = vlewidth_pow(width); + let SEW_pow = get_sew_pow(); + let LMUL_pow = get_lmul_pow(); + let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; + let num_elem = get_num_elem(EMUL_pow, EEW); + let nf_int = nfields_int(nf); + + process_vssseg(nf_int, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) +} + +mapping clause assembly = VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3) + <-> "vssseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2) ^ sep() ^ maybe_vmask(vm) + +/* *********************** Vector Load Indexed Unordered Segment (mop=01) ************************ */ +union clause ast = VLUXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd) if haveRVV() + <-> nf @ 0b0 @ 0b01 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +val process_vlxseg : forall 'f 'ib 'db 'ip 'dp 'n, (0 < 'f & 'f <= 8) & ('ib in {1, 2, 4, 8}) & ('db in {1, 2, 4, 8}) & ('n 
>= 0). (int('f), bits(1), regidx, int('ib), int('db), int('ip), int('dp), regidx, regidx, int('n), int) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlxseg (nf, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { + let EMUL_data_reg : int = if EMUL_data_pow <= 0 then 1 else int_power(2, EMUL_data_pow); + let width_type : word_width = bytes_wordwidth(EEW_data_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd); + let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); + total : vector('n, dec, bits('db * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (total, mask) = init_masked_result(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd_val, vm_val); + status : Retired = RETIRE_SUCCESS; + + /* Currently mop = 1(unordered) or 3(ordered) do the same operations */ + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + vstart = to_bits(16, i); + foreach (j from 0 to (nf - 1)) { + if status != RETIRE_FAIL then { + let elem_offset : int = signed(vs2_val[i]) + j * EEW_data_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, EEW_data_bytes, false, false, false) { + MemValue(result) => write_single_element(EEW_data_bytes * 8, i, EMUL_data_pow, vd + to_bits(5, j * EMUL_data_reg) , result), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { + let EEW_index_pow = vlewidth_pow(width); + let EEW_index_bytes = vlewidth_bytesnumber(width); + let EEW_data_pow = get_sew_pow(); + let EEW_data_bytes = get_sew_bytes(); + let EMUL_data_pow = get_lmul_pow(); + let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + let nf_int = nfields_int(nf); + + process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) +} + +mapping clause assembly = VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd) + <-> "vluxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + +/* ************************ Vector Load Indexed Ordered Segment (mop=11) ************************* */ +union clause ast = VLOXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd) if haveRVV() + <-> nf @ 0b0 @ 0b11 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV() + +function clause execute(VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { + let EEW_index_pow = vlewidth_pow(width); + let EEW_index_bytes = vlewidth_bytesnumber(width); + let 
EEW_data_pow = get_sew_pow(); + let EEW_data_bytes = get_sew_bytes(); + let EMUL_data_pow = get_lmul_pow(); + let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + let nf_int = nfields_int(nf); + + process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) +} + +mapping clause assembly = VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd) + <-> "vloxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + +/* *********************** Vector Store Indexed Unordered Segment (mop=01) *********************** */ +union clause ast = VSUXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) + +mapping clause encdec = VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3) if haveRVV() + <-> nf @ 0b0 @ 0b01 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV() + +val process_vsxseg : forall 'f 'ib 'db 'ip 'dp 'n, (0 < 'f & 'f <= 8) & ('ib in {1, 2, 4, 8}) & ('db in {1, 2, 4, 8}) & ('n >= 0). (int('f), bits(1), regidx, int('ib), int('db), int('ip), int('dp), regidx, regidx, int('n), int) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vsxseg (nf, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { + let EMUL_data_reg : int = if EMUL_data_pow <= 0 then 1 else int_power(2, EMUL_data_pow); + let width_type : word_width = bytes_wordwidth(EEW_data_bytes); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs3_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vs3); + let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); + total : vector('n, dec, bits('db * 8)) = undefined; + mask : vector('n, dec, bool) = undefined; + (total, mask) = init_masked_result(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vs3_val, vm_val); + status : Retired = RETIRE_SUCCESS; + + /* Currently mop = 1(unordered) or 3(ordered) do the same operations */ + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + vstart = to_bits(16, i); + foreach (j from 0 to (nf - 1)) { + if status != RETIRE_FAIL then { + let elem_offset : int = signed(vs2_val[i]) + j * EEW_data_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, EEW_data_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let one_elem_val : bits('db * 8) = read_single_element(EEW_data_bytes * 8, i, EMUL_data_pow, vs3 + to_bits(5, j * EMUL_data_reg)); + let res : MemoryOpResult(bool) = mem_write_value(paddr, EEW_data_bytes, one_elem_val, false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got 
false from mem_write_value"),
+                        MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }
+                      }
+                    }
+                  }
+                }
+              }
+          }
+        }
+      }
+    }
+  };
+
+  vstart = EXTZ(0b0);
+  status
+}
+
+function clause execute(VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = {
+  let EEW_index_pow = vlewidth_pow(width);
+  let EEW_index_bytes = vlewidth_bytesnumber(width);
+  let EEW_data_pow = get_sew_pow();
+  let EEW_data_bytes = get_sew_bytes();
+  let EMUL_data_pow = get_lmul_pow();
+  let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow;
+  let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */
+  let nf_int = nfields_int(nf);
+
+  process_vsxseg(nf_int, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1)
+}
+
+mapping clause assembly = VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3)
+  <-> "vsuxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm)
+
+/* ************************ Vector Store Indexed Ordered Segment (mop=11) ************************ */
+union clause ast = VSOXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx)
+
+mapping clause encdec = VSOXSEGTYPE(nf, vm, vs2, rs1, width, vs3) if haveRVV()
+  <-> nf @ 0b0 @ 0b11 @ vm @ vs2 @ rs1 @ encdec_vlewidth(width) @ vs3 @ 0b0100111 if haveRVV()
+
+function clause execute(VSOXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = {
+  let EEW_index_pow = vlewidth_pow(width);
+  let EEW_index_bytes = vlewidth_bytesnumber(width);
+  let EEW_data_pow = get_sew_pow();
+  let EEW_data_bytes = get_sew_bytes();
+  let EMUL_data_pow = get_lmul_pow();
+  let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow;
+  let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */
+  let nf_int = nfields_int(nf);
+
+  process_vsxseg(nf_int, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3)
+}
+
+mapping clause assembly = VSOXSEGTYPE(nf, vm, vs2, rs1, width, vs3)
+  <-> "vsoxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm)
+
+/* ************** Vector Load Unit-Stride Whole Register (vm=1, mop=0, lumop=01000) ************** */
+union clause ast = VLRETYPE : (bits(3), regidx, vlewidth, regidx)
+
+mapping clause encdec = VLRETYPE(nf, rs1, width, vd) if haveRVV()
+  <-> nf @ 0b0 @ 0b00 @ 0b1 @ 0b01000 @ rs1 @ encdec_vlewidth(width) @ vd @ 0b0000111 if haveRVV()
+
+val process_vlre : forall 'f 'b 'n, ('f in {1, 2, 4, 8}) & ('b in {1, 2, 4, 8}) & ('n >= 0).
(int('f), regidx, int('b), regidx, int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vlre (nf, vd, load_width_bytes, rs1, elem_per_reg) = { + let width_type : word_width = bytes_wordwidth(load_width_bytes); + status : Retired = RETIRE_SUCCESS; + start_element = get_start_element(); + if start_element >= nf * elem_per_reg then return status; /* no elements are written */ + cur_field : int = start_element / elem_per_reg; + elem_to_align : int = start_element % elem_per_reg; + + if elem_to_align > 0 then { + foreach (i from elem_to_align to (elem_per_reg - 1)) { + if status != RETIRE_FAIL then { + vstart = to_bits(16, start_element); + let elem_offset : int = start_element * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => write_single_element(load_width_bytes * 8, i, 0, vd + to_bits(5, cur_field) , result), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + }; + start_element = start_element + 1 + } + }; + cur_field = cur_field + 1 + }; + + foreach (j from cur_field to (nf - 1)) { + foreach (i from 0 to (elem_per_reg - 1)) { + if status != RETIRE_FAIL then { + vstart = to_bits(16, start_element); + let elem_offset = start_element * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { + MemValue(result) => write_single_element(load_width_bytes * 8, i, 0, vd + to_bits(5, j) , result), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + }; + start_element = start_element + 1 + } + }; + }; + + vstart = EXTZ(0b0); + status +} + +function clause execute(VLRETYPE(nf, rs1, width, vd)) = { + let load_width_bytes = vlewidth_bytesnumber(width); + let EEW = load_width_bytes * 8; + let VLEN = int_power(2, get_vlen_pow()); + let elem_per_reg : int = VLEN / EEW; + let nf_int = nfields_int(nf); + + assert(elem_per_reg >= 0); + assert(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8); + + process_vlre(nf_int, vd, load_width_bytes, rs1, elem_per_reg) +} + +mapping clause assembly = VLRETYPE(nf, rs1, width, vd) + <-> "vl" ^ nfields_string(nf) ^ "re" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) + +/* ************* Vector Store Unit-Stride Whole Register (vm=1, mop=0, lumop=01000) ************** */ +union clause ast = VSRETYPE : (bits(3), regidx, regidx) + +mapping clause encdec = VSRETYPE(nf, rs1, vs3) if haveRVV() + <-> nf @ 0b0 @ 0b00 @ 0b1 @ 0b01000 @ rs1 @ 0b000 @ vs3 @ 
0b0100111 if haveRVV() + +val process_vsre : forall 'f 'b 'n, ('f in {1, 2, 4, 8}) & ('b in {1, 2, 4, 8}) & ('n >= 0). (int('f), int('b), regidx, regidx, int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vsre (nf, load_width_bytes, rs1, vs3, elem_per_reg) = { + let width_type : word_width = BYTE; + start_element = get_start_element(); + status : Retired = RETIRE_SUCCESS; + if start_element >= nf * elem_per_reg then return status; /* no elements are written */ + cur_field : int = start_element / elem_per_reg; + elem_to_align : int = start_element % elem_per_reg; + + if elem_to_align > 0 then { + foreach (i from elem_to_align to (elem_per_reg - 1)) { + if status != RETIRE_FAIL then { + vstart = to_bits(16, start_element); + let elem_offset : int = start_element * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let one_elem_val : bits('b * 8) = read_single_element(load_width_bytes * 8, i, 0, vs3 + to_bits(5, cur_field)); + let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, one_elem_val, false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + }; + start_element = start_element + 1 + } + }; + cur_field = cur_field + 1 + }; + + foreach (j from cur_field to (nf - 1)) { + let vs_val : vector('n, dec, bits('b * 8)) = read_vreg(elem_per_reg, load_width_bytes * 8, 0, vs3 + to_bits(5, j)); + foreach (i from 0 to (elem_per_reg - 1)) { + if status != RETIRE_FAIL then { + vstart = to_bits(16, start_element); + let elem_offset = start_element * load_width_bytes; + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, vs_val[i], false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + }; + start_element = start_element + 1 + } + } + }; + + vstart = EXTZ(0b0); + 
status +} + +function clause execute(VSRETYPE(nf, rs1, vs3)) = { + let load_width_bytes = 1; + let EEW = 8; + let VLEN = int_power(2, get_vlen_pow()); + let elem_per_reg : int = VLEN / EEW; + let nf_int = nfields_int(nf); + + assert(elem_per_reg >= 0); + assert(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8); + + process_vsre(nf_int, load_width_bytes, rs1, vs3, elem_per_reg) +} + +mapping clause assembly = VSRETYPE(nf, rs1, vs3) + <-> "vs" ^ nfields_string(nf) ^ "r.v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) + +/* *********** Vector Mask Load/Store Unit-Stride (nf=0, mop=0, lumop or sumop=01011) ************ */ +union clause ast = VMTYPE : (regidx, regidx, vmlsop) + +mapping encdec_lsop : vmlsop <-> bits(7) = { + VLM <-> 0b0000111, + VSM <-> 0b0100111 +} + +mapping clause encdec = VMTYPE(rs1, vd_or_vs3, op) if haveRVV() + <-> 0b000 @ 0b0 @ 0b00 @ 0b1 @ 0b01011 @ rs1 @ 0b000 @ vd_or_vs3 @ encdec_lsop(op) if haveRVV() + +val process_vm : forall 'n 'p, ('n >= 0). (regidx, regidx, int('p), int('n), vmlsop) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} +function process_vm(vd_or_vs3, rs1, EMUL_pow, num_elem, op) = { + let width_type : word_width = BYTE; + let start_element = get_start_element(); + let vd_or_vs3_val : vector('n, dec, bits(8)) = read_vreg(num_elem, 8, EMUL_pow, vd_or_vs3); + total : vector('n, dec, bits(8)) = undefined; + elem_offset : int = undefined; + status : Retired = RETIRE_SUCCESS; + + foreach (i from 0 to (num_elem - 1)) { + if status != RETIRE_FAIL then { + let elem_offset = i; + if op == VLM then { /* load */ + if i < start_element then { + total[i] = vd_or_vs3_val[i] + } else { + vstart = to_bits(16, i); + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Read(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + match mem_read(Read(Data), paddr, 1, false, false, false) { + MemValue(result) => if i < num_elem then total[i] = result, + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + }; + } else if op == VSM then { /* store */ + if i >= start_element then { + vstart = to_bits(16, i); + match ext_data_get_addr(rs1, to_bits(sizeof(xlen), elem_offset), Write(Data), width_type) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); status = RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + if check_misaligned(vaddr, width_type) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); status = RETIRE_FAIL } + else match translateAddr(vaddr, Write(Data)) { + TR_Failure(e, _) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = mem_write_ea(paddr, 1, false, false, false); + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL }, + MemValue(_) => { + let res : MemoryOpResult(bool) = mem_write_value(paddr, 1, vd_or_vs3_val[i], false, false, false); + match (res) { + MemValue(true) => status = RETIRE_SUCCESS, + MemValue(false) => internal_error("store got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } + } + } + } + } + } + } + } + } + } + }; + + 
if op == VLM & status == RETIRE_SUCCESS then write_vreg(num_elem, 8, EMUL_pow, vd_or_vs3, total); + vstart = EXTZ(0b0); + status +} + +function clause execute(VMTYPE(rs1, vd_or_vs3, op)) = { + let EEW = 8; + let EMUL_pow = 0; + let tmp = unsigned(vl); + let num_elem : int = if tmp % 8 == 0 then tmp / 8 else tmp / 8 + 1; + + /* unmask vle8 except that the effective vector length is evl=ceil(vl/8) */ + assert(num_elem >= 0); + process_vm(vd_or_vs3, rs1, EMUL_pow, num_elem, op) +} + +mapping vmtype_mnemonic : vmlsop <-> string = { + VLM <-> "vlm.v", + VSM <-> "vsm.v" +} + +mapping clause assembly = VMTYPE(rs1, vd_or_vs3, op) + <-> vmtype_mnemonic(op) ^ spc() ^ vreg_name(vd_or_vs3) ^ sep() ^ reg_name(rs1) diff --git a/model/riscv_insts_vext_utils.sail b/model/riscv_insts_vext_utils.sail index 342387f38..b71d24743 100755 --- a/model/riscv_insts_vext_utils.sail +++ b/model/riscv_insts_vext_utils.sail @@ -1,22 +1,13 @@ /* ************************************************************************** */ /* This file implements functions used by vector instructions. */ - /* ************************************************************************** */ - /* Vector mask mapping */ mapping maybe_vmask : string <-> bits(1) = { "" <-> 0b1, /* unmasked by default */ sep() ^ "v0.t" <-> 0b0 } -/* Check for valid vsew and lmul values */ -val vcheck_vsew_lmul : (int, real) -> bool -function vcheck_vsew_lmul(vsew_bits, lmul) = { - vsew_bits >= 8 & vsew_bits <= 64 & - lmul >= 0.125 & lmul <= 8.0; -} - /* Check for vstart value */ val assert_vstart : int -> bool effect {rreg} function assert_vstart(i) = { @@ -38,23 +29,24 @@ function get_scalar(rs1, vsew_bits) = { } /* Get the starting element index from csr vtype */ -val get_start_element : unit -> int effect {escape, rreg, wreg} +val get_start_element : unit -> nat effect {escape, rreg, wreg} function get_start_element() = { let start_element = unsigned(vstart); - let vsew_bits = get_vtype_vsew(); + let VLEN_pow = get_vlen_pow(); + let SEW_pow = get_sew_pow(); /* The use of vstart values greater than the largest element index for the current SEW setting is reserved. It is recommended that implementations trap if vstart is out of bounds. It is not required to trap, as a possible future use of upper vstart bits is to store imprecise trap information. */ - if start_element > ((8 * get_vlen() / vsew_bits) - 1) then -1 - else start_element + if start_element > (2 ^ (3 + VLEN_pow - SEW_pow) - 1) then handle_illegal(); + start_element } /* Get the ending element index from csr vl */ val get_end_element : unit -> int effect {escape, rreg, wreg} function get_end_element() = { - let end_element : int = unsigned(vl) - 1; + let end_element = unsigned(vl) - 1; end_element } @@ -66,143 +58,137 @@ function get_end_element() = { * vector2 is a "mask" vector that is true for an element if the corresponding element * in the result vector should be updated by the calling instruction */ -val init_masked_result : forall 'n 'm, 8 <= 'm <= 128. 
(int('n), int('m), real, vector('n, dec, bits('m)), vector('n, dec, bool)) -> (vector('n, dec, bits('m)), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} -function init_masked_result(num_elem, vsew_bits, lmul, vd_val, vm_val) = { - let start_element : int = get_start_element(); - let end_element : int = get_end_element(); - let tail_ag : agtype = get_vtype_vta(); - let mask_ag : agtype = get_vtype_vma(); - mask_helper : vector('n, dec, bool) = undefined; - result : vector('n, dec, bits('m)) = undefined; - - if start_element < 0 then { - /* start element is not valid */ - result = undefined; - mask_helper = undefined; - } else { - /* Determine the actual number of elements when lmul < 1 */ - let real_num_elem = if lmul >= 1.0 then num_elem else floor(lmul * to_real(num_elem)); - assert(num_elem >= real_num_elem); +val init_masked_result : forall 'n 'm 'p, 8 <= 'm <= 64. (int('n), int('m), int('p), vector('n, dec, bits('m)), vector('n, dec, bool)) -> (vector('n, dec, bits('m)), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} +function init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + let tail_ag : agtype = get_vtype_vta(); + let mask_ag : agtype = get_vtype_vma(); + mask : vector('n, dec, bool) = undefined; + result : vector('n, dec, bits('m)) = undefined; - foreach (i from 0 to (num_elem - 1)) { - if i < start_element then { - /* Prestart elements defined by vstart */ + /* Determine the actual number of elements when lmul < 1 */ + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); + assert(num_elem >= real_num_elem); + + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + /* Prestart elements defined by vstart */ + result[i] = vd_val[i]; + mask[i] = false + } else if i > end_element then { + /* Tail elements defined by vl */ + if tail_ag == UNDISTURBED then { result[i] = vd_val[i]; - mask_helper[i] = false - } else if i > end_element then { - /* Tail elements defined by vl */ - if tail_ag == UNDISTURBED then { - result[i] = vd_val[i]; - } else if tail_ag == AGNOSTIC then { - result[i] = vd_val[i]; - }; - mask_helper[i] = false - } else if i >= real_num_elem then { - /* Tail elements defined by lmul < 1 */ - if tail_ag == UNDISTURBED then { - result[i] = vd_val[i]; - } else if tail_ag == AGNOSTIC then { - result[i] = vd_val[i]; - }; - mask_helper[i] = false - } else if vm_val[i] == false then { - /* Inactive body elements defined by vm */ - if mask_ag == UNDISTURBED then { - result[i] = vd_val[i] - } else if mask_ag == AGNOSTIC then { - result[i] = vd_val[i] - }; - mask_helper[i] = false - } else { - /* Active body elements */ - mask_helper[i] = true; - } - }; + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i]; /* TODO: configuration support */ + }; + mask[i] = false + } else if i >= real_num_elem then { + /* Tail elements defined by lmul < 1 */ + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i]; + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i]; /* TODO: configuration support */ + }; + mask[i] = false + } else if vm_val[i] == false then { + /* Inactive body elements defined by vm */ + if mask_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if mask_ag == AGNOSTIC then { + result[i] = vd_val[i] /* TODO: configuration support */ + }; + mask[i] = false + } else { + /* Active body elements */ + mask[i] = true; + } }; - (result, mask_helper) + (result, mask) } /* Mask handling for carry 
functions that use masks as input/output */ /* Only prestart and tail elements are masked in a mask value */ -val init_masked_result_carry : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), real, vector('n, dec, bool)) -> (vector('n, dec, bool), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} -function init_masked_result_carry(num_elem, vsew_bits, lmul, vd_val) = { - let start_element : int = get_start_element(); - let end_element : int = get_end_element(); - mask_helper : vector('n, dec, bool) = undefined; - result : vector('n, dec, bool) = undefined; +val init_masked_result_carry : forall 'n 'm 'p, 8 <= 'm <= 64. (int('n), int('m), int('p), vector('n, dec, bool)) -> (vector('n, dec, bool), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} +function init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + mask : vector('n, dec, bool) = undefined; + result : vector('n, dec, bool) = undefined; /* Determine the actual number of elements when lmul < 1 */ - let real_num_elem = if lmul >= 1.0 then num_elem else floor(lmul * to_real(num_elem)); + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); assert(num_elem >= real_num_elem); foreach (i from 0 to (num_elem - 1)) { if i < start_element then { /* Prestart elements defined by vstart */ result[i] = vd_val[i]; - mask_helper[i] = false + mask[i] = false } else if i > end_element then { /* Tail elements defined by vl */ /* Mask tail is always agnostic */ - result[i] = vd_val[i]; - mask_helper[i] = false + result[i] = vd_val[i]; /* TODO: configuration support */ + mask[i] = false } else if i >= real_num_elem then { /* Tail elements defined by lmul < 1 */ /* Mask tail is always agnostic */ - result[i] = vd_val[i]; - mask_helper[i] = false + result[i] = vd_val[i]; /* TODO: configuration support */ + mask[i] = false } else { /* Active body elements */ - mask_helper[i] = true + mask[i] = true } }; - (result, mask_helper) + (result, mask) } /* Mask handling for cmp functions that use masks as output */ -val init_masked_result_cmp : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), real, vector('n, dec, bool), vector('n, dec, bool)) -> (vector('n, dec, bool), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} -function init_masked_result_cmp(num_elem, vsew_bits, lmul, vd_val, vm_val) = { - let start_element : int = get_start_element(); - let end_element : int = get_end_element(); - let mask_ag : agtype = get_vtype_vma(); - mask_helper : vector('n, dec, bool) = undefined; - result : vector('n, dec, bool) = undefined; +val init_masked_result_cmp : forall 'n 'm 'p, 8 <= 'm <= 64. 
(int('n), int('m), int('p), vector('n, dec, bool), vector('n, dec, bool)) -> (vector('n, dec, bool), vector('n, dec, bool)) effect {escape, rreg, undef, wreg} +function init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + let mask_ag : agtype = get_vtype_vma(); + mask : vector('n, dec, bool) = undefined; + result : vector('n, dec, bool) = undefined; /* Determine the actual number of elements when lmul < 1 */ - let real_num_elem = if lmul >= 1.0 then num_elem else floor(lmul * to_real(num_elem)); + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); assert(num_elem >= real_num_elem); foreach (i from 0 to (num_elem - 1)) { if i < start_element then { /* Prestart elements defined by vstart */ result[i] = vd_val[i]; - mask_helper[i] = false + mask[i] = false } else if i > end_element then { /* Tail elements defined by vl */ /* Mask tail is always agnostic */ - result[i] = vd_val[i]; - mask_helper[i] = false + result[i] = vd_val[i]; /* TODO: configuration support */ + mask[i] = false } else if i >= real_num_elem then { /* Tail elements defined by lmul < 1 */ /* Mask tail is always agnostic */ - result[i] = vd_val[i]; - mask_helper[i] = false + result[i] = vd_val[i]; /* TODO: configuration support */ + mask[i] = false } else if vm_val[i] == false then { /* Inactive body elements defined by vm */ if mask_ag == UNDISTURBED then { result[i] = vd_val[i] } else if mask_ag == AGNOSTIC then { - result[i] = vd_val[i] + result[i] = vd_val[i] /* TODO: configuration support */ }; - mask_helper[i] = false + mask[i] = false } else { /* Active body elements */ - mask_helper[i] = true + mask[i] = true } }; - (result, mask_helper) + (result, mask) } /* Floating point canonical NaN for 16-bit, 32-bit, 64-bit and 128-bit types */ @@ -284,9 +270,9 @@ function get_scalar_fp(rs1, vsew_bits) = { } /* Shift amounts */ -val get_shift_amount : forall 'n 'm, 0 <= 'n & 'm in {8, 16, 32, 64}. (bits('n), int('m)) -> int effect {escape} +val get_shift_amount : forall 'n 'm, 0 <= 'n & 'm in {8, 16, 32, 64}. (bits('n), int('m)) -> nat effect {escape} function get_shift_amount(bit_val, vsew_bits) = { - let lowlog2bits : int = log2(vsew_bits); + let lowlog2bits = log2(vsew_bits); assert(0 < lowlog2bits & lowlog2bits < 'n); unsigned(bit_val[lowlog2bits - 1 .. 0]); } @@ -361,7 +347,6 @@ function negate_fp (xf) = { fmakesign (new_sign, remain) } - /* Floating point functions */ val fp_add: forall 'n, 'n in {16, 32, 64}. (bits(3), bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} function fp_add(rm_3b, op1, op2) = { diff --git a/model/riscv_insts_vext_vset.sail b/model/riscv_insts_vext_vset.sail index edbbe7055..248855bb0 100644 --- a/model/riscv_insts_vext_vset.sail +++ b/model/riscv_insts_vext_vset.sail @@ -1,7 +1,6 @@ /* ************************************************************************ */ /* This file implements part of the vector extension. 
*/ -/* Chapter 6: configuration setting instructions */ - +/* Chapter 6: Configuration-Setting Instructions */ /* ************************************************************************ */ mapping sew_flag : string <-> bits(3) = { @@ -46,14 +45,14 @@ mapping encdec_vsetop : vsetop <-> bits(4) ={ VSETVL <-> 0b1000 } -mapping clause encdec = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) - <-> encdec_vsetop(op) @ ma @ ta @ sew @ lmul @ rs1 @ 0b111 @ rd @ 0b1010111 +mapping clause encdec = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) if haveRVV() + <-> encdec_vsetop(op) @ ma @ ta @ sew @ lmul @ rs1 @ 0b111 @ rd @ 0b1010111 if haveRVV() function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { - let vlen : int = get_vlen(); - let LMUL_ori : real = get_vtype_LMUL(); - let SEW_ori : int = get_vtype_vsew(); - let ratio_ori : real = to_real(SEW_ori) / LMUL_ori; + let VLEN_pow = get_vlen_pow(); + let LMUL_pow_ori = get_lmul_pow(); + let SEW_pow_ori = get_sew_pow(); + let ratio_pow_ori = SEW_pow_ori - LMUL_pow_ori; /* set vtype and calculate VLMAX */ match op { @@ -66,9 +65,9 @@ function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { } }; print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); - let LMUL_new : real = get_vtype_LMUL(); - let SEW_new : int = get_vtype_vsew(); - let VLMAX = floor(LMUL_new) * vlen / SEW_new; + let LMUL_pow_new = get_lmul_pow(); + let SEW_pow_new = get_sew_pow(); + let VLMAX = int_power(2, VLEN_pow + LMUL_pow_new - SEW_pow_new); /* set vl according to VLMAX and AVL */ if (rs1 != 0b00000) then { /* normal stripmining */ @@ -86,8 +85,8 @@ function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { print_reg("CSR vl <- " ^ BitStr(vl)) } else { /* keep existing vl */ let AVL = unsigned(vl); - let ratio_new : real = to_real(SEW_new) / LMUL_new; - if (ratio_new != ratio_ori) then { + let ratio_pow_new = SEW_pow_new - LMUL_pow_new; + if (ratio_pow_new != ratio_pow_ori) then { vtype->bits() = 0b1 @ zeros(sizeof(xlen) - 1); /* set vtype.vill */ print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); } @@ -112,23 +111,23 @@ mapping clause assembly = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) /* ******************** vsetivli *********************** */ union clause ast = VSETI_TYPE : ( bits(1), bits(1), bits(3), bits(3), regidx, regidx) -mapping clause encdec = VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) - <-> 0b1100 @ ma @ ta @ sew @ lmul @ uimm @ 0b111 @ rd @ 0b1010111 +mapping clause encdec = VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) if haveRVV() + <-> 0b1100 @ ma @ ta @ sew @ lmul @ uimm @ 0b111 @ rd @ 0b1010111 if haveRVV() function clause execute VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) = { - let vlen : int = get_vlen(); - let LMUL_ori : real = get_vtype_LMUL(); - let SEW_ori : int = get_vtype_vsew(); - let ratio_ori : real = to_real(SEW_ori) / LMUL_ori; + let VLEN_pow = get_vlen_pow(); + let LMUL_pow_ori = get_lmul_pow(); + let SEW_pow_ori = get_sew_pow(); + let ratio_pow_ori = SEW_pow_ori - LMUL_pow_ori; /* set vtype and calculate VLMAX */ vtype->bits() = 0b0 @ zeros(sizeof(xlen) - 9) @ ma @ ta @ sew @ lmul; print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); - let LMUL_new : real = get_vtype_LMUL(); - let SEW_new : int = get_vtype_vsew(); - let VLMAX : int = floor(LMUL_new) * vlen / SEW_new; - let AVL : int = unsigned(uimm); /* AVL is encoded as 5-bit zero-extended imm in the rs1 field */ + let LMUL_pow_new = get_lmul_pow(); + let SEW_pow_new = get_sew_pow(); + let VLMAX = int_power(2, VLEN_pow + LMUL_pow_new - SEW_pow_new); + let AVL = unsigned(uimm); /* AVL is 
encoded as 5-bit zero-extended imm in the rs1 field */ /* set vl according to VLMAX and AVL */ vl = if AVL <= VLMAX then to_bits(sizeof(xlen), AVL) diff --git a/model/riscv_insts_zicsr.sail b/model/riscv_insts_zicsr.sail index 5b0a5e358..56b78974e 100644 --- a/model/riscv_insts_zicsr.sail +++ b/model/riscv_insts_zicsr.sail @@ -251,7 +251,7 @@ function writeCSR (csr : csreg, value : xlenbits) -> unit = { (0x015, _) => write_seed_csr(), /* vector csr */ - (0x008, _) => { let vstart_length = get_vstart_length(); vstart = EXTZ(16, value[(vstart_length - 1) .. 0]); Some(EXTZ(vstart)) }, + (0x008, _) => { let vstart_length = get_vlen_pow(); vstart = EXTZ(16, value[(vstart_length - 1) .. 0]); Some(EXTZ(vstart)) }, (0x009, _) => { vxsat = value[0 .. 0]; Some(EXTZ(vxsat)) }, (0x00A, _) => { vxrm = value[1 .. 0]; Some(EXTZ(vxrm)) }, (0x00F, _) => { vcsr->bits() = value[2 ..0]; Some(EXTZ(vcsr.bits())) }, diff --git a/model/riscv_sys_regs.sail b/model/riscv_sys_regs.sail index 67d0d63ca..dc72d11ed 100644 --- a/model/riscv_sys_regs.sail +++ b/model/riscv_sys_regs.sail @@ -846,29 +846,51 @@ bitfield Vtype : xlenbits = { register vtype : Vtype /* the dynamic selected element width (SEW) */ -val get_vtype_vsew : unit -> int effect {escape, rreg} -function get_vtype_vsew() = { +/* this returns the power of 2 for SEW */ +val get_sew_pow : unit -> {|3, 4, 5, 6|} effect {escape, rreg} +function get_sew_pow() = { match vtype.vsew() { - 0b000 => 8, - 0b001 => 16, - 0b010 => 32, - 0b011 => 64, + 0b000 => 3, + 0b001 => 4, + 0b010 => 5, + 0b011 => 6, _ => {assert(false, "invalid vsew field in vtype"); 0} } } +/* this returns the actual value of SEW */ +val get_sew : unit -> {|8, 16, 32, 64|} effect {escape, rreg} +function get_sew() = { + match get_sew_pow() { + 3 => 8, + 4 => 16, + 5 => 32, + 6 => 64 + } +} +/* this returns the value of SEW in bytes */ +val get_sew_bytes : unit -> {|1, 2, 4, 8|} effect {escape, rreg} +function get_sew_bytes() = { + match get_sew_pow() { + 3 => 1, + 4 => 2, + 5 => 4, + 6 => 8 + } +} /* the vector register group multiplier (LMUL) */ -val get_vtype_LMUL : unit -> real effect {escape, rreg} -function get_vtype_LMUL() = { +/* this returns the power of 2 for LMUL */ +val get_lmul_pow : unit -> {|-3, -2, -1, 0, 1, 2, 3|} effect {escape, rreg} +function get_lmul_pow() = { match vtype.vlmul() { - 0b101 => 0.125, /* 1/8 */ - 0b110 => 0.25, /* 1/4 */ - 0b111 => 0.5, /* 1/2 */ - 0b000 => 1.0, - 0b001 => 2.0, - 0b010 => 4.0, - 0b011 => 8.0, - _ => {assert(false, "invalid vlmul field in vtype"); 0.0} + 0b101 => -3, + 0b110 => -2, + 0b111 => -1, + 0b000 => 0, + 0b001 => 1, + 0b010 => 2, + 0b011 => 3, + _ => {assert(false, "invalid vlmul field in vtype"); 0} } } diff --git a/model/riscv_vext_regs.sail b/model/riscv_vext_regs.sail old mode 100755 new mode 100644 index 85cd30821..ed8ca6b4f --- a/model/riscv_vext_regs.sail +++ b/model/riscv_vext_regs.sail @@ -149,12 +149,10 @@ function wV (r, in_v) = { _ => assert(false, "invalid vector register number") }; - let vlen : int = get_vlen(); - assert(0 < vlen & vlen <= sizeof(vlenmax)); + let VLEN = int_power(2, get_vlen_pow()); + assert(0 < VLEN & VLEN <= sizeof(vlenmax)); if get_config_print_reg() - then { - print_reg("v" ^ string_of_int(r) ^ " <- " ^ BitStr(v[vlen - 1 .. 0])); - } + then print_reg("v" ^ string_of_int(r) ^ " <- " ^ BitStr(v[VLEN - 1 .. 0])); } function rV_bits(i: bits(5)) -> vregtype = rV(unsigned(i)) @@ -204,54 +202,52 @@ function init_vregs () = { /* Vector CSR */ bitfield Vcsr : bits(3) = { - vxrm : 2 .. 
1, - vxsat : 0 + vxrm : 2 .. 1, + vxsat : 0 } register vcsr : Vcsr val ext_write_vcsr : (bits(2), bits(1)) -> unit effect {rreg, wreg} function ext_write_vcsr (vxrm_val, vxsat_val) = { - vcsr->vxrm() = vxrm_val; /* Note: frm can be an illegal value, 101, 110, 111 */ - vcsr->vxsat() = vxsat_val; + vcsr->vxrm() = vxrm_val; /* Note: frm can be an illegal value, 101, 110, 111 */ + vcsr->vxsat() = vxsat_val; } /* num_elem means max(VLMAX,VLEN/SEW)) according to Section 5.4 of RVV spec */ -val get_num_elem : (real, int) -> int effect {rreg, undef} -function get_num_elem(lmul, vsew_bits) = { - let vlen : int = get_vlen(); - var num_elem : int = undefined; - if lmul >= 1.0 then { - num_elem = floor(lmul) * vlen / vsew_bits; - } else { - /* Ignore lmul < 1 so that the entire vreg is read, allowing all masking to - * be handled in init_masked_result */ - num_elem = vlen / vsew_bits; - }; - +val get_num_elem : (int, int) -> nat effect {escape, rreg} +function get_num_elem(LMUL_pow, SEW) = { + let VLEN = int_power(2, get_vlen_pow()); + let LMUL_pow_reg = if LMUL_pow < 0 then 0 else LMUL_pow; + /* Ignore lmul < 1 so that the entire vreg is read, allowing all masking to + * be handled in init_masked_result */ + let num_elem = int_power(2, LMUL_pow_reg) * VLEN / SEW; + assert(num_elem > 0); num_elem } /* Reads a single vreg into multiple elements */ -val read_single_vreg : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), regidx) -> vector('n, dec, bits('m)) effect {escape, rreg, undef} -function read_single_vreg(num_elem, sew, vrid) = { +val read_single_vreg : forall 'n 'm. (int('n), int('m), regidx) -> vector('n, dec, bits('m)) effect {escape, rreg, undef} +function read_single_vreg(num_elem, SEW, vrid) = { let bv : vregtype = V(vrid); var result : vector('n, dec, bits('m)) = undefined; + assert(8 <= SEW & SEW <= 64); foreach (i from 0 to (num_elem - 1)) { - let start_index : int = i * sew; - result[i] = slice(bv, start_index, sew); + let start_index = i * SEW; + result[i] = slice(bv, start_index, SEW); }; result } /* Writes multiple elements into a single vreg */ -val write_single_vreg : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), regidx, vector('n, dec, bits('m))) -> unit effect {escape, rreg, wreg} -function write_single_vreg(num_elem, sew, vrid, v) = { +val write_single_vreg : forall 'n 'm. (int('n), int('m), regidx, vector('n, dec, bits('m))) -> unit effect {escape, rreg, wreg} +function write_single_vreg(num_elem, SEW, vrid, v) = { r : vregtype = zeros(); + assert(8 <= SEW & SEW <= 64); foreach (i from (num_elem - 1) downto 0) { - r = r << sew; + r = r << SEW; r = r | EXTZ(v[i]); }; @@ -260,18 +256,18 @@ function write_single_vreg(num_elem, sew, vrid, v) = { /* Reads multiple vregs into a single element */ val read_mult_vreg : forall 'n 'm, 'n >= 0. (int('n), int('m), regidx) -> bits('m) effect {escape, rreg} -function read_mult_vreg(num_vreg, num_bits, vrid) = { - let vlen : int = get_vlen(); - assert(0 < vlen & vlen <= sizeof(vlenmax)); - assert('m >= vlen); - var result : bits('m) = zeros(num_bits); +function read_mult_vreg(num_vreg, SEW, vrid) = { + let VLEN = int_power(2, get_vlen_pow()); + assert(0 < VLEN & VLEN <= sizeof(vlenmax)); + assert('m >= VLEN); + var result : bits('m) = zeros(); foreach (i from (num_vreg - 1) downto 0) { let vrid_lmul : regidx = vrid + to_bits(5, i); let bv : vregtype = V(vrid_lmul); - result = (result << vlen); - result = result | sail_zero_extend(bv[vlen - 1 .. 0], num_bits); + result = (result << VLEN); + result = result | sail_zero_extend(bv[VLEN - 1 .. 
0], SEW); }; result @@ -279,55 +275,56 @@ function read_mult_vreg(num_vreg, num_bits, vrid) = { /* Writes a single element into multiple vregs */ val write_mult_vreg : forall 'n 'm, 'n >= 0. (int('n), int('m), regidx, bits('m)) -> unit effect {escape, rreg, wreg} -function write_mult_vreg(num_vreg, num_bits, vrid, bv) = { - let vlen : int = get_vlen(); - assert(0 < vlen & vlen <= sizeof(vlenmax)); - assert('m >= vlen); +function write_mult_vreg(num_vreg, SEW, vrid, bv) = { + let VLEN = int_power(2, get_vlen_pow()); + assert(0 < VLEN & VLEN <= sizeof(vlenmax)); + assert('m >= VLEN); foreach (i from (num_vreg - 1) downto 0) { let vrid_lmul : regidx = vrid + to_bits(5, i); - let single_bv : vregtype = sail_zero_extend(slice(bv >> (vlen * i), 0, vlen), sizeof(vlenmax)); + let single_bv : vregtype = sail_zero_extend(slice(bv >> (VLEN * i), 0, VLEN), sizeof(vlenmax)); V(vrid_lmul) = single_bv } } /* The general vreg reading operation with num_elem as max(VLMAX,VLEN/SEW)) */ -val read_vreg : forall 'n 'm, 8 <= 'm <= 64. (int('n), int('m), real, regidx) -> vector('n, dec, bits('m)) effect {escape, rreg, undef} -function read_vreg(num_elem, vsew_bits, lmul, vrid) = { - var result : vector('n, dec, bits('m)) = undefined; - let vlen : int = get_vlen(); - let lmul_int : int = if lmul < 1.0 then 1 else floor(lmul); +val read_vreg : forall 'n 'm 'p. (int('n), int('m), int('p), regidx) -> vector('n, dec, bits('m)) effect {escape, rreg, undef} +function read_vreg(num_elem, SEW, LMUL_pow, vrid) = { + var result : vector('n, dec, bits('m)) = undefined; + let VLEN = int_power(2, get_vlen_pow()); + let LMUL_pow_reg = if LMUL_pow < 0 then 0 else LMUL_pow; /* Check for valid vrid */ - if lmul > 1.0 & (unsigned(vrid) + floor(lmul)) > 31 then { + if unsigned(vrid) + 2 ^ LMUL_pow_reg > 32 then { /* vrid would read past largest vreg (v31) */ result = undefined - } else if lmul > 1.0 & (unsigned(vrid) % floor(lmul) != 0) then { + } else if unsigned(vrid) % (2 ^ LMUL_pow_reg) != 0 then { /* vrid must be a multiple of lmul */ result = undefined - } else if (vsew_bits > vlen) & (vsew_bits % vlen != 0) then { - /* vsew_bits must be a multiple of vlen */ - result = undefined } else { - if vsew_bits > vlen then { + if SEW > VLEN then { /* Multiple vregs per element */ - let 'num_reg_per_elem : int = vsew_bits / vlen; + let 'num_reg_per_elem : int = SEW / VLEN; assert('num_reg_per_elem >= 0); foreach (i from 0 to (num_elem - 1)) { let vrid_lmul : regidx = vrid + to_bits(5, i * 'num_reg_per_elem); - result[i] = read_mult_vreg('num_reg_per_elem, vsew_bits, vrid_lmul) + result[i] = read_mult_vreg('num_reg_per_elem, SEW, vrid_lmul) } } else { - let 'num_elem_single : int = vlen / vsew_bits; - foreach (i_lmul from 0 to (lmul_int - 1)) { - let r_start_i : int = i_lmul * 'num_elem_single; - let r_end_i : int = r_start_i + 'num_elem_single - 1; - let vrid_lmul : regidx = vrid + to_bits(5, i_lmul); - let single_result : vector('num_elem_single, dec, bits('m)) = read_single_vreg('num_elem_single, vsew_bits, vrid_lmul); - foreach (r_i from r_start_i to r_end_i) { - let s_i : int = r_i - r_start_i; - assert(0 <= r_i & r_i < num_elem); - assert(0 <= s_i & s_i < 'num_elem_single); - result[r_i] = single_result[s_i]; + if LMUL_pow < 0 then { + result = read_single_vreg('n, SEW, vrid); + } else { + let 'num_elem_single : int = VLEN / SEW; + foreach (i_lmul from 0 to (2 ^ LMUL_pow_reg - 1)) { + let r_start_i : int = i_lmul * 'num_elem_single; + let r_end_i : int = r_start_i + 'num_elem_single - 1; + let vrid_lmul : regidx = vrid + 
to_bits(5, i_lmul); + let single_result : vector('num_elem_single, dec, bits('m)) = read_single_vreg('num_elem_single, SEW, vrid_lmul); + foreach (r_i from r_start_i to r_end_i) { + let s_i : int = r_i - r_start_i; + assert(0 <= r_i & r_i < num_elem); + assert(0 <= s_i & s_i < 'num_elem_single); + result[r_i] = single_result[s_i]; + } } } } @@ -337,70 +334,62 @@ function read_vreg(num_elem, vsew_bits, lmul, vrid) = { } /* Single element reading operation */ -val read_single_element : forall 'm 'x, 8 <= 'm <= 128. (int('m), int('x), real, regidx) -> bits('m) effect {escape, rreg, undef} -function read_single_element(elem_width_bits, index, emul, vrid) = { - real_vrid : regidx = vrid; - real_index : int = index; - let vlen : int = get_vlen(); - let 'elem_per_reg : int = vlen / elem_width_bits; - if emul > 1.0 then { - real_vrid = vrid + to_bits(5, index / 'elem_per_reg); - real_index = index % 'elem_per_reg; - }; - let vrid_val : vector('elem_per_reg, dec, bits('m)) = read_single_vreg('elem_per_reg, elem_width_bits, real_vrid); - - let 'real_index = real_index; - assert( 0 <= 'real_index & 'real_index < 'elem_per_reg ); - vrid_val['real_index] +val read_single_element : forall 'm 'x 'p, 8 <= 'm <= 128. (int('m), int('x), int('p), regidx) -> bits('m) effect {escape, rreg, undef} +function read_single_element(EEW, index, EMUL_pow, vrid) = { + let VLEN = int_power(2, get_vlen_pow()); + assert(VLEN >= EEW); + let 'elem_per_reg : int = VLEN / EEW; + let real_vrid : regidx = if EMUL_pow > 0 then vrid + to_bits(5, index / 'elem_per_reg) else vrid; + let real_index : int = if EMUL_pow > 0 then index % 'elem_per_reg else index; + let vrid_val : vector('elem_per_reg, dec, bits('m)) = read_single_vreg('elem_per_reg, EEW, real_vrid); + assert(0 <= real_index & real_index < 'elem_per_reg); + vrid_val[real_index] } /* The general vreg writing operation with num_elem as max(VLMAX,VLEN/SEW)) */ -val write_vreg : forall 'n 'm, 8 <= 'm <= 128. (int('n), int('m), real, regidx, vector('n, dec, bits('m))) -> unit effect {escape, rreg, undef, wreg} -function write_vreg(num_elem, vsew_bits, lmul, vrid, vec) = { - let vlen : int = get_vlen(); - let lmul_int : int = if lmul < 1.0 then 1 else floor(lmul); +val write_vreg : forall 'n 'm 'p. 
(int('n), int('m), int('p), regidx, vector('n, dec, bits('m))) -> unit effect {escape, rreg, undef, wreg} +function write_vreg(num_elem, SEW, LMUL_pow, vrid, vec) = { + let VLEN = int_power(2, get_vlen_pow()); + let LMUL_pow_reg = if LMUL_pow < 0 then 0 else LMUL_pow; - if vsew_bits > vlen then { + if SEW > VLEN then { /* Multiple vregs per element */ - let 'num_reg_per_elem : int = vsew_bits / vlen; + let 'num_reg_per_elem : int = SEW / VLEN; assert('num_reg_per_elem >= 0); foreach (i from 0 to (num_elem - 1)) { let vrid_lmul : regidx = vrid + to_bits(5, i * 'num_reg_per_elem); - write_mult_vreg('num_reg_per_elem, vsew_bits, vrid_lmul, vec[i]) + write_mult_vreg('num_reg_per_elem, SEW, vrid_lmul, vec[i]) } } else { - let 'num_elem_single : int = vlen / vsew_bits; - foreach (i_lmul from 0 to (lmul_int - 1)) { - var single_vec : vector('num_elem_single, dec, bits('m)) = undefined; - let vrid_lmul : regidx = vrid + to_bits(5, i_lmul); - let r_start_i : int = i_lmul * 'num_elem_single; - let r_end_i : int = r_start_i + 'num_elem_single - 1; + let 'num_elem_single : int = VLEN / SEW; + foreach (i_lmul from 0 to (2 ^ LMUL_pow_reg - 1)) { + var single_vec : vector('num_elem_single, dec, bits('m)) = undefined; + let vrid_lmul : regidx = vrid + to_bits(5, i_lmul); + let r_start_i : int = i_lmul * 'num_elem_single; + let r_end_i : int = r_start_i + 'num_elem_single - 1; foreach (r_i from r_start_i to r_end_i) { let s_i : int = r_i - r_start_i; assert(0 <= r_i & r_i < num_elem); assert(0 <= s_i & s_i < 'num_elem_single); single_vec[s_i] = vec[r_i] }; - write_single_vreg('num_elem_single, vsew_bits, vrid_lmul, single_vec) + write_single_vreg('num_elem_single, SEW, vrid_lmul, single_vec) } } } /* Single element writing operation */ -val write_single_element : forall 'm 'x, 8 <= 'm <= 128. (int('m), int('x), real, regidx, bits('m)) -> unit effect {escape, rreg, undef, wreg} -function write_single_element(elem_width_bits, index, emul, vrid, value) = { - real_vrid : regidx = vrid; - real_index : int = index; - let vlen : int = get_vlen(); - let 'elem_per_reg : int = vlen / elem_width_bits; - if emul > 1.0 then { - real_vrid = vrid + to_bits(5, index / 'elem_per_reg); - real_index = index % 'elem_per_reg; - }; - let vrid_val : vector('elem_per_reg, dec, bits('m)) = read_single_vreg('elem_per_reg, elem_width_bits, real_vrid); +val write_single_element : forall 'm 'x 'p, 8 <= 'm <= 128. (int('m), int('x), int('p), regidx, bits('m)) -> unit effect {escape, rreg, undef, wreg} +function write_single_element(EEW, index, EMUL_pow, vrid, value) = { + let VLEN = int_power(2, get_vlen_pow()); + let 'elem_per_reg : int = VLEN / EEW; + let real_vrid : regidx = if EMUL_pow > 0 then vrid + to_bits(5, index / 'elem_per_reg) else vrid; + let real_index : int = if EMUL_pow > 0 then index % 'elem_per_reg else index; + + let vrid_val : vector('elem_per_reg, dec, bits('m)) = read_single_vreg('elem_per_reg, EEW, real_vrid); r : vregtype = zeros(); foreach (i from ('elem_per_reg - 1) downto 0) { - r = r << elem_width_bits; + r = r << EEW; if i == real_index then { r = r | EXTZ(value); } else { @@ -411,11 +400,10 @@ function write_single_element(elem_width_bits, index, emul, vrid, value) = { } /* Mask register reading operation with num_elem as max(VLMAX,VLEN/SEW)) */ -val read_vmask : forall 'n, 'n >= 0. (int('n), bits(1), regidx) -> vector('n, dec, bool) effect {escape, rreg, undef} +val read_vmask : forall 'n. 
(int('n), bits(1), regidx) -> vector('n, dec, bool) effect {escape, rreg, undef} function read_vmask(num_elem, vm, vrid) = { - let vlen : int = get_vlen(); - assert('n <= vlen); - assert(0 < num_elem & num_elem <= sizeof(vlenmax)); + let VLEN = int_power(2, get_vlen_pow()); + assert(num_elem <= sizeof(vlenmax)); let vreg_val : vregtype = V(vrid); var result : vector('n, dec, bool) = undefined; @@ -431,10 +419,9 @@ function read_vmask(num_elem, vm, vrid) = { } /* This is a special version of read_vmask for carry/borrow instructions, where vm=1 means no carry */ -val read_vmask_carry : forall 'n, 'n >= 0. (int('n), bits(1), regidx) -> vector('n, dec, bool) effect {escape, rreg, undef} +val read_vmask_carry : forall 'n. (int('n), bits(1), regidx) -> vector('n, dec, bool) effect {escape, rreg, undef} function read_vmask_carry(num_elem, vm, vrid) = { - let vlen : int = get_vlen(); - assert('n <= vlen); + let VLEN = int_power(2, get_vlen_pow()); assert(0 < num_elem & num_elem <= sizeof(vlenmax)); let vreg_val : vregtype = V(vrid); var result : vector('n, dec, bool) = undefined; @@ -451,21 +438,20 @@ function read_vmask_carry(num_elem, vm, vrid) = { } /* Mask register writing operation with num_elem as max(VLMAX,VLEN/SEW)) */ -val write_vmask : forall 'n, 'n >= 0. (int('n), regidx, vector('n, dec, bool)) -> unit effect {escape, rreg, undef, wreg} +val write_vmask : forall 'n. (int('n), regidx, vector('n, dec, bool)) -> unit effect {escape, rreg, undef, wreg} function write_vmask(num_elem, vrid, v) = { - let vlen : int = get_vlen(); - assert('n <= vlen); - assert(0 < vlen & vlen <= sizeof(vlenmax)); - assert(0 < num_elem & num_elem <= sizeof(vlenmax)); + let VLEN = int_power(2, get_vlen_pow()); + assert(0 < VLEN & VLEN <= sizeof(vlenmax)); + assert(0 < num_elem & num_elem <= VLEN); let vreg_val : vregtype = V(vrid); var result : vregtype = undefined; foreach (i from 0 to (num_elem - 1)) { result[i] = bool_to_bit(v[i]) }; - foreach (i from num_elem to (vlen - 1)) { + foreach (i from num_elem to (VLEN - 1)) { /* Mask tail is always agnostic */ - result[i] = vreg_val[i] + result[i] = vreg_val[i] /* TODO: configuration support */ }; V(vrid) = result diff --git a/model/riscv_vlen.sail b/model/riscv_vlen.sail index aadd36e29..15816bc07 100644 --- a/model/riscv_vlen.sail +++ b/model/riscv_vlen.sail @@ -1,52 +1,17 @@ -register ELEN : bits(1) +register elen : bits(1) val get_elen : unit -> {|32, 64|} effect {rreg} -function get_elen() = match ELEN { +function get_elen() = match elen { 0b0 => 32, 0b1 => 64 } -register VLEN : bits(4) +register vlen : bits(4) -val get_vlen : unit -> {|32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536|} effect {rreg} +val get_vlen_pow : unit -> {|5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16|} effect {rreg} -function get_vlen() = match VLEN { - 0b0000 => 32, - 0b0001 => 64, - 0b0010 => 128, - 0b0011 => 256, - 0b0100 => 512, - 0b0101 => 1024, - 0b0110 => 2048, - 0b0111 => 4096, - 0b1000 => 8192, - 0b1001 => 16384, - 0b1010 => 32768, - _ => 65536 -} - -val vlen_bytes : unit -> {|4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192|} effect {rreg} - -function vlen_bytes() = match VLEN { - 0b0000 => 4, - 0b0001 => 8, - 0b0010 => 16, - 0b0011 => 32, - 0b0100 => 64, - 0b0101 => 128, - 0b0110 => 256, - 0b0111 => 512, - 0b1000 => 1024, - 0b1001 => 2048, - 0b1010 => 4096, - _ => 8192 -} - -/* to determine the length of vstart csr */ -val get_vstart_length : unit -> {|5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16|} effect {rreg} - -function get_vstart_length() = match VLEN { 
+function get_vlen_pow() = match vlen { 0b0000 => 5, 0b0001 => 6, 0b0010 => 7, From 5a0ae4fdbe1eadbc500f8c6d494020f86693b8ae Mon Sep 17 00:00:00 2001 From: Xinlai Wan Date: Wed, 15 Mar 2023 22:40:28 +0800 Subject: [PATCH 03/11] Vector integer/fixed-point arithmetic & mask instructions (#227) * Add vector arithmetic & mask instructions * Update vector EEW and EMUL checking function * Add vector instruction illegal check functions * Adjust code formatting for vector instruction illegal check functions Merge approved by team at tech-golden-model meeting on 2023-03-14. --- Makefile | 2 + model/riscv_insts_vext_arith.sail | 2140 ++++++++++++++++++++++++++--- model/riscv_insts_vext_mask.sail | 375 +++++ model/riscv_insts_vext_mem.sail | 32 +- model/riscv_insts_vext_utils.sail | 43 +- model/riscv_insts_vext_vm.sail | 202 +++ model/riscv_sys_regs.sail | 7 +- model/riscv_vlen.sail | 8 +- 8 files changed, 2622 insertions(+), 187 deletions(-) create mode 100755 model/riscv_insts_vext_mask.sail create mode 100755 model/riscv_insts_vext_vm.sail diff --git a/Makefile b/Makefile index 6b305ddb4..e4520ddb0 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,8 @@ SAIL_DEFAULT_INST += riscv_insts_vext_utils.sail SAIL_DEFAULT_INST += riscv_insts_vext_vset.sail SAIL_DEFAULT_INST += riscv_insts_vext_arith.sail SAIL_DEFAULT_INST += riscv_insts_vext_mem.sail +SAIL_DEFAULT_INST += riscv_insts_vext_mask.sail +SAIL_DEFAULT_INST += riscv_insts_vext_vm.sail SAIL_SEQ_INST = $(SAIL_DEFAULT_INST) riscv_jalr_seq.sail SAIL_RMEM_INST = $(SAIL_DEFAULT_INST) riscv_jalr_rmem.sail riscv_insts_rmem.sail diff --git a/model/riscv_insts_vext_arith.sail b/model/riscv_insts_vext_arith.sail index 782e6e8b4..946ddbe0e 100644 --- a/model/riscv_insts_vext_arith.sail +++ b/model/riscv_insts_vext_arith.sail @@ -42,6 +42,8 @@ function clause execute(VVTYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; @@ -104,14 +106,14 @@ function clause execute(VVTYPE(funct6, vm, vs2, vs1, vd)) = { VV_VMAXU => to_bits(SEW, max(unsigned(vs2_val[i]), unsigned(vs1_val[i]))), VV_VMAX => to_bits(SEW, max(signed(vs2_val[i]), signed(vs1_val[i]))), VV_VRGATHER => { - assert(vs1 != vd & vs2 != vd); + if (vs1 == vd | vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; let idx = unsigned(vs1_val[i]); let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); assert(VLMAX <= 'n); if idx < VLMAX then vs2_val[idx] else zeros() }, VV_VRGATHEREI16 => { - assert(vs1 != vd & vs2 != vd); + if (vs1 == vd | vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; /* vrgatherei16.vv uses SEW/LMUL for the data in vs2 but EEW=16 and EMUL = (16/SEW)*LMUL for the indices in vs1 */ let vs1_new : vector('n, dec, bits(16)) = read_vreg(num_elem, 16, 4 + LMUL_pow - SEW_pow, vs1); let idx = unsigned(vs1_new[i]); @@ -155,56 +157,56 @@ mapping vvtype_mnemonic : vvfunct6 <-> string = { mapping clause assembly = VVTYPE(funct6, vm, vs2, vs1, vd) <-> vvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) -/* ***************** OPIVX (Vector Slide & Gather Instructions) ****************** */ -/* Slide and gather instructions extend rs1/imm to XLEN intead of SEW bits */ -union clause ast = VXSG : (vxsgfunct6, bits(1), regidx, regidx, regidx) +/* ************************** OPIVV (WVTYPE Narrowing) *************************** */ +/* ************** Vector Narrowing 
Integer Right Shift Instructions ************** */ +union clause ast = NVSTYPE : (nvsfunct6, bits(1), regidx, regidx, regidx) -mapping encdec_vxsgfunct6 : vxsgfunct6 <-> bits(6) = { - VX_VSLIDEUP <-> 0b001110, - VX_VSLIDEDOWN <-> 0b001111, - VX_VRGATHER <-> 0b001100 +mapping encdec_nvsfunct6 : nvsfunct6 <-> bits(6) = { + NVS_VNSRL <-> 0b101100, + NVS_VNSRA <-> 0b101101 } -mapping clause encdec = VXSG(funct6, vm, vs2, rs1, vd) if haveRVV() - <-> encdec_vxsgfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() +mapping clause encdec = NVSTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_nvsfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() -function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { - let SEW_pow = get_sew_pow(); +function clause execute(NVSTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; + let 'o = SEW_widen; let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); - let rs1_val : nat = unsigned(X(rs1)); - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + assert(SEW_widen <= 64); foreach (i from 0 to (num_elem - 1)) { if mask[i] then { result[i] = match funct6 { - VX_VSLIDEUP => { - assert(vs2 != vd); - if i >= rs1_val then vs2_val[i - rs1_val] else vd_val[i] - }, - VX_VSLIDEDOWN => { - let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); - assert(VLMAX > 0 & VLMAX <= 'n); - if i + rs1_val < VLMAX then vs2_val[i + rs1_val] else zeros() - }, - VX_VRGATHER => { - assert(vs2 != vd); - let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); - assert(VLMAX > 0 & VLMAX <= 'n); - if rs1_val < VLMAX then vs2_val[rs1_val] else zeros() - } + NVS_VNSRL => { + let shift_amount = get_shift_amount(vs1_val[i], SEW_widen); + slice(vs2_val[i] >> shift_amount, 0, SEW) + }, + NVS_VNSRA => { + let shift_amount = get_shift_amount(vs1_val[i], SEW_widen); + let v_double : bits('o * 2) = EXTS(vs2_val[i]); + let arith_shifted : bits('o) = slice(v_double >> shift_amount, 0, SEW_widen); + slice(arith_shifted, 0, SEW) + } } } }; @@ -212,44 +214,47 @@ function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { write_vreg(num_elem, SEW, LMUL_pow, vd, result); vstart = EXTZ(0b0); RETIRE_SUCCESS -} +} -mapping vxsg_mnemonic : vxsgfunct6 <-> string = { - VX_VSLIDEUP <-> "vslideup.vx", - VX_VSLIDEDOWN <-> "vslidedown.vx", - VX_VRGATHER <-> "vrgather.vx" +mapping nvstype_mnemonic : nvsfunct6 <-> string = { + NVS_VNSRL <-> "vnsrl.wv", + NVS_VNSRA <-> "vnsra.wv" } -mapping clause assembly = VXSG(funct6, vm, vs2, rs1, vd) - <-> vxsg_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) +mapping clause assembly = NVSTYPE(funct6, vm, vs2, 
vs1, vd) + <-> nvstype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) -/* ***************** OPIVI (Vector Slide & Gather Instructions) ****************** */ -/* Slide and gather instructions extend rs1/imm to XLEN intead of SEW bits */ -union clause ast = VISG : (visgfunct6, bits(1), regidx, bits(5), regidx) +/* ************************** OPIVV (WVTYPE Narrowing) *************************** */ +/* *************** Vector Narrowing Fixed-Point Clip Instructions **************** */ +union clause ast = NVTYPE : (nvfunct6, bits(1), regidx, regidx, regidx) -mapping encdec_visgfunct6 : visgfunct6 <-> bits(6) = { - VI_VSLIDEUP <-> 0b001110, - VI_VSLIDEDOWN <-> 0b001111, - VI_VRGATHER <-> 0b001100 +mapping encdec_nvfunct6 : nvfunct6 <-> bits(6) = { + NV_VNCLIPU <-> 0b101110, + NV_VNCLIP <-> 0b101111 } -mapping clause encdec = VISG(funct6, vm, vs2, simm, vd) if haveRVV() - <-> encdec_visgfunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() +mapping clause encdec = NVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_nvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() -function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { - let SEW_pow = get_sew_pow(); +function clause execute(NVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; + let 'o = SEW_widen; let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); - let imm_val : nat = unsigned(EXTZ(sizeof(xlen), simm)); - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -257,22 +262,18 @@ function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { foreach (i from 0 to (num_elem - 1)) { if mask[i] then { + let shift_amount = get_shift_amount(vs1_val[i], SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); result[i] = match funct6 { - VI_VSLIDEUP => { - assert(vs2 != vd); - if i >= imm_val then vs2_val[i - imm_val] else vd_val[i] - }, - VI_VSLIDEDOWN => { - let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); - assert(VLMAX > 0 & VLMAX <= 'n); - if i + imm_val < VLMAX then vs2_val[i + imm_val] else zeros() - }, - VI_VRGATHER => { - assert(vs2 != vd); - let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); - assert(VLMAX > 0 & VLMAX <= 'n); - if imm_val < VLMAX then vs2_val[imm_val] else zeros() - } + NV_VNCLIPU => { + let result_wide = (vs2_val[i] >> shift_amount) + EXTZ('o, rounding_incr); + unsigned_saturation('m, result_wide); + }, + NV_VNCLIP => { + let v_double : bits('m * 4) = EXTS(vs2_val[i]); + let result_wide = slice(v_double >> shift_amount, 0, 'o) + EXTZ('o, rounding_incr); + signed_saturation('m, result_wide); + } } } }; @@ -280,217 +281,2010 @@ function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { 
write_vreg(num_elem, SEW, LMUL_pow, vd, result); vstart = EXTZ(0b0); RETIRE_SUCCESS -} +} -mapping visg_mnemonic : visgfunct6 <-> string = { - VI_VSLIDEUP <-> "vslideup.vi", - VI_VSLIDEDOWN <-> "vslidedown.vi", - VI_VRGATHER <-> "vrgather.vi" +mapping nvtype_mnemonic : nvfunct6 <-> string = { + NV_VNCLIPU <-> "vnclipu.wv", + NV_VNCLIP <-> "vnclip.wv" } -mapping clause assembly = VISG(funct6, vm, vs2, simm, vd) - <-> visg_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(simm) ^ maybe_vmask(vm) +mapping clause assembly = NVTYPE(funct6, vm, vs2, vs1, vd) + <-> nvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) -/* ********************* Whole Vector Register Move (OPIVI) ********************** */ -union clause ast = VMVRTYPE : (regidx, bits(5), regidx) +/* ********************** OPIVV (Integer Merge Instruction) ********************** */ +union clause ast = MASKTYPEV : (regidx, regidx, regidx) -mapping clause encdec = VMVRTYPE(vs2, simm, vd) if haveRVV() - <-> 0b100111 @ 0b1 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() +mapping clause encdec = MASKTYPEV (vs2, vs1, vd) if haveRVV() + <-> 0b010111 @ 0b0 @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() -function clause execute(VMVRTYPE(vs2, simm, vd)) = { - let SEW = get_sew(); - let imm_val = unsigned(EXTZ(sizeof(xlen), simm)); - let EMUL = imm_val + 1; +function clause execute(MASKTYPEV(vs2, vs1, vd)) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ + + if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; - assert(EMUL == 1 | EMUL == 2 | EMUL == 4 | EMUL == 8); - let EMUL_pow = log2(EMUL); - let num_elem = get_num_elem(EMUL_pow, SEW); let 'n = num_elem; let 'm = SEW; - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, EMUL_pow, vs2); - write_vreg(num_elem, SEW, EMUL_pow, vd, vs2_val); - vstart = EXTZ(0b0); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; - RETIRE_SUCCESS -} + let tail_ag : agtype = get_vtype_vta(); + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + result[i] = vd_val[i] + } else if i > end_element | i >= real_num_elem then { + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i] /* TODO: configuration support */ + } + } else { + /* the merge operates on all body elements */ + result[i] = if vm_val[i] then vs1_val[i] else vs2_val[i] + } + }; -mapping simm_string : bits(5) <-> string = { - 0b00000 <-> "1", - 0b00001 <-> "2", - 0b00011 <-> "4", - 0b00111 <-> "8" + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS } -mapping clause assembly = VMVRTYPE(vs2, simm, vd) - <-> "vmv" ^ simm_string(simm) ^ "r.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) +mapping clause assembly = MASKTYPEV(vs2, vs1, vd) +<-> "vmerge.vvm" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() 
^ vreg_name(vs1) ^ sep() ^ "v0" -/* ****************************** OPMVV (VWXUNARY0) ****************************** */ -union clause ast = VMVXS : (regidx, regidx) +/* ********************** OPIVV (Integer Move Instruction) *********************** */ +union clause ast = MOVETYPEV : (regidx, regidx) -mapping clause encdec = VMVXS(vs2, rd) if haveRVV() - <-> 0b010000 @ 0b1 @ vs2 @ 0b00000 @ 0b010 @ rd @ 0b1010111 if haveRVV() +mapping clause encdec = MOVETYPEV (vs1, vd) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() -function clause execute(VMVXS(vs2, rd)) = { +function clause execute(MOVETYPEV(vs1, vd)) = { let SEW = get_sew(); - let num_elem = get_num_elem(0, SEW); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); - assert(num_elem > 0); let 'n = num_elem; let 'm = SEW; - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vs2); - X(rd) = if sizeof(xlen) < SEW then slice(vs2_val[0], 0, sizeof(xlen)) - else if sizeof(xlen) > SEW then EXTS(vs2_val[0]) - else vs2_val[0]; - vstart = EXTZ(0b0); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = vs1_val[i] + }; + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); RETIRE_SUCCESS } -mapping clause assembly = VMVXS(vs2, rd) - <-> "vmv.x.s" ^ spc() ^ reg_name(rd) ^ sep() ^ vreg_name(vs2) +mapping clause assembly = MOVETYPEV(vs1, vd) + <-> "vmv.v.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) -/* ****************************** OPMVX (VRXUNARY0) ****************************** */ -union clause ast = VMVSX : (regidx, regidx) +/* ******************************* OPIVX (VXTYPE) ******************************** */ +union clause ast = VXTYPE : (vxfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_vxfunct6 : vxfunct6 <-> bits(6) = { + VX_VADD <-> 0b000000, + VX_VSUB <-> 0b000010, + VX_VRSUB <-> 0b000011, + VX_VMINU <-> 0b000100, + VX_VMIN <-> 0b000101, + VX_VMAXU <-> 0b000110, + VX_VMAX <-> 0b000111, + VX_VAND <-> 0b001001, + VX_VOR <-> 0b001010, + VX_VXOR <-> 0b001011, + VX_VSADDU <-> 0b100000, + VX_VSADD <-> 0b100001, + VX_VSSUBU <-> 0b100010, + VX_VSSUB <-> 0b100011, + VX_VSLL <-> 0b100101, + VX_VSMUL <-> 0b100111, + VX_VSRL <-> 0b101000, + VX_VSRA <-> 0b101001, + VX_VSSRL <-> 0b101010, + VX_VSSRA <-> 0b101011 +} -mapping clause encdec = VMVSX(rs1, vd) if haveRVV() - <-> 0b010000 @ 0b1 @ 0b00000 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() +mapping clause encdec = VXTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_vxfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() -function clause execute(VMVSX(rs1, vd)) = { +function clause execute(VXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW = get_sew(); - let num_elem = get_num_elem(0, SEW); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; - assert(num_elem > 0); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); - let rs1_val : bits('m) = get_scalar(rs1, 'm); - 
let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vd); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; - (result, mask) = init_masked_result(num_elem, SEW, 0, vd_val, vm_val); - - /* one body element */ - if mask[0] == true then result[0] = rs1_val; + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); - /* others treated as tail elements */ - let tail_ag : agtype = get_vtype_vta(); - if tail_ag == UNDISTURBED then { - foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] - } else if tail_ag == AGNOSTIC then { - foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] /* TODO: configuration support */ + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VX_VADD => vs2_val[i] + rs1_val, + VX_VSUB => vs2_val[i] - rs1_val, + VX_VRSUB => rs1_val - vs2_val[i], + VX_VAND => vs2_val[i] & rs1_val, + VX_VOR => vs2_val[i] | rs1_val, + VX_VXOR => vs2_val[i] ^ rs1_val, + VX_VSADDU => unsigned_saturation('m, EXTZ('m + 1, vs2_val[i]) + EXTZ('m + 1, rs1_val) ), + VX_VSADD => signed_saturation('m, EXTS('m + 1, vs2_val[i]) + EXTS('m + 1, rs1_val) ), + VX_VSSUBU => { + if unsigned(vs2_val[i]) < unsigned(rs1_val) then zeros() + else unsigned_saturation('m, EXTZ('m + 1, vs2_val[i]) - EXTZ('m + 1, rs1_val) ) + }, + VX_VSSUB => signed_saturation('m, EXTS('m + 1, vs2_val[i]) - EXTS('m + 1, rs1_val) ), + VX_VSMUL => { + let result_mul = to_bits('m * 2, signed(vs2_val[i]) * signed(rs1_val)); + let rounding_incr = get_fixed_rounding_incr(result_mul, 'm - 1); + let result_wide = (result_mul >> ('m - 1)) + EXTZ('m * 2, rounding_incr); + signed_saturation('m, result_wide['m..0]) + }, + VX_VSLL => { + let shift_amount = get_shift_amount(rs1_val, SEW); + vs2_val[i] << shift_amount + }, + VX_VSRL => { + let shift_amount = get_shift_amount(rs1_val, SEW); + vs2_val[i] >> shift_amount + }, + VX_VSRA => { + let shift_amount = get_shift_amount(rs1_val, SEW); + let v_double : bits('m * 2) = EXTS(vs2_val[i]); + slice(v_double >> shift_amount, 0, SEW) + }, + VX_VSSRL => { + let shift_amount = get_shift_amount(rs1_val, SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + (vs2_val[i] >> shift_amount) + EXTZ('m, rounding_incr) + }, + VX_VSSRA => { + let shift_amount = get_shift_amount(rs1_val, SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + let v_double : bits('m * 2) = EXTS(vs2_val[i]); + slice(v_double >> shift_amount, 0, SEW) + EXTZ('m, rounding_incr) + }, + VX_VMINU => to_bits(SEW, min(unsigned(vs2_val[i]), unsigned(rs1_val))), + VX_VMIN => to_bits(SEW, min(signed(vs2_val[i]), signed(rs1_val))), + VX_VMAXU => to_bits(SEW, max(unsigned(vs2_val[i]), unsigned(rs1_val))), + VX_VMAX => to_bits(SEW, max(signed(vs2_val[i]), signed(rs1_val))) + } + } }; - write_vreg(num_elem, SEW, 0, vd, result); + write_vreg(num_elem, SEW, LMUL_pow, vd, result); vstart = EXTZ(0b0); - RETIRE_SUCCESS } -mapping clause assembly = VMVSX(rs1, vd) - <-> "vmv.s.x" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) +mapping vxtype_mnemonic : vxfunct6 <-> string = { + VX_VADD <-> "vadd.vx", + VX_VSUB <-> "vsub.vx", + VX_VRSUB <-> "vrsub.vx", + VX_VAND <-> "vand.vx", + VX_VOR <-> 
"vor.vx", + VX_VXOR <-> "vxor.vx", + VX_VSADDU <-> "vsaddu.vx", + VX_VSADD <-> "vsadd.vx", + VX_VSSUBU <-> "vssubu.vx", + VX_VSSUB <-> "vssub.vx", + VX_VSLL <-> "vsll.vx", + VX_VSMUL <-> "vsmul.vx", + VX_VSRL <-> "vsrl.vx", + VX_VSRA <-> "vsra.vx", + VX_VSSRL <-> "vssrl.vx", + VX_VSSRA <-> "vssra.vx", + VX_VMINU <-> "vminu.vx", + VX_VMIN <-> "vmin.vx", + VX_VMAXU <-> "vmaxu.vx", + VX_VMAX <-> "vmax.vx" +} -/* ********************** Integer Move Instruction (OPIVV) *********************** */ -union clause ast = MOVETYPEV : (regidx, regidx) +mapping clause assembly = VXTYPE(funct6, vm, vs2, rs1, vd) + <-> vxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) -mapping clause encdec = MOVETYPEV (vs1, vd) if haveRVV() - <-> 0b010111 @ 0b1 @ 0b00000 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() +/* ************************** OPIVX (WXTYPE Narrowing) *************************** */ +/* ************** Vector Narrowing Integer Right Shift Instructions ************** */ +union clause ast = NXSTYPE : (nxsfunct6, bits(1), regidx, regidx, regidx) -function clause execute(MOVETYPEV(vs1, vd)) = { +mapping encdec_nxsfunct6 : nxsfunct6 <-> bits(6) = { + NXS_VNSRL <-> 0b101100, + NXS_VNSRA <-> 0b101101 +} + +mapping clause encdec = NXSTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_nxsfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(NXSTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; + let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); - let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(SEW_widen <= 64); foreach (i from 0 to (num_elem - 1)) { - if mask[i] then result[i] = vs1_val[i] + if mask[i] then { + result[i] = match funct6 { + NXS_VNSRL => { + let shift_amount = get_shift_amount(rs1_val, SEW_widen); + slice(vs2_val[i] >> shift_amount, 0, SEW) + }, + NXS_VNSRA => { + let shift_amount = get_shift_amount(rs1_val, SEW_widen); + let v_double : bits('o * 2) = EXTS(vs2_val[i]); + let arith_shifted : bits('o) = slice(v_double >> shift_amount, 0, SEW_widen); + slice(arith_shifted, 0, SEW) + } + } + } }; write_vreg(num_elem, SEW, LMUL_pow, vd, result); vstart = EXTZ(0b0); - RETIRE_SUCCESS } -mapping clause assembly = MOVETYPEV(vs1, vd) - <-> "vmv.v.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) +mapping nxstype_mnemonic : nxsfunct6 <-> string = { + NXS_VNSRL <-> "vnsrl.wx", + NXS_VNSRA <-> "vnsra.wx" +} -/* ********************** Integer Move Instruction (OPIVX) *********************** */ -union clause ast = MOVETYPEX : (regidx, regidx) +mapping clause assembly = 
NXSTYPE(funct6, vm, vs2, rs1, vd) + <-> nxstype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) -mapping clause encdec = MOVETYPEX (rs1, vd) if haveRVV() - <-> 0b010111 @ 0b1 @ 0b00000 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() +/* ************************** OPIVX (WXTYPE Narrowing) *************************** */ +/* *************** Vector Narrowing Fixed-Point Clip Instructions **************** */ +union clause ast = NXTYPE : (nxfunct6, bits(1), regidx, regidx, regidx) -function clause execute(MOVETYPEX(rs1, vd)) = { +mapping encdec_nxfunct6 : nxfunct6 <-> bits(6) = { + NX_VNCLIPU <-> 0b101110, + NX_VNCLIP <-> 0b101111 +} + +mapping clause encdec = NXTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_nxfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(NXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; + let 'o = SEW_widen; - let rs1_val : bits('m) = get_scalar(rs1, 'm); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + foreach (i from 0 to (num_elem - 1)) { - if mask[i] then result[i] = rs1_val + if mask[i] then { + let shift_amount = get_shift_amount(rs1_val, SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + result[i] = match funct6 { + NX_VNCLIPU => { + let result_wide = (vs2_val[i] >> shift_amount) + EXTZ('o, rounding_incr); + unsigned_saturation('m, result_wide) + }, + NX_VNCLIP => { + let v_double : bits('m * 4) = EXTS(vs2_val[i]); + let result_wide = slice(v_double >> shift_amount, 0, 'o) + EXTZ('o, rounding_incr); + signed_saturation('m, result_wide) + } + } + } }; write_vreg(num_elem, SEW, LMUL_pow, vd, result); vstart = EXTZ(0b0); - RETIRE_SUCCESS } -mapping clause assembly = MOVETYPEX(rs1, vd) - <-> "vmv.v.x" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) +mapping nxtype_mnemonic : nxfunct6 <-> string = { + NX_VNCLIPU <-> "vnclipu.wx", + NX_VNCLIP <-> "vnclip.wx" +} -/* ********************** Integer Move Instruction (OPIVI) *********************** */ -union clause ast = MOVETYPEI : (regidx, bits(5)) +mapping clause assembly = NXTYPE(funct6, vm, vs2, rs1, vd) + <-> nxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) -mapping clause encdec = MOVETYPEI (vd, simm) if haveRVV() - <-> 0b010111 @ 0b1 @ 0b00000 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() +/* ***************** OPIVX (Vector Slide & Gather Instructions) ****************** */ +/* Slide and gather instructions extend rs1/imm to XLEN intead of SEW bits */ +union clause ast = VXSG : (vxsgfunct6, bits(1), regidx, regidx, regidx) -function clause 
execute(MOVETYPEI(vd, simm)) = { +mapping encdec_vxsgfunct6 : vxsgfunct6 <-> bits(6) = { + VX_VSLIDEUP <-> 0b001110, + VX_VSLIDEDOWN <-> 0b001111, + VX_VRGATHER <-> 0b001100 +} + +mapping clause encdec = VXSG(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_vxsgfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { + let SEW_pow = get_sew_pow(); let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); + let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); - let imm_val : bits('m) = EXTS(simm); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : nat = unsigned(X(rs1)); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); - foreach (i from 0 to (num_elem - 1)) { - if mask[i] then result[i] = imm_val - }; - - write_vreg(num_elem, SEW, LMUL_pow, vd, result); - vstart = EXTZ(0b0); - - RETIRE_SUCCESS -} + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VX_VSLIDEUP => { + if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + if i >= rs1_val then vs2_val[i - rs1_val] else vd_val[i] + }, + VX_VSLIDEDOWN => { + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if i + rs1_val < VLMAX then vs2_val[i + rs1_val] else zeros() + }, + VX_VRGATHER => { + if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if rs1_val < VLMAX then vs2_val[rs1_val] else zeros() + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vxsg_mnemonic : vxsgfunct6 <-> string = { + VX_VSLIDEUP <-> "vslideup.vx", + VX_VSLIDEDOWN <-> "vslidedown.vx", + VX_VRGATHER <-> "vrgather.vx" +} + +mapping clause assembly = VXSG(funct6, vm, vs2, rs1, vd) + <-> vxsg_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ********************** OPIVX (Integer Merge Instruction) ********************** */ +union clause ast = MASKTYPEX : (regidx, regidx, regidx) + +mapping clause encdec = MASKTYPEX(vs2, rs1, vd) if haveRVV() + <-> 0b010111 @ 0b0 @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MASKTYPEX(vs2, rs1, vd)) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ + + if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) 
= read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + let tail_ag : agtype = get_vtype_vta(); + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + result[i] = vd_val[i] + } else if i > end_element | i >= real_num_elem then { + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i] /* TODO: configuration support */ + } + } else { + /* the merge operates on all body elements */ + result[i] = if vm_val[i] then rs1_val else vs2_val[i] + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = MASKTYPEX(vs2, rs1, vd) + <-> "vmerge.vxm" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ sep() ^ "v0" + +/* ********************** OPIVX (Integer Move Instruction) *********************** */ +union clause ast = MOVETYPEX : (regidx, regidx) + +mapping clause encdec = MOVETYPEX (rs1, vd) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MOVETYPEX(rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let rs1_val : bits('m) = get_scalar(rs1, 'm); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = rs1_val + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = MOVETYPEX(rs1, vd) + <-> "vmv.v.x" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) + +/* ******************************* OPIVI (VITYPE) ******************************** */ +union clause ast = VITYPE : (vifunct6, bits(1), regidx, bits(5), regidx) + +mapping encdec_vifunct6 : vifunct6 <-> bits(6) = { + VI_VADD <-> 0b000000, + VI_VRSUB <-> 0b000011, + VI_VAND <-> 0b001001, + VI_VOR <-> 0b001010, + VI_VXOR <-> 0b001011, + VI_VSADDU <-> 0b100000, + VI_VSADD <-> 0b100001, + VI_VSLL <-> 0b100101, + VI_VSRL <-> 0b101000, + VI_VSRA <-> 0b101001, + VI_VSSRL <-> 0b101010, + VI_VSSRA <-> 0b101011 +} + +mapping clause encdec = VITYPE(funct6, vm, vs2, simm, vd) if haveRVV() + <-> encdec_vifunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VITYPE(funct6, vm, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VI_VADD => vs2_val[i] + imm_val, + VI_VRSUB => imm_val - 
vs2_val[i], + VI_VAND => vs2_val[i] & imm_val, + VI_VOR => vs2_val[i] | imm_val, + VI_VXOR => vs2_val[i] ^ imm_val, + VI_VSADDU => unsigned_saturation('m, EXTZ('m + 1, vs2_val[i]) + EXTZ('m + 1, imm_val) ), + VI_VSADD => signed_saturation('m, EXTS('m + 1, vs2_val[i]) + EXTS('m + 1, imm_val) ), + VI_VSLL => { + let shift_amount = get_shift_amount(sail_zero_extend(simm, SEW), SEW); + vs2_val[i] << shift_amount + }, + VI_VSRL => { + let shift_amount = get_shift_amount(sail_zero_extend(simm, SEW), SEW); + vs2_val[i] >> shift_amount + }, + VI_VSRA => { + let shift_amount = get_shift_amount(sail_zero_extend(simm, SEW), SEW); + let v_double : bits('m * 2) = EXTS(vs2_val[i]); + slice(v_double >> shift_amount, 0, SEW) + }, + VI_VSSRL => { + let shift_amount = get_shift_amount(sail_zero_extend(simm, SEW), SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + (vs2_val[i] >> shift_amount) + EXTZ('m, rounding_incr) + }, + VI_VSSRA => { + let shift_amount = get_shift_amount(sail_zero_extend(simm, SEW), SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + let v_double : bits('m * 2) = EXTS(vs2_val[i]); + slice(v_double >> shift_amount, 0, SEW) + EXTZ('m, rounding_incr) + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vitype_mnemonic : vifunct6 <-> string = { + VI_VADD <-> "vadd.vi", + VI_VRSUB <-> "vrsub.vi", + VI_VAND <-> "vand.vi", + VI_VOR <-> "vor.vi", + VI_VXOR <-> "vxor.vi", + VI_VSADDU <-> "vsaddu.vi", + VI_VSADD <-> "vsadd.vi", + VI_VSLL <-> "vsll.vi", + VI_VSRL <-> "vsrl.vi", + VI_VSRA <-> "vsra.vi", + VI_VSSRL <-> "vssrl.vi", + VI_VSSRA <-> "vssra.vi" +} + +mapping clause assembly = VITYPE(funct6, vm, vs2, simm, vd) + <-> vitype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ maybe_vmask(vm) + +/* ************************** OPIVI (WITYPE Narrowing) *************************** */ +/* ************** Vector Narrowing Integer Right Shift Instructions ************** */ +union clause ast = NISTYPE : (nisfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_nisfunct6 : nisfunct6 <-> bits(6) = { + NIS_VNSRL <-> 0b101100, + NIS_VNSRA <-> 0b101101 +} + +mapping clause encdec = NISTYPE(funct6, vm, vs2, simm, vd) if haveRVV() + <-> encdec_nisfunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(NISTYPE(funct6, vm, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(SEW_widen <= 64); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + NIS_VNSRL => { + let shift_amount = 
get_shift_amount(imm_val, SEW_widen); + slice(vs2_val[i] >> shift_amount, 0, SEW) + }, + NIS_VNSRA => { + let shift_amount = get_shift_amount(imm_val, SEW_widen); + let v_double : bits('o * 2) = EXTS(vs2_val[i]); + let arith_shifted : bits('o) = slice(v_double >> shift_amount, 0, SEW_widen); + slice(arith_shifted, 0, SEW) + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping nistype_mnemonic : nisfunct6 <-> string = { + NIS_VNSRL <-> "vnsrl.wi", + NIS_VNSRA <-> "vnsra.wi" +} + +mapping clause assembly = NISTYPE(funct6, vm, vs2, simm, vd) + <-> nistype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ maybe_vmask(vm) + +/* ************************** OPIVI (WITYPE Narrowing) *************************** */ +/* *************** Vector Narrowing Fixed-Point Clip Instructions **************** */ +union clause ast = NITYPE : (nifunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_nifunct6 : nifunct6 <-> bits(6) = { + NI_VNCLIPU <-> 0b101110, + NI_VNCLIP <-> 0b101111 +} + +mapping clause encdec = NITYPE(funct6, vm, vs2, simm, vd) if haveRVV() + <-> encdec_nifunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(NITYPE(funct6, vm, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let shift_amount = get_shift_amount(imm_val, SEW); + let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); + result[i] = match funct6 { + NI_VNCLIPU => { + let result_wide = (vs2_val[i] >> shift_amount) + EXTZ('o, rounding_incr); + unsigned_saturation('m, result_wide) + }, + NI_VNCLIP => { + let v_double : bits('m * 4) = EXTS(vs2_val[i]); + let result_wide = slice(v_double >> shift_amount, 0, 'o) + EXTZ('o, rounding_incr); + signed_saturation('m, result_wide) + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping nitype_mnemonic : nifunct6 <-> string = { + NI_VNCLIPU <-> "vnclipu.wi", + NI_VNCLIP <-> "vnclip.wi" +} + +mapping clause assembly = NITYPE(funct6, vm, vs2, simm, vd) + <-> nitype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ maybe_vmask(vm) + +/* ***************** OPIVI (Vector Slide & Gather Instructions) ****************** */ +/* Slide and gather instructions extend rs1/imm to XLEN intead of SEW bits */ +union clause ast = VISG : (visgfunct6, bits(1), regidx, bits(5), regidx) + +mapping encdec_visgfunct6 : visgfunct6 <-> bits(6) = { + VI_VSLIDEUP <-> 0b001110, + VI_VSLIDEDOWN <-> 0b001111, + VI_VRGATHER <-> 
0b001100 +} + +mapping clause encdec = VISG(funct6, vm, vs2, simm, vd) if haveRVV() + <-> encdec_visgfunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { + let SEW_pow = get_sew_pow(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let VLEN_pow = get_vlen_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let imm_val : nat = unsigned(EXTZ(sizeof(xlen), simm)); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VI_VSLIDEUP => { + if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + if i >= imm_val then vs2_val[i - imm_val] else vd_val[i] + }, + VI_VSLIDEDOWN => { + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if i + imm_val < VLMAX then vs2_val[i + imm_val] else zeros() + }, + VI_VRGATHER => { + if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + let VLMAX = int_power(2, LMUL_pow + VLEN_pow - SEW_pow); + assert(VLMAX > 0 & VLMAX <= 'n); + if imm_val < VLMAX then vs2_val[imm_val] else zeros() + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping visg_mnemonic : visgfunct6 <-> string = { + VI_VSLIDEUP <-> "vslideup.vi", + VI_VSLIDEDOWN <-> "vslidedown.vi", + VI_VRGATHER <-> "vrgather.vi" +} + +mapping clause assembly = VISG(funct6, vm, vs2, simm, vd) + <-> visg_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(simm) ^ maybe_vmask(vm) + +/* ********************** OPIVI (Integer Merge Instruction) ********************** */ +union clause ast = MASKTYPEI : (regidx, bits(5), regidx) + +mapping clause encdec = MASKTYPEI(vs2, simm, vd) if haveRVV() + <-> 0b010111 @ 0b0 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MASKTYPEI(vs2, simm, vd)) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ + + if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + let tail_ag : agtype = get_vtype_vta(); + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + result[i] = vd_val[i] + } else if i > end_element | i >= real_num_elem then { + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i] /* TODO: 
configuration support */ + } + } else { + /* the merge operates on all body elements */ + result[i] = if vm_val[i] then imm_val else vs2_val[i] + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = MASKTYPEI(vs2, simm, vd) + <-> "vmerge.vim" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ sep() ^ "v0" + +/* ********************** OPIVI (Integer Move Instruction) *********************** */ +union clause ast = MOVETYPEI : (regidx, bits(5)) + +mapping clause encdec = MOVETYPEI (vd, simm) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MOVETYPEI(vd, simm)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = imm_val + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} mapping clause assembly = MOVETYPEI(vd, simm) <-> "vmv.v.i" ^ spc() ^ vreg_name(vd) ^ sep() ^ hex_bits_5(simm) + +/* ********************* OPIVI (Whole Vector Register Move) ********************** */ +union clause ast = VMVRTYPE : (regidx, bits(5), regidx) + +mapping clause encdec = VMVRTYPE(vs2, simm, vd) if haveRVV() + <-> 0b100111 @ 0b1 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMVRTYPE(vs2, simm, vd)) = { + let start_element = get_start_element(); + let SEW = get_sew(); + let imm_val = unsigned(EXTZ(sizeof(xlen), simm)); + let EMUL = imm_val + 1; + + if ~(EMUL == 1 | EMUL == 2 | EMUL == 4 | EMUL == 8) then { handle_illegal(); return RETIRE_FAIL }; + + let EMUL_pow = log2(EMUL); + let num_elem = get_num_elem(EMUL_pow, SEW); + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, EMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, EMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + foreach (i from 0 to (num_elem - 1)) { + result[i] = if i < start_element then vd_val[i] else vs2_val[i] + }; + + write_vreg(num_elem, SEW, EMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping simm_string : bits(5) <-> string = { + 0b00000 <-> "1", + 0b00001 <-> "2", + 0b00011 <-> "4", + 0b00111 <-> "8" +} + +mapping clause assembly = VMVRTYPE(vs2, simm, vd) + <-> "vmv" ^ simm_string(simm) ^ "r.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) + +/* ******************************* OPMVV (MVVTYPE) ******************************* */ +union clause ast = MVVTYPE : (mvvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_mvvfunct6 : mvvfunct6 <-> bits(6) = { + MVV_VAADDU <-> 0b001000, + MVV_VAADD <-> 0b001001, + MVV_VASUBU <-> 0b001010, + MVV_VASUB <-> 0b001011, + MVV_VMUL <-> 0b100101, + MVV_VMULH <-> 0b100111, + MVV_VMULHU <-> 0b100100, + MVV_VMULHSU <-> 0b100110, + MVV_VDIVU <-> 0b100000, + MVV_VDIV <-> 0b100001, + MVV_VREMU <-> 0b100010, + MVV_VREM 
<-> 0b100011 +} + +mapping clause encdec = MVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_mvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then{ + result[i] = match funct6 { + MVV_VAADDU => { + let result_add = EXTZ('m + 1, vs2_val[i]) + EXTZ('m + 1, vs1_val[i]); + let rounding_incr = get_fixed_rounding_incr(result_add, 1); + slice(result_add >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVV_VAADD => { + let result_add = EXTS('m + 1, vs2_val[i]) + EXTS('m + 1, vs1_val[i]); + let rounding_incr = get_fixed_rounding_incr(result_add, 1); + slice(result_add >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVV_VASUBU => { + let result_sub = EXTZ('m + 1, vs2_val[i]) - EXTZ('m + 1, vs1_val[i]); + let rounding_incr = get_fixed_rounding_incr(result_sub, 1); + slice(result_sub >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVV_VASUB => { + let result_sub = EXTS('m + 1, vs2_val[i]) - EXTS('m + 1, vs1_val[i]); + let rounding_incr = get_fixed_rounding_incr(result_sub, 1); + slice(result_sub >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVV_VMUL => get_slice_int(SEW, signed(vs2_val[i]) * signed(vs1_val[i]), 0), + MVV_VMULH => get_slice_int(SEW, signed(vs2_val[i]) * signed(vs1_val[i]), SEW), + MVV_VMULHU => get_slice_int(SEW, unsigned(vs2_val[i]) * unsigned(vs1_val[i]), SEW), + MVV_VMULHSU => get_slice_int(SEW, signed(vs2_val[i]) * unsigned(vs1_val[i]), SEW), + MVV_VDIVU => { + let q : int = if unsigned(vs1_val[i]) == 0 then -1 else quot_round_zero(unsigned(vs2_val[i]), unsigned(vs1_val[i])); + to_bits(SEW, q) + }, + MVV_VDIV => { + let elem_max : int = 2 ^ (SEW - 1) - 1; + let elem_min : int = 0 - 2 ^ (SEW - 1); + let q : int = if signed(vs1_val[i]) == 0 then -1 else quot_round_zero(signed(vs2_val[i]), signed(vs1_val[i])); + /* check for signed overflow */ + let q' : int = if q > elem_max then elem_min else q; + to_bits(SEW, q') + }, + MVV_VREMU => { + let r : int = if unsigned(vs1_val[i]) == 0 then unsigned(vs2_val[i]) else rem_round_zero(unsigned(vs2_val[i]), unsigned(vs1_val[i])); + /* signed overflow case returns zero naturally as required due to -1 divisor */ + to_bits(SEW, r) + }, + MVV_VREM => { + let r : int = if signed(vs1_val[i]) == 0 then signed(vs2_val[i]) else rem_round_zero(signed(vs2_val[i]), signed(vs1_val[i])); + /* signed overflow case returns zero naturally as required due to -1 divisor */ + to_bits(SEW, r) + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping mvvtype_mnemonic : mvvfunct6 <-> string = { + MVV_VAADDU <-> "vaaddu.vv", + MVV_VAADD <-> "vaadd.vv", + MVV_VASUBU <-> "vasubu.vv", + MVV_VASUB <-> "vasub.vv", + MVV_VMUL <-> "vmul.vv", + 
MVV_VMULH <-> "vmulh.vv", + MVV_VMULHU <-> "vmulhu.vv", + MVV_VMULHSU <-> "vmulhsu.vv", + MVV_VDIVU <-> "vdivu.vv", + MVV_VDIV <-> "vdiv.vv", + MVV_VREMU <-> "vremu.vv", + MVV_VREM <-> "vrem.vv" +} + +mapping clause assembly = MVVTYPE(funct6, vm, vs2, vs1, vd) + <-> mvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ************************ OPMVV (MVVtype Multiply-Add) ************************* */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = MVVMATYPE : (mvvmafunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_mvvmafunct6 : mvvmafunct6 <-> bits(6) = { + MVV_VMACC <-> 0b101101, + MVV_VNMSAC <-> 0b101111, + MVV_VMADD <-> 0b101001, + MVV_VNMSUB <-> 0b101011 +} + +mapping clause encdec = MVVMATYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_mvvmafunct6(funct6) @ vm @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MVVMATYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let VLEN = int_power(2, get_vlen_pow()); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(VLEN >= 0); + foreach (i from 0 to (num_elem - 1)){ + if mask[i] then{ + result[i] = match funct6 { + MVV_VMACC => get_slice_int(SEW, signed(vs1_val[i]) * signed(vs2_val[i]), 0) + vd_val[i], + MVV_VNMSAC => vd_val[i] - get_slice_int(SEW, signed(vs1_val[i]) * signed(vs2_val[i]), 0), + MVV_VMADD => get_slice_int(SEW, signed(vs1_val[i]) * signed(vd_val[i]), 0) + vs2_val[i], + MVV_VNMSUB => vs2_val[i] - get_slice_int(SEW, signed(vs1_val[i]) * signed(vd_val[i]), 0) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping mvvmatype_mnemonic : mvvmafunct6 <-> string = { + MVV_VMACC <-> "vmacc.vv", + MVV_VNMSAC <-> "vnmsac.vv", + MVV_VMADD <-> "vmadd.vv", + MVV_VNMSUB <-> "vnmsub.vv" +} + +mapping clause assembly = MVVMATYPE(funct6, vm, vs2, vs1, vd) + <-> mvvmatype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPMVV (VVTYPE Widening) *************************** */ +union clause ast = WVVTYPE : (wvvfunct6, bits(1), regidx, regidx, regidx) +mapping encdec_wvvfunct6 : wvvfunct6 <-> bits(6) = { + WVV_VADD <-> 0b110001, + WVV_VSUB <-> 0b110011, + WVV_VADDU <-> 0b110000, + WVV_VSUBU <-> 0b110010, + WVV_VWMUL <-> 0b111011, + WVV_VWMULU <-> 0b111000, + WVV_VWMULSU <-> 0b111010 +} + +mapping clause encdec = WVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_wvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(WVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let 
LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + WVV_VADD => to_bits(SEW_widen, signed(vs2_val[i]) + signed(vs1_val[i])), + WVV_VSUB => to_bits(SEW_widen, signed(vs2_val[i]) - signed(vs1_val[i])), + WVV_VADDU => to_bits(SEW_widen, unsigned(vs2_val[i]) + unsigned(vs1_val[i])), + WVV_VSUBU => to_bits(SEW_widen, unsigned(vs2_val[i]) - unsigned(vs1_val[i])), + WVV_VWMUL => to_bits(SEW_widen, signed(vs2_val[i]) * signed(vs1_val[i])), + WVV_VWMULU => to_bits(SEW_widen, unsigned(vs2_val[i]) * unsigned(vs1_val[i])), + WVV_VWMULSU => to_bits(SEW_widen, signed(vs2_val[i]) * unsigned(vs1_val[i])) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping wvvtype_mnemonic : wvvfunct6 <-> string = { + WVV_VADD <-> "vwadd.vv", + WVV_VSUB <-> "vwsub.vv", + WVV_VADDU <-> "vwaddu.vv", + WVV_VSUBU <-> "vwsubu.vv", + WVV_VWMUL <-> "vwmul.vv", + WVV_VWMULU <-> "vwmulu.vv", + WVV_VWMULSU <-> "vwmulsu.vv" +} + +mapping clause assembly = WVVTYPE(funct6, vm, vs2, vs1, vd) + <-> wvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ******************************* OPMVV (WVTYPE) ******************************** */ +union clause ast = WVTYPE : (wvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_wvfunct6 : wvfunct6 <-> bits(6) = { + WV_VADD <-> 0b110101, + WV_VSUB <-> 0b110111, + WV_VADDU <-> 0b110100, + WV_VSUBU <-> 0b110110 +} + +mapping clause encdec = WVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_wvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(WVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= 
SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + WV_VADD => to_bits(SEW_widen, signed(vs2_val[i]) + signed(vs1_val[i])), + WV_VSUB => to_bits(SEW_widen, signed(vs2_val[i]) - signed(vs1_val[i])), + WV_VADDU => to_bits(SEW_widen, unsigned(vs2_val[i]) + unsigned(vs1_val[i])), + WV_VSUBU => to_bits(SEW_widen, unsigned(vs2_val[i]) - unsigned(vs1_val[i])) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping wvtype_mnemonic : wvfunct6 <-> string = { + WV_VADD <-> "vwadd.wv", + WV_VSUB <-> "vwsub.wv", + WV_VADDU <-> "vwaddu.wv", + WV_VSUBU <-> "vwsubu.wv" +} + +mapping clause assembly = WVTYPE(funct6, vm, vs2, vs1, vd) + <-> wvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ******************** OPMVV (MVVtype Widening Multiply-Add) ******************** */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = WMVVTYPE : (wmvvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_wmvvfunct6 : wmvvfunct6 <-> bits(6) = { + WMVV_VWMACCU <-> 0b111100, + WMVV_VWMACC <-> 0b111101, + WMVV_VWMACCSU <-> 0b111111 +} + +mapping clause encdec = WMVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_wmvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(WMVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + WMVV_VWMACC => to_bits(SEW_widen, signed(vs1_val[i]) * signed(vs2_val[i])) + vd_val[i], + WMVV_VWMACCU => to_bits(SEW_widen, unsigned(vs1_val[i]) * unsigned(vs2_val[i])) + vd_val[i], + WMVV_VWMACCSU => to_bits(SEW_widen, signed(vs1_val[i]) * unsigned(vs2_val[i]))+ vd_val[i] + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping wmvvtype_mnemonic : wmvvfunct6 <-> string = { + WMVV_VWMACCU <-> "vwmaccu.vv", + WMVV_VWMACC <-> "vwmacc.vv", + WMVV_VWMACCSU <-> "vwmaccsu.vv" +} + +mapping clause assembly = WMVVTYPE(funct6, vm, vs2, vs1, vd) + <-> wmvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* 
****************************** OPMVV (VXUNARY0) ******************************* */ +/* ******************* Vector Integer Extension (SEW/2 source) ******************* */ +union clause ast = VEXT2TYPE : (vext2funct6, bits(1), regidx, regidx) + +mapping vext2_vs1 : vext2funct6 <-> bits(5) = { + VEXT2_ZVF2 <-> 0b00110, + VEXT2_SVF2 <-> 0b00111 +} + +mapping clause encdec = VEXT2TYPE(funct6, vm, vs2, vd) if haveRVV() + <-> 0b010010 @ vm @ vs2 @ vext2_vs1(funct6) @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VEXT2TYPE(funct6, vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_half = SEW / 2; + let LMUL_pow_half = LMUL_pow - 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_half, LMUL_pow)) | + ~(valid_eew_emul(SEW_half, LMUL_pow_half)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_half; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_half, LMUL_pow_half, vs2); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(SEW > SEW_half); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VEXT2_ZVF2 => EXTZ(vs2_val[i]), + VEXT2_SVF2 => EXTS(vs2_val[i]) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vext2type_mnemonic : vext2funct6 <-> string = { + VEXT2_ZVF2 <-> "vzext.vf2", + VEXT2_SVF2 <-> "vsext.vf2" +} + +mapping clause assembly = VEXT2TYPE(funct6, vm, vs2, vd) + <-> vext2type_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ****************************** OPMVV (VXUNARY0) ******************************* */ +/* ******************* Vector Integer Extension (SEW/4 source) ******************* */ +union clause ast = VEXT4TYPE : (vext4funct6, bits(1), regidx, regidx) + +mapping vext4_vs1 : vext4funct6 <-> bits(5) = { + VEXT4_ZVF4 <-> 0b00100, + VEXT4_SVF4 <-> 0b00101 +} + +mapping clause encdec = VEXT4TYPE(funct6, vm, vs2, vd) if haveRVV() + <-> 0b010010 @ vm @ vs2 @ vext4_vs1(funct6) @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VEXT4TYPE(funct6, vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_quart = SEW / 4; + let LMUL_pow_quart = LMUL_pow - 2; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_quart, LMUL_pow)) | + ~(valid_eew_emul(SEW_quart, LMUL_pow_quart)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_quart; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_quart, LMUL_pow_quart, vs2); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(SEW > SEW_quart); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VEXT4_ZVF4 => 
EXTZ(vs2_val[i]), + VEXT4_SVF4 => EXTS(vs2_val[i]) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vext4type_mnemonic : vext4funct6 <-> string = { + VEXT4_ZVF4 <-> "vzext.vf4", + VEXT4_SVF4 <-> "vsext.vf4" +} + +mapping clause assembly = VEXT4TYPE(funct6, vm, vs2, vd) + <-> vext4type_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ****************************** OPMVV (VXUNARY0) ******************************* */ +/* ******************* Vector Integer Extension (SEW/8 source) ******************* */ +union clause ast = VEXT8TYPE : (vext8funct6, bits(1), regidx, regidx) + +mapping vext8_vs1 : vext8funct6 <-> bits(5) = { + VEXT8_ZVF8 <-> 0b00010, + VEXT8_SVF8 <-> 0b00011 +} + +mapping clause encdec = VEXT8TYPE(funct6, vm, vs2, vd) if haveRVV() + <-> 0b010010 @ vm @ vs2 @ vext8_vs1(funct6) @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VEXT8TYPE(funct6, vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_eighth = SEW / 8; + let LMUL_pow_eighth = LMUL_pow - 3; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_eighth, LMUL_pow)) | + ~(valid_eew_emul(SEW_eighth, LMUL_pow_eighth)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_eighth; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_eighth, LMUL_pow_eighth, vs2); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(SEW > SEW_eighth); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VEXT8_ZVF8 => EXTZ(vs2_val[i]), + VEXT8_SVF8 => EXTS(vs2_val[i]) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vext8type_mnemonic : vext8funct6 <-> string = { + VEXT8_ZVF8 <-> "vzext.vf8", + VEXT8_SVF8 <-> "vsext.vf8" +} + +mapping clause assembly = VEXT8TYPE(funct6, vm, vs2, vd) + <-> vext8type_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************ OPMVV (vmv.x.s in VWXUNARY0) ************************* */ +union clause ast = VMVXS : (regidx, regidx) + +mapping clause encdec = VMVXS(vs2, rd) if haveRVV() + <-> 0b010000 @ 0b1 @ vs2 @ 0b00000 @ 0b010 @ rd @ 0b1010111 if haveRVV() + +function clause execute(VMVXS(vs2, rd)) = { + let SEW = get_sew(); + let num_elem = get_num_elem(0, SEW); + + assert(num_elem > 0); + let 'n = num_elem; + let 'm = SEW; + + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vs2); + X(rd) = if sizeof(xlen) < SEW then slice(vs2_val[0], 0, sizeof(xlen)) + else if sizeof(xlen) > SEW then EXTS(vs2_val[0]) + else vs2_val[0]; + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = VMVXS(vs2, rd) + <-> "vmv.x.s" ^ spc() ^ reg_name(rd) ^ sep() ^ vreg_name(vs2) + +/* ********************* OPMVV (Vector Compress Instruction) ********************* */ +union clause ast = MVVCOMPRESS : (regidx, regidx, regidx) + +mapping clause encdec = MVVCOMPRESS(vs2, vs1, vd) if haveRVV() + <-> 0b010111 @ 0b1 @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + 
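+/* Illustrative example for the execute clause below (hypothetical values, not
+ * taken from the specification text): with num_elem = 4, vs2 = [e0, e1, e2, e3]
+ * and the mask register vs1 = 0b1010 (bits 1 and 3 set), the body elements
+ * packed into vd are vd[0] = e1 and vd[1] = e3; the remaining elements of vd
+ * are then handled as tail elements according to vta.
+ */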
+function clause execute(MVVCOMPRESS(vs2, vs1, vd)) = { + let start_element = get_start_element(); + let end_element = get_end_element(); + + /* vcompress should always be executed with a vstart of 0 */ + if (start_element != 0 | vs1 == vd | vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vs1_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + /* body elements */ + vd_idx : nat = 0; + foreach (i from 0 to (num_elem - 1)) { + if i <= end_element then { + if vs1_val[i] then { + let 'p = vd_idx; + assert('p < 'n); + result['p] = vs2_val[i]; + vd_idx = vd_idx + 1; + } + } + }; + /* tail elements */ + if vd_idx < num_elem then { + let tail_ag : agtype = get_vtype_vta(); + let 'p = vd_idx; + if tail_ag == UNDISTURBED then { + foreach (i from 'p to (num_elem - 1)) result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { /* TODO: configuration support */ + foreach (i from 'p to (num_elem - 1)) result[i] = vd_val[i] + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = MVVCOMPRESS(vs2, vs1, vd) + <-> "vcompress.vm" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) + +/* ******************************* OPMVX (MVXTYPE) ******************************* */ +union clause ast = MVXTYPE : (mvxfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_mvxfunct6 : mvxfunct6 <-> bits(6) = { + MVX_VAADDU <-> 0b001000, + MVX_VAADD <-> 0b001001, + MVX_VASUBU <-> 0b001010, + MVX_VASUB <-> 0b001011, + MVX_VSLIDE1UP <-> 0b001110, + MVX_VSLIDE1DOWN <-> 0b001111, + MVX_VMUL <-> 0b100101, + MVX_VMULH <-> 0b100111, + MVX_VMULHU <-> 0b100100, + MVX_VMULHSU <-> 0b100110, + MVX_VDIVU <-> 0b100000, + MVX_VDIV <-> 0b100001, + MVX_VREMU <-> 0b100010, + MVX_VREM <-> 0b100011 +} + +mapping clause encdec = MVXTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_mvxfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MVXTYPE(funct6, vm, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + MVX_VAADDU => { + let result_add = EXTZ('m + 1, vs2_val[i]) + EXTZ('m + 1, rs1_val); + let rounding_incr = get_fixed_rounding_incr(result_add, 1); + slice(result_add >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVX_VAADD => { + let result_add = EXTS('m + 1, vs2_val[i]) + EXTS('m + 1, rs1_val); + let rounding_incr = get_fixed_rounding_incr(result_add, 1); + 
slice(result_add >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVX_VASUBU => { + let result_sub = EXTZ('m + 1, vs2_val[i]) - EXTZ('m + 1, rs1_val); + let rounding_incr = get_fixed_rounding_incr(result_sub, 1); + slice(result_sub >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVX_VASUB => { + let result_sub = EXTS('m + 1, vs2_val[i]) - EXTS('m + 1, rs1_val); + let rounding_incr = get_fixed_rounding_incr(result_sub, 1); + slice(result_sub >> 1, 0, 'm) + EXTZ('m, rounding_incr) + }, + MVX_VSLIDE1UP => { + if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + if i == 0 then rs1_val else vs2_val[i - 1] + }, + MVX_VSLIDE1DOWN => { + if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; + let last_elem = get_end_element(); + assert(last_elem < num_elem); + if i < last_elem then vs2_val[i + 1] else rs1_val + }, + MVX_VMUL => get_slice_int(SEW, signed(vs2_val[i]) * signed(rs1_val), 0), + MVX_VMULH => get_slice_int(SEW, signed(vs2_val[i]) * signed(rs1_val), SEW), + MVX_VMULHU => get_slice_int(SEW, unsigned(vs2_val[i]) * unsigned(rs1_val), SEW), + MVX_VMULHSU => get_slice_int(SEW, signed(vs2_val[i]) * unsigned(rs1_val), SEW), + MVX_VDIVU => { + let q : int = if unsigned(rs1_val) == 0 then -1 else quot_round_zero(unsigned(vs2_val[i]), unsigned(rs1_val)); + to_bits(SEW, q) + }, + MVX_VDIV => { + let elem_max : int = 2 ^ (SEW - 1) - 1; + let elem_min : int = 0 - 2 ^ (SEW - 1); + let q : int = if signed(rs1_val) == 0 then -1 else quot_round_zero(signed(vs2_val[i]), signed(rs1_val)); + /* check for signed overflow */ + let q' : int = if q > elem_max then elem_min else q; + to_bits(SEW, q') + }, + MVX_VREMU => { + let r : int = if unsigned(rs1_val) == 0 then unsigned(vs2_val[i]) else rem_round_zero(unsigned(vs2_val[i]), unsigned (rs1_val)); + /* signed overflow case returns zero naturally as required due to -1 divisor */ + to_bits(SEW, r) + }, + MVX_VREM => { + let r : int = if signed(rs1_val) == 0 then signed(vs2_val[i]) else rem_round_zero(signed(vs2_val[i]), signed(rs1_val)); + /* signed overflow case returns zero naturally as required due to -1 divisor */ + to_bits(SEW, r) + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping mvxtype_mnemonic : mvxfunct6 <-> string = { + MVX_VAADDU <-> "vaaddu.vx", + MVX_VAADD <-> "vaadd.vx", + MVX_VASUBU <-> "vasubu.vx", + MVX_VASUB <-> "vasub.vx", + MVX_VSLIDE1UP <-> "vslide1up.vx", + MVX_VSLIDE1DOWN <-> "vslide1down.vx", + MVX_VMUL <-> "vmul.vx", + MVX_VMULH <-> "vmulh.vx", + MVX_VMULHU <-> "vmulhu.vx", + MVX_VMULHSU <-> "vmulhsu.vx", + MVX_VDIVU <-> "vdivu.vx", + MVX_VDIV <-> "vdiv.vx", + MVX_VREMU <-> "vremu.vx", + MVX_VREM <-> "vrem.vx" +} + +mapping clause assembly = MVXTYPE(funct6, vm, vs2, rs1, vd) + <-> mvxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ************************ OPMVX (MVXtype Multiply-Add) ************************* */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = MVXMATYPE : (mvxmafunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_mvxmafunct6 : mvxmafunct6 <-> bits(6) = { + MVX_VMACC <-> 0b101101, + MVX_VNMSAC <-> 0b101111, + MVX_VMADD <-> 0b101001, + MVX_VNMSUB <-> 0b101011 +} + +mapping clause encdec = MVXMATYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_mvxmafunct6(funct6) @ vm @ vs2 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MVXMATYPE(funct6, vm, vs2, rs1, vd)) = { 
+ let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let VLEN = int_power(2, get_vlen_pow()); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + assert(VLEN >= 0); + foreach (i from 0 to (num_elem - 1)){ + if mask[i] then{ + result[i] = match funct6 { + MVX_VMACC => get_slice_int(SEW, signed(rs1_val) * signed(vs2_val[i]), 0) + vd_val[i], + MVX_VNMSAC => vd_val[i] - get_slice_int(SEW, signed(rs1_val) * signed(vs2_val[i]), 0), + MVX_VMADD => get_slice_int(SEW, signed(rs1_val) * signed(vd_val[i]), 0) + vs2_val[i], + MVX_VNMSUB => vs2_val[i] - get_slice_int(SEW, signed(rs1_val) * signed(vd_val[i]), 0) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping mvxmatype_mnemonic : mvxmafunct6 <-> string = { + MVX_VMACC <-> "vmacc.vx", + MVX_VNMSAC <-> "vnmsac.vx", + MVX_VMADD <-> "vmadd.vx", + MVX_VNMSUB <-> "vnmsub.vx" +} + +mapping clause assembly = MVXMATYPE(funct6, vm, vs2, rs1, vd) + <-> mvxmatype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPMVX (VXTYPE Widening) *************************** */ +union clause ast = WVXTYPE : (wvxfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_wvxfunct6 : wvxfunct6 <-> bits(6) = { + WVX_VADD <-> 0b110001, + WVX_VSUB <-> 0b110011, + WVX_VADDU <-> 0b110000, + WVX_VSUBU <-> 0b110010, + WVX_VWMUL <-> 0b111011, + WVX_VWMULU <-> 0b111000, + WVX_VWMULSU <-> 0b111010 +} + +mapping clause encdec = WVXTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_wvxfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(WVXTYPE(funct6, vm, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + WVX_VADD => to_bits(SEW_widen, signed(vs2_val[i]) + signed(rs1_val)), + WVX_VSUB => to_bits(SEW_widen, signed(vs2_val[i]) - signed(rs1_val)), + WVX_VADDU => to_bits(SEW_widen, 
unsigned(vs2_val[i]) + unsigned(rs1_val)), + WVX_VSUBU => to_bits(SEW_widen, unsigned(vs2_val[i]) - unsigned(rs1_val)), + WVX_VWMUL => to_bits(SEW_widen, signed(vs2_val[i]) * signed(rs1_val)), + WVX_VWMULU => to_bits(SEW_widen, unsigned(vs2_val[i]) * unsigned(rs1_val)), + WVX_VWMULSU => to_bits(SEW_widen, signed(vs2_val[i]) * unsigned(rs1_val)) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping wvxtype_mnemonic : wvxfunct6 <-> string = { + WVX_VADD <-> "vwadd.vx", + WVX_VSUB <-> "vwsub.vx", + WVX_VADDU <-> "vwaddu.vx", + WVX_VSUBU <-> "vwsubu.vx", + WVX_VWMUL <-> "vwmul.vx", + WVX_VWMULU <-> "vwmulu.vx", + WVX_VWMULSU <-> "vwmulsu.vx" +} + +mapping clause assembly = WVXTYPE(funct6, vm, vs2, rs1, vd) + <-> wvxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ******************************* OPMVX (WXTYPE) ******************************** */ +union clause ast = WXTYPE : (wxfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_wxfunct6 : wxfunct6 <-> bits(6) = { + WX_VADD <-> 0b110101, + WX_VSUB <-> 0b110111, + WX_VADDU <-> 0b110100, + WX_VSUBU <-> 0b110110 +} + +mapping clause encdec = WXTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_wxfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(WXTYPE(funct6, vm, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + WX_VADD => to_bits(SEW_widen, signed(vs2_val[i]) + signed(rs1_val)), + WX_VSUB => to_bits(SEW_widen, signed(vs2_val[i]) - signed(rs1_val)), + WX_VADDU => to_bits(SEW_widen, unsigned(vs2_val[i]) + unsigned(rs1_val)), + WX_VSUBU => to_bits(SEW_widen, unsigned(vs2_val[i]) - unsigned(rs1_val)) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping wxtype_mnemonic : wxfunct6 <-> string = { + WX_VADD <-> "vwadd.wx", + WX_VSUB <-> "vwsub.wx", + WX_VADDU <-> "vwaddu.wx", + WX_VSUBU <-> "vwsubu.wx" +} + +mapping clause assembly = WXTYPE(funct6, vm, vs2, rs1, vd) + <-> wxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ******************** OPMVX (MVXtype Widening Multiply-Add) ******************** */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = WMVXTYPE : (wmvxfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_wmvxfunct6 : wmvxfunct6 <-> bits(6) = { + WMVX_VWMACCU 
<-> 0b111100, + WMVX_VWMACC <-> 0b111101, + WMVX_VWMACCUS <-> 0b111110, + WMVX_VWMACCSU <-> 0b111111 +} + +mapping clause encdec = WMVXTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_wmvxfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(WMVXTYPE(funct6, vm, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | + ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + WMVX_VWMACCU => (to_bits(SEW_widen, unsigned(rs1_val) * unsigned(vs2_val[i]) )) + vd_val[i], + WMVX_VWMACC => (to_bits(SEW_widen, signed(rs1_val) * signed(vs2_val[i]) )) + vd_val[i], + WMVX_VWMACCUS => (to_bits(SEW_widen, unsigned(rs1_val) * signed(vs2_val[i]) ))+ vd_val[i], + WMVX_VWMACCSU => (to_bits(SEW_widen, signed(rs1_val) * unsigned(vs2_val[i]) ))+ vd_val[i] + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping wmvxtype_mnemonic : wmvxfunct6 <-> string = { + WMVX_VWMACCU <-> "vwmaccu.vx", + WMVX_VWMACC <-> "vwmacc.vx", + WMVX_VWMACCUS <-> "vwmaccus.vx", + WMVX_VWMACCSU <-> "vwmaccsu.vx" +} + +mapping clause assembly = WMVXTYPE(funct6, vm, vs2, rs1, vd) + <-> wmvxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ****************************** OPMVX (VRXUNARY0) ****************************** */ +union clause ast = VMVSX : (regidx, regidx) + +mapping clause encdec = VMVSX(rs1, vd) if haveRVV() + <-> 0b010000 @ 0b1 @ 0b00000 @ rs1 @ 0b110 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMVSX(rs1, vd)) = { + let SEW = get_sew(); + let num_elem = get_num_elem(0, SEW); + + assert(num_elem > 0); + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, 'm); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, 0, vd_val, vm_val); + + /* one body element */ + if mask[0] then result[0] = rs1_val; + + /* others treated as tail elements */ + let tail_ag : agtype = get_vtype_vta(); + if tail_ag == UNDISTURBED then { + foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] /* TODO: configuration support */ + }; + + write_vreg(num_elem, SEW, 0, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping 
clause assembly = VMVSX(rs1, vd) + <-> "vmv.s.x" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) diff --git a/model/riscv_insts_vext_mask.sail b/model/riscv_insts_vext_mask.sail new file mode 100755 index 000000000..cac369152 --- /dev/null +++ b/model/riscv_insts_vext_mask.sail @@ -0,0 +1,375 @@ +/* ******************************************************************************* */ +/* This file implements part of the vector extension. */ +/* Chapter 15: vector mask instructions */ +/* ******************************************************************************* */ + +/* ******************************* OPMVV (MMTYPE) ******************************** */ +union clause ast = MMTYPE : (mmfunct6, regidx, regidx, regidx) + +mapping encdec_mmfunct6 : mmfunct6 <-> bits(6) = { + MM_VMAND <-> 0b011001, + MM_VMNAND <-> 0b011101, + MM_VMANDNOT <-> 0b011000, + MM_VMXOR <-> 0b011011, + MM_VMOR <-> 0b011010, + MM_VMNOR <-> 0b011110, + MM_VMORNOT <-> 0b011100, + MM_VMXNOR <-> 0b011111 +} + +mapping clause encdec = MMTYPE(funct6, vs2, vs1, vd) if haveRVV() + <-> encdec_mmfunct6(funct6) @ 0b1 @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(MMTYPE(funct6, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vs1_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs1); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + MM_VMAND => vs2_val[i] & vs1_val[i], + MM_VMNAND => ~(vs2_val[i] & vs1_val[i]), + MM_VMANDNOT => vs2_val[i] & ~(vs1_val[i]), + MM_VMXOR => vs2_val[i] ^ vs1_val[i], + MM_VMOR => vs2_val[i] | vs1_val[i], + MM_VMNOR => ~(vs2_val[i] | vs1_val[i]), + MM_VMORNOT => vs2_val[i] | ~(vs1_val[i]), + MM_VMXNOR => ~(vs2_val[i] ^ vs1_val[i]) + }; + result[i] = res; + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping mmtype_mnemonic : mmfunct6 <-> string = { + MM_VMAND <-> "vmand.mm", + MM_VMNAND <-> "vmnand.mm", + MM_VMANDNOT <-> "vmandnot.mm", + MM_VMXOR <-> "vmxor.mm", + MM_VMOR <-> "vmor.mm", + MM_VMNOR <-> "vmnor.mm", + MM_VMORNOT <-> "vmornot.mm", + MM_VMXNOR <-> "vmxnor.mm" +} + +mapping clause assembly = MMTYPE(funct6, vs2, vs1, vd) + <-> mmtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) + +/* ************************* OPMVV (vpopc in VWXUNARY0) ************************** */ +union clause ast = VCPOP_M : (bits(1), regidx, regidx) + +mapping clause encdec = VCPOP_M(vm, vs2, rd) if haveRVV() + <-> 0b010000 @ vm @ vs2 @ 0b10000 @ 0b010 @ rd @ 0b1010111 if haveRVV() + +function clause execute(VCPOP_M(vm, vs2, rd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + /* Value of vstart must be 0 */ + if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + + 
(result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vs2_val, vm_val); + + count : nat = 0; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] & vs2_val[i] then count = count + 1; + }; + + X(rd) = to_bits(sizeof(xlen), count); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VCPOP_M(vm, vs2, rd) + <-> "vpopc.m" ^ spc() ^ reg_name(rd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************* OPMVV (vfirst in VWXUNARY0) ************************* */ +union clause ast = VFIRST_M : (bits(1), regidx, regidx) + +mapping clause encdec = VFIRST_M(vm, vs2, rd) if haveRVV() + <-> 0b010000 @ vm @ vs2 @ 0b10001 @ 0b010 @ rd @ 0b1010111 if haveRVV() + +function clause execute(VFIRST_M(vm, vs2, rd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + /* Value of vstart must be 0 */ + if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vs2_val, vm_val); + + index : int = -1; + foreach (i from 0 to (num_elem - 1)) { + if index == -1 then { + if mask[i] & vs2_val[i] then index = i; + }; + }; + + X(rd) = to_bits(sizeof(xlen), index); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VFIRST_M(vm, vs2, rd) + <-> "vfirst.m" ^ spc() ^ reg_name(rd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************** OPMVV (vmsbf in VMUNARY0) ************************** */ +union clause ast = VMSBF_M : (bits(1), regidx, regidx) + +mapping clause encdec = VMSBF_M(vm, vs2, vd) if haveRVV() + <-> 0b010100 @ vm @ vs2 @ 0b00001 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMSBF_M(vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + /* Value of vstart must be 0 */ + if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + + /* If masking is enabled, then dest reg cannot be v0 */ + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + /* Dest reg cannot be the same as source reg */ + if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + found_elem : bool = false; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + if vs2_val[i] then found_elem = true; + result[i] = if found_elem then false else true + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VMSBF_M(vm, vs2, vd) + <-> "vmsbf.m" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************** OPMVV (vmsif in VMUNARY0) ************************** */ +union clause ast = VMSIF_M : (bits(1), regidx, regidx) + +mapping clause encdec = VMSIF_M(vm, vs2, vd) if haveRVV() + <-> 0b010100 @ 
vm @ vs2 @ 0b00011 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMSIF_M(vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + /* Value of vstart must be 0 */ + if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + + /* If masking is enabled, then dest reg cannot be v0 */ + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + /* Dest reg cannot be the same as source reg */ + if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + found_elem : bool = false; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = if found_elem then false else true; + if vs2_val[i] then found_elem = true + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VMSIF_M(vm, vs2, vd) + <-> "vmsif.m" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************** OPMVV (vmsof in VMUNARY0) ************************** */ +union clause ast = VMSOF_M : (bits(1), regidx, regidx) + +mapping clause encdec = VMSOF_M(vm, vs2, vd) if haveRVV() + <-> 0b010100 @ vm @ vs2 @ 0b00010 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VMSOF_M(vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + /* Value of vstart must be 0 */ + if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + + /* If masking is enabled, then dest reg cannot be v0 */ + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + /* Dest reg cannot be the same as source reg */ + if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + found_elem : bool = false; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + if vs2_val[i] & ~(found_elem) then { + result[i] = true; + found_elem = true + } else { + result[i] = false + } + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VMSOF_M(vm, vs2, vd) + <-> "vmsof.m" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************** OPMVV (viota in VMUNARY0) ************************** */ +union clause ast = VIOTA_M : (bits(1), regidx, regidx) + +mapping clause encdec = VIOTA_M(vm, vs2, vd) if haveRVV() + <-> 0b010100 @ vm @ vs2 @ 0b10000 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VIOTA_M(vm, vs2, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; 
+ + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + /* Value of vstart must be 0 */ + if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + + /* If masking is enabled, then dest reg cannot be v0 */ + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + /* Dest reg cannot be the same as source reg */ + if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + sum : int = 0; + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = to_bits(SEW, sum); + if vs2_val[i] then sum = sum + 1 + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VIOTA_M(vm, vs2, vd) + <-> "viota.m" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPMVV (vid in VMUNARY0) *************************** */ +union clause ast = VID_V : (bits(1), regidx) + +mapping clause encdec = VID_V(vm, vd) if haveRVV() + <-> 0b010100 @ vm @ 0b00000 @ 0b10001 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VID_V(vm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = to_bits(SEW, i) + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VID_V(vm, vd) + <-> "vid.v" ^ spc() ^ vreg_name(vd) ^ maybe_vmask(vm) diff --git a/model/riscv_insts_vext_mem.sail b/model/riscv_insts_vext_mem.sail index 81ff53092..0e50c0721 100644 --- a/model/riscv_insts_vext_mem.sail +++ b/model/riscv_insts_vext_mem.sail @@ -115,6 +115,8 @@ function clause execute(VLETYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vle(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -259,6 +261,8 @@ function clause execute(VLSETYPE(vm, rs2, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vlse(vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -402,7 +406,9 @@ function clause execute(VLUXEITYPE(vm, vs2, rs1, width, vd)) = { let EEW_data_bytes = get_sew_bytes(); let EMUL_data_pow = get_lmul_pow(); let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; - let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); + + if 
~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -423,7 +429,9 @@ function clause execute(VLOXEITYPE(vm, vs2, rs1, width, vd)) = { let EEW_data_bytes = get_sew_bytes(); let EMUL_data_pow = get_lmul_pow(); let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; - let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); + + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -605,6 +613,8 @@ function clause execute(VLEFFTYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vleff(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -683,6 +693,8 @@ function clause execute(VLSEGTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); /* # of element of each register group */ let nf_int = nfields_int(nf); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vlseg(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -774,6 +786,8 @@ function clause execute(VLSEGFFTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vlsegff(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -908,6 +922,8 @@ function clause execute(VLSSEGTYPE(nf, vm, rs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vlsseg(nf_int, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -1043,9 +1059,11 @@ function clause execute(VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let EEW_data_bytes = get_sew_bytes(); let EMUL_data_pow = get_lmul_pow(); let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; - let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -1065,9 +1083,11 @@ function clause execute(VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let EEW_data_bytes = get_sew_bytes(); let EMUL_data_pow = get_lmul_pow(); let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; - let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); + if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -1248,7 +1268,7 @@ function clause execute(VLRETYPE(nf, rs1, width, vd)) = { let nf_int = nfields_int(nf); assert(elem_per_reg >= 0); - assert(nf_int == 1 | nf_int == 2 | nf_int == 4 | 
nf_int == 8); + if ~(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8) then { handle_illegal(); return RETIRE_FAIL }; process_vlre(nf_int, vd, load_width_bytes, rs1, elem_per_reg) } @@ -1352,7 +1372,7 @@ function clause execute(VSRETYPE(nf, rs1, vs3)) = { let nf_int = nfields_int(nf); assert(elem_per_reg >= 0); - assert(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8); + if ~(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8) then { handle_illegal(); return RETIRE_FAIL }; process_vsre(nf_int, load_width_bytes, rs1, vs3, elem_per_reg) } diff --git a/model/riscv_insts_vext_utils.sail b/model/riscv_insts_vext_utils.sail index b71d24743..ff87d807a 100755 --- a/model/riscv_insts_vext_utils.sail +++ b/model/riscv_insts_vext_utils.sail @@ -8,12 +8,51 @@ mapping maybe_vmask : string <-> bits(1) = { sep() ^ "v0.t" <-> 0b0 } +/* Check for valid EEW and EMUL values in vector widening/narrowing instructions */ +val valid_eew_emul : (int, int) -> bool effect {rreg} +function valid_eew_emul(EEW, EMUL_pow) = { + let ELEN = int_power(2, get_elen_pow()); + EEW >= 8 & EEW <= ELEN & EMUL_pow >= -3 & EMUL_pow <= 3; +} + /* Check for vstart value */ val assert_vstart : int -> bool effect {rreg} function assert_vstart(i) = { unsigned(vstart) == i; } +/* Check for valid destination register when vector masking is enabled: + * The destination vector register group for a masked vector instruction + * cannot overlap the source mask register (v0), + * unless the destination vector register is being written with a mask value (e.g., compares) + * or the scalar result of a reduction. + */ +val valid_rd_mask : (regidx, bits(1)) -> bool +function valid_rd_mask(rd, vm) = { + vm != 0b0 | rd != vreg_name("v0") +} + +/* Check for valid register overlap in vector widening/narrowing instructions: + * In a widening instruction, the overlap is valid only in the highest-numbered part + * of the destination register group, and the source EMUL is at least 1. + * In a narrowing instruction, the overlap is valid only in the lowest-numbered part + * of the source register group. + */ +val valid_reg_overlap : (regidx, regidx, int, int) -> bool +function valid_reg_overlap(rs, rd, EMUL_pow_rs, EMUL_pow_rd) = { + let rs_group = if EMUL_pow_rs > 0 then int_power(2, EMUL_pow_rs) else 1; + let rd_group = if EMUL_pow_rd > 0 then int_power(2, EMUL_pow_rd) else 1; + let rs_int = unsigned(rs); + let rd_int = unsigned(rd); + let is_valid = if EMUL_pow_rs < EMUL_pow_rd then { + (rs_int + rs_group <= rd_int) | (rs_int >= rd_int + rd_group) | + ((rs_int + rs_group == rd_int + rd_group) & (EMUL_pow_rs >= 0)) + } else if EMUL_pow_rs > EMUL_pow_rd then { + (rd_int <= rs_int) | (rd_int >= rs_int + rs_group) + } else true; + is_valid +} + /* Scalar register shaping */ val get_scalar : forall 'n, 'n >= 8. 
(regidx, int('n)) -> bits('n) effect {escape, rreg} function get_scalar(rs1, vsew_bits) = { @@ -92,7 +131,7 @@ function init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val) = { result[i] = vd_val[i]; /* TODO: configuration support */ }; mask[i] = false - } else if vm_val[i] == false then { + } else if ~(vm_val[i]) then { /* Inactive body elements defined by vm */ if mask_ag == UNDISTURBED then { result[i] = vd_val[i] @@ -174,7 +213,7 @@ function init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val) = { /* Mask tail is always agnostic */ result[i] = vd_val[i]; /* TODO: configuration support */ mask[i] = false - } else if vm_val[i] == false then { + } else if ~(vm_val[i]) then { /* Inactive body elements defined by vm */ if mask_ag == UNDISTURBED then { result[i] = vd_val[i] diff --git a/model/riscv_insts_vext_vm.sail b/model/riscv_insts_vext_vm.sail new file mode 100755 index 000000000..19b34c824 --- /dev/null +++ b/model/riscv_insts_vext_vm.sail @@ -0,0 +1,202 @@ +/* ******************************************************************************* */ +/* This file implements part of the vector extension. */ +/* Mask instructions from Chap 11 (integer arithmetic) and 13 (floating-point) */ +/* ******************************************************************************* */ + +/* ***************** OPIVV (Vector Integer Compare Instructions) ***************** */ +/* VVCMP instructions' destination is a mask register */ +union clause ast = VVCMPTYPE : (vvcmpfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_vvcmpfunct6 : vvcmpfunct6 <-> bits(6) = { + VVCMP_VMSEQ <-> 0b011000, + VVCMP_VMSNE <-> 0b011001, + VVCMP_VMSLTU <-> 0b011010, + VVCMP_VMSLT <-> 0b011011, + VVCMP_VMSLEU <-> 0b011100, + VVCMP_VMSLE <-> 0b011101 +} + +mapping clause encdec = VVCMPTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_vvcmpfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VVCMPTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VVCMP_VMSEQ => vs2_val[i] == vs1_val[i], + VVCMP_VMSNE => vs2_val[i] != vs1_val[i], + VVCMP_VMSLTU => unsigned(vs2_val[i]) < unsigned(vs1_val[i]), + VVCMP_VMSLT => signed(vs2_val[i]) < signed(vs1_val[i]), + VVCMP_VMSLEU => unsigned(vs2_val[i]) <= unsigned(vs1_val[i]), + VVCMP_VMSLE => signed(vs2_val[i]) <= signed(vs1_val[i]) + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vvcmptype_mnemonic : vvcmpfunct6 <-> string = { + VVCMP_VMSEQ <-> "vmseq.vv", + VVCMP_VMSNE <-> "vmsne.vv", + VVCMP_VMSLTU <-> "vmsltu.vv", + VVCMP_VMSLT <-> "vmslt.vv", + VVCMP_VMSLEU <-> "vmsleu.vv", + VVCMP_VMSLE <-> "vmsle.vv" +} + +mapping clause assembly = VVCMPTYPE(funct6, vm, vs2, vs1, vd) + <-> vvcmptype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() 
^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ***************** OPIVX (Vector Integer Compare Instructions) ***************** */ +/* VXCMP instructions' destination is a mask register */ +union clause ast = VXCMPTYPE : (vxcmpfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_vxcmpfunct6 : vxcmpfunct6 <-> bits(6) = { + VXCMP_VMSEQ <-> 0b011000, + VXCMP_VMSNE <-> 0b011001, + VXCMP_VMSLTU <-> 0b011010, + VXCMP_VMSLT <-> 0b011011, + VXCMP_VMSLEU <-> 0b011100, + VXCMP_VMSLE <-> 0b011101, + VXCMP_VMSGTU <-> 0b011110, + VXCMP_VMSGT <-> 0b011111 +} + +mapping clause encdec = VXCMPTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_vxcmpfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VXCMPTYPE(funct6, vm, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VXCMP_VMSEQ => vs2_val[i] == rs1_val, + VXCMP_VMSNE => vs2_val[i] != rs1_val, + VXCMP_VMSLTU => unsigned(vs2_val[i]) < unsigned(rs1_val), + VXCMP_VMSLT => signed(vs2_val[i]) < signed(rs1_val), + VXCMP_VMSLEU => unsigned(vs2_val[i]) <= unsigned(rs1_val), + VXCMP_VMSLE => signed(vs2_val[i]) <= signed(rs1_val), + VXCMP_VMSGTU => unsigned(vs2_val[i]) > unsigned(rs1_val), + VXCMP_VMSGT => signed(vs2_val[i]) > signed(rs1_val) + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vxcmptype_mnemonic : vxcmpfunct6 <-> string = { + VXCMP_VMSEQ <-> "vmseq.vx", + VXCMP_VMSNE <-> "vmsne.vx", + VXCMP_VMSLTU <-> "vmsltu.vx", + VXCMP_VMSLT <-> "vmslt.vx", + VXCMP_VMSLEU <-> "vmsleu.vx", + VXCMP_VMSLE <-> "vmsle.vx", + VXCMP_VMSGTU <-> "vmsgtu.vx", + VXCMP_VMSGT <-> "vmsgt.vx" +} + +mapping clause assembly = VXCMPTYPE(funct6, vm, vs2, rs1, vd) + <-> vxcmptype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ***************** OPIVI (Vector Integer Compare Instructions) ***************** */ +/* VICMP instructions' destination is a mask register */ +union clause ast = VICMPTYPE : (vicmpfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_vicmpfunct6 : vicmpfunct6 <-> bits(6) = { + VICMP_VMSEQ <-> 0b011000, + VICMP_VMSNE <-> 0b011001, + VICMP_VMSLEU <-> 0b011100, + VICMP_VMSLE <-> 0b011101, + VICMP_VMSGTU <-> 0b011110, + VICMP_VMSGT <-> 0b011111 +} + +mapping clause encdec = VICMPTYPE(funct6, vm, vs2, simm, vd) if haveRVV() + <-> encdec_vicmpfunct6(funct6) @ vm @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VICMPTYPE(funct6, vm, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = 
read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VICMP_VMSEQ => vs2_val[i] == imm_val, + VICMP_VMSNE => vs2_val[i] != imm_val, + VICMP_VMSLEU => unsigned(vs2_val[i]) <= unsigned(imm_val), + VICMP_VMSLE => signed(vs2_val[i]) <= signed(imm_val), + VICMP_VMSGTU => unsigned(vs2_val[i]) > unsigned(imm_val), + VICMP_VMSGT => signed(vs2_val[i]) > signed(imm_val) + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vicmptype_mnemonic : vicmpfunct6 <-> string = { + VICMP_VMSEQ <-> "vmseq.vi", + VICMP_VMSNE <-> "vmsne.vi", + VICMP_VMSLEU <-> "vmsleu.vi", + VICMP_VMSLE <-> "vmsle.vi", + VICMP_VMSGTU <-> "vmsgtu.vi", + VICMP_VMSGT <-> "vmsgt.vi" +} + +mapping clause assembly = VICMPTYPE(funct6, vm, vs2, simm, vd) + <-> vicmptype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ maybe_vmask(vm) diff --git a/model/riscv_sys_regs.sail b/model/riscv_sys_regs.sail index dc72d11ed..a96a9e3f8 100644 --- a/model/riscv_sys_regs.sail +++ b/model/riscv_sys_regs.sail @@ -849,13 +849,16 @@ register vtype : Vtype /* this returns the power of 2 for SEW */ val get_sew_pow : unit -> {|3, 4, 5, 6|} effect {escape, rreg} function get_sew_pow() = { - match vtype.vsew() { + let ELEN_pow = get_elen_pow(); + let SEW_pow : {|3, 4, 5, 6|} = match vtype.vsew() { 0b000 => 3, 0b001 => 4, 0b010 => 5, 0b011 => 6, _ => {assert(false, "invalid vsew field in vtype"); 0} - } + }; + assert(SEW_pow <= ELEN_pow); + SEW_pow } /* this returns the actual value of SEW */ val get_sew : unit -> {|8, 16, 32, 64|} effect {escape, rreg} diff --git a/model/riscv_vlen.sail b/model/riscv_vlen.sail index 15816bc07..a411bf16b 100644 --- a/model/riscv_vlen.sail +++ b/model/riscv_vlen.sail @@ -1,10 +1,10 @@ register elen : bits(1) -val get_elen : unit -> {|32, 64|} effect {rreg} +val get_elen_pow : unit -> {|5, 6|} effect {rreg} -function get_elen() = match elen { - 0b0 => 32, - 0b1 => 64 +function get_elen_pow() = match elen { + 0b0 => 5, + 0b1 => 6 } register vlen : bits(4) From c5ab726fbc9b5e70af733f874af46695d059daa6 Mon Sep 17 00:00:00 2001 From: Xinlai Wan Date: Mon, 1 May 2023 22:56:20 +0800 Subject: [PATCH 04/11] Vector floating-point instructions (#232) * Add vector floating-point instructions * Update vector floating-point conversion instructions * Update copyright headers for vector extension code --------- Co-authored-by: xwan --- Makefile | 1 + .../SoftFloat-3e/build/Linux-386-GCC/Makefile | 24 +- .../build/Linux-386-SSE2-GCC/Makefile | 24 +- .../build/Linux-ARM-VFPv2-GCC/Makefile | 24 +- .../build/Linux-RISCV-GCC/Makefile | 25 +- .../build/Linux-x86_64-GCC/Makefile | 24 +- .../SoftFloat-3e/build/Win32-MinGW/Makefile | 24 +- .../build/Win32-SSE2-MinGW/Makefile | 24 +- .../build/Win64-MinGW-w64/Makefile | 24 +- .../build/template-FAST_INT64/Makefile | 24 +- .../build/template-not-FAST_INT64/Makefile | 24 +- .../SoftFloat-3e/source/8086-SSE/specialize.h | 14 + .../SoftFloat-3e/source/8086/specialize.h | 14 + .../source/ARM-VFPv2-defaultNaN/specialize.h | 14 + .../source/ARM-VFPv2/specialize.h | 14 + .../SoftFloat-3e/source/RISCV/specialize.h | 14 + c_emulator/SoftFloat-3e/source/f16_classify.c | 36 
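The three compare formats above (vector-vector, vector-scalar, vector-immediate) share one shape: each active element contributes a single bit to the destination mask register, v0 supplies the per-element execution mask when vm is 0, and init_masked_result_cmp handles inactive elements while treating the tail as always agnostic. A minimal C sketch of that per-element loop, under the simplifying assumptions stated in the comments (this is illustrative only, not the Sail model's code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of one vmseq.vx-style compare, assuming 64-bit elements,
     * num_elem body elements, no tail handling, and a mask-undisturbed
     * policy for inactive elements.
     * vm == true  : instruction is unmasked, every body element is active.
     * vm == false : v0 supplies the per-element execution mask.           */
    void vmseq_vx_sketch(size_t num_elem, bool vm, const uint64_t *vs2,
                         uint64_t rs1, const bool *v0, bool *vd_mask)
    {
        for (size_t i = 0; i < num_elem; i++) {
            bool active = vm || v0[i];
            if (active)
                vd_mask[i] = (vs2[i] == rs1);  /* one result bit per element */
            /* inactive elements leave vd_mask[i] unchanged */
        }
    }

Note how the encodings differ per format: the register-register form has six compares, while the scalar and immediate forms add the greater-than variants (vmsgtu/vmsgt), since "less than a vector" can always be rewritten by swapping operands of the vv form.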
+ c_emulator/SoftFloat-3e/source/f16_to_i16.c | 56 + c_emulator/SoftFloat-3e/source/f16_to_i8.c | 56 + c_emulator/SoftFloat-3e/source/f16_to_ui16.c | 53 + c_emulator/SoftFloat-3e/source/f16_to_ui8.c | 53 + c_emulator/SoftFloat-3e/source/f32_classify.c | 36 + c_emulator/SoftFloat-3e/source/f32_to_i16.c | 56 + c_emulator/SoftFloat-3e/source/f32_to_ui16.c | 53 + c_emulator/SoftFloat-3e/source/f64_classify.c | 36 + .../SoftFloat-3e/source/fall_reciprocal.c | 392 +++++ .../SoftFloat-3e/source/include/softfloat.h | 15 + c_emulator/riscv_softfloat.c | 235 +++ c_emulator/riscv_softfloat.h | 21 + .../0.11/riscv_extras_fdext.lem | 54 + handwritten_support/riscv_extras_fdext.lem | 54 + model/riscv_fdext_regs.sail | 10 +- model/riscv_insts_vext_arith.sail | 134 +- model/riscv_insts_vext_fp.sail | 1367 +++++++++++++++++ model/riscv_insts_vext_mask.sail | 72 +- model/riscv_insts_vext_mem.sail | 130 +- model/riscv_insts_vext_utils.sail | 141 +- model/riscv_insts_vext_vm.sail | 38 + model/riscv_insts_vext_vset.sail | 51 +- model/riscv_softfloat_interface.sail | 137 +- model/riscv_vext_control.sail | 38 + model/riscv_vext_regs.sail | 38 + model/riscv_vlen.sail | 48 + model/riscv_vreg_type.sail | 38 + ocaml_emulator/softfloat.ml | 54 + 45 files changed, 3647 insertions(+), 167 deletions(-) create mode 100755 c_emulator/SoftFloat-3e/source/f16_classify.c create mode 100644 c_emulator/SoftFloat-3e/source/f16_to_i16.c create mode 100644 c_emulator/SoftFloat-3e/source/f16_to_i8.c create mode 100644 c_emulator/SoftFloat-3e/source/f16_to_ui16.c create mode 100644 c_emulator/SoftFloat-3e/source/f16_to_ui8.c create mode 100755 c_emulator/SoftFloat-3e/source/f32_classify.c create mode 100644 c_emulator/SoftFloat-3e/source/f32_to_i16.c create mode 100644 c_emulator/SoftFloat-3e/source/f32_to_ui16.c create mode 100755 c_emulator/SoftFloat-3e/source/f64_classify.c create mode 100755 c_emulator/SoftFloat-3e/source/fall_reciprocal.c create mode 100755 model/riscv_insts_vext_fp.sail diff --git a/Makefile b/Makefile index e4520ddb0..523152667 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,7 @@ SAIL_DEFAULT_INST += riscv_insts_zbkx.sail SAIL_DEFAULT_INST += riscv_insts_vext_utils.sail SAIL_DEFAULT_INST += riscv_insts_vext_vset.sail SAIL_DEFAULT_INST += riscv_insts_vext_arith.sail +SAIL_DEFAULT_INST += riscv_insts_vext_fp.sail SAIL_DEFAULT_INST += riscv_insts_vext_mem.sail SAIL_DEFAULT_INST += riscv_insts_vext_mask.sail SAIL_DEFAULT_INST += riscv_insts_vext_vm.sail diff --git a/c_emulator/SoftFloat-3e/build/Linux-386-GCC/Makefile b/c_emulator/SoftFloat-3e/build/Linux-386-GCC/Makefile index 418160051..56c9bb016 100644 --- a/c_emulator/SoftFloat-3e/build/Linux-386-GCC/Makefile +++ b/c_emulator/SoftFloat-3e/build/Linux-386-GCC/Makefile @@ -166,8 +166,12 @@ OBJS_OTHERS = \ i64_to_f64$(OBJ) \ i64_to_extF80M$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -193,8 +197,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -220,6 +227,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -247,6 +255,7 @@ OBJS_OTHERS = \ 
f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80M_to_ui32$(OBJ) \ extF80M_to_ui64$(OBJ) \ extF80M_to_i32$(OBJ) \ @@ -299,12 +308,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -315,6 +332,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Linux-386-SSE2-GCC/Makefile b/c_emulator/SoftFloat-3e/build/Linux-386-SSE2-GCC/Makefile index 1cf6f5e1a..1bc4213b5 100644 --- a/c_emulator/SoftFloat-3e/build/Linux-386-SSE2-GCC/Makefile +++ b/c_emulator/SoftFloat-3e/build/Linux-386-SSE2-GCC/Makefile @@ -166,8 +166,12 @@ OBJS_OTHERS = \ i64_to_f64$(OBJ) \ i64_to_extF80M$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -193,8 +197,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -220,6 +227,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -247,6 +255,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80M_to_ui32$(OBJ) \ extF80M_to_ui64$(OBJ) \ extF80M_to_i32$(OBJ) \ @@ -299,12 +308,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -315,6 +332,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + 
$(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Linux-ARM-VFPv2-GCC/Makefile b/c_emulator/SoftFloat-3e/build/Linux-ARM-VFPv2-GCC/Makefile index 2565fe56c..021841406 100644 --- a/c_emulator/SoftFloat-3e/build/Linux-ARM-VFPv2-GCC/Makefile +++ b/c_emulator/SoftFloat-3e/build/Linux-ARM-VFPv2-GCC/Makefile @@ -164,8 +164,12 @@ OBJS_OTHERS = \ i64_to_f64$(OBJ) \ i64_to_extF80M$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -191,8 +195,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -218,6 +225,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -245,6 +253,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80M_to_ui32$(OBJ) \ extF80M_to_ui64$(OBJ) \ extF80M_to_i32$(OBJ) \ @@ -297,12 +306,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -313,6 +330,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Linux-RISCV-GCC/Makefile b/c_emulator/SoftFloat-3e/build/Linux-RISCV-GCC/Makefile index 9ec18f78f..3289aa5e7 100644 --- a/c_emulator/SoftFloat-3e/build/Linux-RISCV-GCC/Makefile +++ b/c_emulator/SoftFloat-3e/build/Linux-RISCV-GCC/Makefile @@ -38,6 +38,7 @@ SOURCE_DIR ?= ../../source SPECIALIZE_TYPE ?= RISCV SOFTFLOAT_OPTS ?= \ + -DSOFTFLOAT_ROUND_ODD DELETE = rm -f @@ -162,8 +163,12 @@ OBJS_OTHERS = \ i64_to_extF80M$(OBJ) \ i64_to_f128$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -191,8 +196,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -220,6 +228,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ 
f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -249,6 +258,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80_to_ui32$(OBJ) \ extF80_to_ui64$(OBJ) \ extF80_to_i32$(OBJ) \ @@ -354,12 +364,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -370,6 +388,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Linux-x86_64-GCC/Makefile b/c_emulator/SoftFloat-3e/build/Linux-x86_64-GCC/Makefile index 570337825..a43fc3ab9 100644 --- a/c_emulator/SoftFloat-3e/build/Linux-x86_64-GCC/Makefile +++ b/c_emulator/SoftFloat-3e/build/Linux-x86_64-GCC/Makefile @@ -172,8 +172,12 @@ OBJS_OTHERS = \ i64_to_extF80M$(OBJ) \ i64_to_f128$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -201,8 +205,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -230,6 +237,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -259,6 +267,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80_to_ui32$(OBJ) \ extF80_to_ui64$(OBJ) \ extF80_to_i32$(OBJ) \ @@ -364,12 +373,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -380,6 +397,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): 
$(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Win32-MinGW/Makefile b/c_emulator/SoftFloat-3e/build/Win32-MinGW/Makefile index 418160051..56c9bb016 100644 --- a/c_emulator/SoftFloat-3e/build/Win32-MinGW/Makefile +++ b/c_emulator/SoftFloat-3e/build/Win32-MinGW/Makefile @@ -166,8 +166,12 @@ OBJS_OTHERS = \ i64_to_f64$(OBJ) \ i64_to_extF80M$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -193,8 +197,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -220,6 +227,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -247,6 +255,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80M_to_ui32$(OBJ) \ extF80M_to_ui64$(OBJ) \ extF80M_to_i32$(OBJ) \ @@ -299,12 +308,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -315,6 +332,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Win32-SSE2-MinGW/Makefile b/c_emulator/SoftFloat-3e/build/Win32-SSE2-MinGW/Makefile index 1cf6f5e1a..1bc4213b5 100644 --- a/c_emulator/SoftFloat-3e/build/Win32-SSE2-MinGW/Makefile +++ b/c_emulator/SoftFloat-3e/build/Win32-SSE2-MinGW/Makefile @@ -166,8 +166,12 @@ OBJS_OTHERS = \ i64_to_f64$(OBJ) \ i64_to_extF80M$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -193,8 +197,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -220,6 +227,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ 
f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -247,6 +255,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80M_to_ui32$(OBJ) \ extF80M_to_ui64$(OBJ) \ extF80M_to_i32$(OBJ) \ @@ -299,12 +308,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -315,6 +332,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/Win64-MinGW-w64/Makefile b/c_emulator/SoftFloat-3e/build/Win64-MinGW-w64/Makefile index 05bbf5bb5..05f3696c6 100644 --- a/c_emulator/SoftFloat-3e/build/Win64-MinGW-w64/Makefile +++ b/c_emulator/SoftFloat-3e/build/Win64-MinGW-w64/Makefile @@ -172,8 +172,12 @@ OBJS_OTHERS = \ i64_to_extF80M$(OBJ) \ i64_to_f128$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -201,8 +205,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -230,6 +237,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -259,6 +267,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80_to_ui32$(OBJ) \ extF80_to_ui64$(OBJ) \ extF80_to_i32$(OBJ) \ @@ -364,12 +373,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -380,6 +397,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): 
$(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/template-FAST_INT64/Makefile b/c_emulator/SoftFloat-3e/build/template-FAST_INT64/Makefile index c5005e6e3..630538d9b 100644 --- a/c_emulator/SoftFloat-3e/build/template-FAST_INT64/Makefile +++ b/c_emulator/SoftFloat-3e/build/template-FAST_INT64/Makefile @@ -173,8 +173,12 @@ OBJS_OTHERS = \ i64_to_extF80M$(OBJ) \ i64_to_f128$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -202,8 +206,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -231,6 +238,7 @@ OBJS_OTHERS = \ f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -260,6 +268,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80_to_ui32$(OBJ) \ extF80_to_ui64$(OBJ) \ extF80_to_i32$(OBJ) \ @@ -365,12 +374,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -381,6 +398,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/build/template-not-FAST_INT64/Makefile b/c_emulator/SoftFloat-3e/build/template-not-FAST_INT64/Makefile index 49fddfd02..c4f353259 100644 --- a/c_emulator/SoftFloat-3e/build/template-not-FAST_INT64/Makefile +++ b/c_emulator/SoftFloat-3e/build/template-not-FAST_INT64/Makefile @@ -166,8 +166,12 @@ OBJS_OTHERS = \ i64_to_f64$(OBJ) \ i64_to_extF80M$(OBJ) \ i64_to_f128M$(OBJ) \ + f16_to_ui8$(OBJ) \ + f16_to_ui16$(OBJ) \ f16_to_ui32$(OBJ) \ f16_to_ui64$(OBJ) \ + f16_to_i8$(OBJ) \ + f16_to_i16$(OBJ) \ f16_to_i32$(OBJ) \ f16_to_i64$(OBJ) \ f16_to_ui32_r_minMag$(OBJ) \ @@ -193,8 +197,11 @@ OBJS_OTHERS = \ f16_le_quiet$(OBJ) \ f16_lt_quiet$(OBJ) \ f16_isSignalingNaN$(OBJ) \ + f16_classify$(OBJ) \ + f32_to_ui16$(OBJ) \ f32_to_ui32$(OBJ) \ f32_to_ui64$(OBJ) \ + f32_to_i16$(OBJ) \ f32_to_i32$(OBJ) \ f32_to_i64$(OBJ) \ f32_to_ui32_r_minMag$(OBJ) \ @@ -220,6 +227,7 @@ OBJS_OTHERS = \ 
f32_le_quiet$(OBJ) \ f32_lt_quiet$(OBJ) \ f32_isSignalingNaN$(OBJ) \ + f32_classify$(OBJ) \ f64_to_ui32$(OBJ) \ f64_to_ui64$(OBJ) \ f64_to_i32$(OBJ) \ @@ -247,6 +255,7 @@ OBJS_OTHERS = \ f64_le_quiet$(OBJ) \ f64_lt_quiet$(OBJ) \ f64_isSignalingNaN$(OBJ) \ + f64_classify$(OBJ) \ extF80M_to_ui32$(OBJ) \ extF80M_to_ui64$(OBJ) \ extF80M_to_i32$(OBJ) \ @@ -299,12 +308,20 @@ OBJS_OTHERS = \ f128M_le_quiet$(OBJ) \ f128M_lt_quiet$(OBJ) \ -OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) +OBJS_RECIPROCAL = \ + f16_rsqrte7$(OBJ) \ + f16_recip7$(OBJ) \ + f32_rsqrte7$(OBJ) \ + f32_recip7$(OBJ) \ + f64_rsqrte7$(OBJ) \ + f64_recip7$(OBJ) \ + +OBJS_ALL = $(OBJS_PRIMITIVES) $(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL) $(OBJS_ALL): \ $(OTHER_HEADERS) platform.h $(SOURCE_DIR)/include/primitiveTypes.h \ $(SOURCE_DIR)/include/primitives.h -$(OBJS_SPECIALIZE) $(OBJS_OTHERS): \ +$(OBJS_SPECIALIZE) $(OBJS_OTHERS) $(OBJS_RECIPROCAL): \ $(SOURCE_DIR)/include/softfloat_types.h $(SOURCE_DIR)/include/internals.h \ $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/specialize.h \ $(SOURCE_DIR)/include/softfloat.h @@ -315,6 +332,9 @@ $(OBJS_PRIMITIVES) $(OBJS_OTHERS): %$(OBJ): $(SOURCE_DIR)/%.c $(OBJS_SPECIALIZE): %$(OBJ): $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/%.c $(COMPILE_C) $(SOURCE_DIR)/$(SPECIALIZE_TYPE)/$*.c +$(OBJS_RECIPROCAL): %$(OBJ): $(SOURCE_DIR)/fall_reciprocal.c + $(COMPILE_C) $(SOURCE_DIR)/fall_reciprocal.c + softfloat$(LIB): $(OBJS_ALL) $(DELETE) $@ $(MAKELIB) $^ diff --git a/c_emulator/SoftFloat-3e/source/8086-SSE/specialize.h b/c_emulator/SoftFloat-3e/source/8086-SSE/specialize.h index 5fe119a1e..4b31dc56b 100644 --- a/c_emulator/SoftFloat-3e/source/8086-SSE/specialize.h +++ b/c_emulator/SoftFloat-3e/source/8086-SSE/specialize.h @@ -51,6 +51,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | The values to return on conversions to 32-bit integer formats that raise an | invalid exception. *----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + #define ui32_fromPosOverflow 0xFFFFFFFF #define ui32_fromNegOverflow 0xFFFFFFFF #define ui32_fromNaN 0xFFFFFFFF diff --git a/c_emulator/SoftFloat-3e/source/8086/specialize.h b/c_emulator/SoftFloat-3e/source/8086/specialize.h index 5fe119a1e..4b31dc56b 100644 --- a/c_emulator/SoftFloat-3e/source/8086/specialize.h +++ b/c_emulator/SoftFloat-3e/source/8086/specialize.h @@ -51,6 +51,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | The values to return on conversions to 32-bit integer formats that raise an | invalid exception. 
*----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + #define ui32_fromPosOverflow 0xFFFFFFFF #define ui32_fromNegOverflow 0xFFFFFFFF #define ui32_fromNaN 0xFFFFFFFF diff --git a/c_emulator/SoftFloat-3e/source/ARM-VFPv2-defaultNaN/specialize.h b/c_emulator/SoftFloat-3e/source/ARM-VFPv2-defaultNaN/specialize.h index 2c481a259..593ef4ff5 100644 --- a/c_emulator/SoftFloat-3e/source/ARM-VFPv2-defaultNaN/specialize.h +++ b/c_emulator/SoftFloat-3e/source/ARM-VFPv2-defaultNaN/specialize.h @@ -51,6 +51,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | The values to return on conversions to 32-bit integer formats that raise an | invalid exception. *----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + #define ui32_fromPosOverflow 0xFFFFFFFF #define ui32_fromNegOverflow 0 #define ui32_fromNaN 0 diff --git a/c_emulator/SoftFloat-3e/source/ARM-VFPv2/specialize.h b/c_emulator/SoftFloat-3e/source/ARM-VFPv2/specialize.h index 5321f33bc..6e9060bc1 100644 --- a/c_emulator/SoftFloat-3e/source/ARM-VFPv2/specialize.h +++ b/c_emulator/SoftFloat-3e/source/ARM-VFPv2/specialize.h @@ -51,6 +51,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | The values to return on conversions to 32-bit integer formats that raise an | invalid exception. *----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + #define ui32_fromPosOverflow 0xFFFFFFFF #define ui32_fromNegOverflow 0 #define ui32_fromNaN 0 diff --git a/c_emulator/SoftFloat-3e/source/RISCV/specialize.h b/c_emulator/SoftFloat-3e/source/RISCV/specialize.h index 509c241ee..fbf1e2ba9 100644 --- a/c_emulator/SoftFloat-3e/source/RISCV/specialize.h +++ b/c_emulator/SoftFloat-3e/source/RISCV/specialize.h @@ -51,6 +51,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | The values to return on conversions to 32-bit integer formats that raise an | invalid exception. 
*----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + #define ui32_fromPosOverflow 0xFFFFFFFF #define ui32_fromNegOverflow 0 #define ui32_fromNaN 0xFFFFFFFF diff --git a/c_emulator/SoftFloat-3e/source/f16_classify.c b/c_emulator/SoftFloat-3e/source/f16_classify.c new file mode 100755 index 000000000..9402ff13e --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f16_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f16_classify( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF16UI( uiA ) == 0x1F; + uint_fast16_t subnormalOrZero = expF16UI( uiA ) == 0; + bool sign = signF16UI( uiA ); + bool fracZero = fracF16UI( uiA ) == 0; + bool isNaN = isNaNF16UI( uiA ); + bool isSNaN = softfloat_isSigNaNF16UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/c_emulator/SoftFloat-3e/source/f16_to_i16.c b/c_emulator/SoftFloat-3e/source/f16_to_i16.c new file mode 100644 index 000000000..66fdbfa7a --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f16_to_i16.c @@ -0,0 +1,56 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast16_t f16_to_i16( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f16_to_i32(a, roundingMode, exact); + + if (sig32 > INT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromPosOverflow; + } else if (sig32 < INT16_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromNegOverflow; + } else { + return sig32; + } +} diff --git a/c_emulator/SoftFloat-3e/source/f16_to_i8.c b/c_emulator/SoftFloat-3e/source/f16_to_i8.c new file mode 100644 index 000000000..a82c5ff2a --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f16_to_i8.c @@ -0,0 +1,56 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
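The new narrow conversions all follow the wrapper pattern shown in f16_to_i16 above: convert through the existing 32-bit SoftFloat helper, then saturate to the narrow range, and on overflow replace the inner conversion's flags with the saved flags plus invalid. A small usage sketch, illustrative only and not part of the patch (0x78E2 is 40000.0 in IEEE binary16):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "softfloat.h"

    /* 40000.0 does not fit in int16_t, so the wrapper saturates to
     * i16_fromPosOverflow (INT16_MAX) and reports only the invalid flag
     * for the overflow.                                                   */
    int main(void)
    {
        float16_t a = { .v = 0x78E2 };          /* 40000.0 in binary16 */
        softfloat_exceptionFlags = 0;
        int_fast16_t r = f16_to_i16(a, softfloat_round_near_even, true);
        printf("result=%d invalid=%d\n", (int)r,
               (softfloat_exceptionFlags & softfloat_flag_invalid) != 0);
        return 0;
    }

The unsigned variants (f16_to_ui16, f16_to_ui8, f32_to_ui16) are the same pattern with only the positive-overflow check, since the inner unsigned conversion already handles negative inputs.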
+ +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast8_t f16_to_i8( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f16_to_i32(a, roundingMode, exact); + + if (sig32 > INT8_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i8_fromPosOverflow; + } else if (sig32 < INT8_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i8_fromNegOverflow; + } else { + return sig32; + } +} diff --git a/c_emulator/SoftFloat-3e/source/f16_to_ui16.c b/c_emulator/SoftFloat-3e/source/f16_to_ui16.c new file mode 100644 index 000000000..2054e4f06 --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f16_to_ui16.c @@ -0,0 +1,53 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f16_to_ui16( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + uint_fast32_t sig32 = f16_to_ui32(a, roundingMode, exact); + + if (sig32 > UINT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return ui16_fromPosOverflow; + } else { + return sig32; + } +} diff --git a/c_emulator/SoftFloat-3e/source/f16_to_ui8.c b/c_emulator/SoftFloat-3e/source/f16_to_ui8.c new file mode 100644 index 000000000..6ce158557 --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f16_to_ui8.c @@ -0,0 +1,53 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. 
Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +uint_fast8_t f16_to_ui8( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + uint_fast32_t sig32 = f16_to_ui32(a, roundingMode, exact); + + if (sig32 > UINT8_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return ui8_fromPosOverflow; + } else { + return sig32; + } +} diff --git a/c_emulator/SoftFloat-3e/source/f32_classify.c b/c_emulator/SoftFloat-3e/source/f32_classify.c new file mode 100755 index 000000000..83fad878a --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f32_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f32_classify( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF32UI( uiA ) == 0xFF; + uint_fast16_t subnormalOrZero = expF32UI( uiA ) == 0; + bool sign = signF32UI( uiA ); + bool fracZero = fracF32UI( uiA ) == 0; + bool isNaN = isNaNF32UI( uiA ); + bool isSNaN = softfloat_isSigNaNF32UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/c_emulator/SoftFloat-3e/source/f32_to_i16.c b/c_emulator/SoftFloat-3e/source/f32_to_i16.c new file mode 100644 index 000000000..b23231a1f --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f32_to_i16.c @@ -0,0 +1,56 @@ + 
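The classify helpers (f16_classify above, f32_classify, and f64_classify later in this patch) return a one-hot ten-bit mask with the same bit layout the scalar RISC-V FCLASS instructions use, which is presumably what the vector classify instruction consumes through the softfloat interface. A small decoder, illustrative only:

    #include <stdint.h>

    /* Decode the one-hot mask produced by f16_classify/f32_classify/
     * f64_classify; bit positions follow the RISC-V fclass encoding.     */
    static const char *classify_name(uint_fast16_t c)
    {
        switch (c) {
        case 1 << 0: return "-infinity";
        case 1 << 1: return "-normal";
        case 1 << 2: return "-subnormal";
        case 1 << 3: return "-zero";
        case 1 << 4: return "+zero";
        case 1 << 5: return "+subnormal";
        case 1 << 6: return "+normal";
        case 1 << 7: return "+infinity";
        case 1 << 8: return "signaling NaN";
        case 1 << 9: return "quiet NaN";
        default:     return "invalid";
        }
    }

Exactly one bit is ever set: the NaN tests mask out bits 0 to 7 (the fraction is nonzero), and a non-NaN input selects exactly one of the eight sign/exponent categories.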
+/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast16_t f32_to_i16( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f32_to_i32(a, roundingMode, exact); + + if (sig32 > INT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromPosOverflow; + } else if (sig32 < INT16_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromNegOverflow; + } else { + return sig32; + } +} diff --git a/c_emulator/SoftFloat-3e/source/f32_to_ui16.c b/c_emulator/SoftFloat-3e/source/f32_to_ui16.c new file mode 100644 index 000000000..073492bfa --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f32_to_ui16.c @@ -0,0 +1,53 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f32_to_ui16( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + uint_fast32_t sig32 = f32_to_ui32(a, roundingMode, exact); + + if (sig32 > UINT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return ui16_fromPosOverflow; + } else { + return sig32; + } +} diff --git a/c_emulator/SoftFloat-3e/source/f64_classify.c b/c_emulator/SoftFloat-3e/source/f64_classify.c new file mode 100755 index 000000000..180abde3c --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/f64_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f64_classify( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF64UI( uiA ) == 0x7FF; + uint_fast16_t subnormalOrZero = expF64UI( uiA ) == 0; + bool sign = signF64UI( uiA ); + bool fracZero = fracF64UI( uiA ) == 0; + bool isNaN = isNaNF64UI( uiA ); + bool isSNaN = softfloat_isSigNaNF64UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/c_emulator/SoftFloat-3e/source/fall_reciprocal.c b/c_emulator/SoftFloat-3e/source/fall_reciprocal.c new file mode 100755 index 000000000..1c9645893 --- /dev/null +++ b/c_emulator/SoftFloat-3e/source/fall_reciprocal.c @@ -0,0 +1,392 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +static inline uint64_t extract64(uint64_t val, int pos, int len) +{ + assert(pos >= 0 && len > 0 && len <= 64 - pos); + return (val >> pos) & (~UINT64_C(0) >> (64 - len)); +} + +static inline uint64_t make_mask64(int pos, int len) +{ + assert(pos >= 0 && len > 0 && pos < 64 && len <= 64); + return (UINT64_MAX >> (64 - len)) << pos; +} + +//user needs to truncate output to required length +static inline uint64_t rsqrte7(uint64_t val, int e, int s, bool sub) { + uint64_t exp = extract64(val, s, e); + uint64_t sig = extract64(val, 0, s); + uint64_t sign = extract64(val, s + e, 1); + const int p = 7; + + static const uint8_t table[] = { + 52, 51, 50, 48, 47, 46, 44, 43, + 42, 41, 40, 39, 38, 36, 35, 34, + 33, 32, 31, 30, 30, 29, 28, 27, + 26, 25, 24, 23, 23, 22, 21, 20, + 19, 19, 18, 17, 16, 16, 15, 14, + 14, 13, 12, 12, 11, 10, 10, 9, + 9, 8, 7, 7, 6, 6, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0, + 127, 125, 123, 121, 119, 118, 116, 114, + 113, 111, 109, 108, 106, 105, 103, 102, + 100, 99, 97, 96, 95, 93, 92, 91, + 90, 88, 87, 86, 85, 84, 83, 82, + 80, 79, 78, 77, 76, 75, 74, 73, + 72, 71, 70, 70, 69, 68, 67, 66, + 65, 64, 63, 63, 62, 61, 60, 59, + 59, 58, 57, 56, 56, 55, 54, 53}; + + if (sub) { + while (extract64(sig, s - 1, 1) == 0) + exp--, sig <<= 1; + + sig = (sig << 1) & make_mask64(0 ,s); + } + + int idx = ((exp & 1) << (p-1)) | (sig >> (s-p+1)); + uint64_t out_sig = (uint64_t)(table[idx]) << (s-p); + uint64_t out_exp = (3 * make_mask64(0, e - 1) + ~exp) / 2; + + return (sign << (s+e)) | (out_exp << s) | out_sig; +} + +float16_t f16_rsqrte7(float16_t in) +{ + union ui16_f16 uA; + + uA.f = in; + unsigned int ret = f16_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF16UI; + break; + case 0x008: // -0 + uA.ui = 0xfc00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7c00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + 
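 /* Positive subnormal inputs fall through: rsqrte7() renormalizes the
  * significand first (sub == true).                                      */
 /* The default case covers positive normal (or renormalized subnormal)
  * inputs: rsqrte7() looks up a 7-bit significand estimate in a 128-entry
  * table indexed by the low exponent bit and the top 6 significand bits,
  * and sets the result exponent to roughly (3*bias - exp) / 2.           */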
default: // +num + uA.ui = rsqrte7(uA.ui, 5, 10, sub); + break; + } + + return uA.f; +} + +float32_t f32_rsqrte7(float32_t in) +{ + union ui32_f32 uA; + + uA.f = in; + unsigned int ret = f32_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF32UI; + break; + case 0x008: // -0 + uA.ui = 0xff800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7f800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + default: // +num + uA.ui = rsqrte7(uA.ui, 8, 23, sub); + break; + } + + return uA.f; +} + +float64_t f64_rsqrte7(float64_t in) +{ + union ui64_f64 uA; + + uA.f = in; + unsigned int ret = f64_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF64UI; + break; + case 0x008: // -0 + uA.ui = 0xfff0000000000000ul; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7ff0000000000000ul; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + default: // +num + uA.ui = rsqrte7(uA.ui, 11, 52, sub); + break; + } + + return uA.f; +} + +//user needs to truncate output to required length +static inline uint64_t recip7(uint64_t val, int e, int s, int rm, bool sub, + bool *round_abnormal) +{ + uint64_t exp = extract64(val, s, e); + uint64_t sig = extract64(val, 0, s); + uint64_t sign = extract64(val, s + e, 1); + const int p = 7; + + static const uint8_t table[] = { + 127, 125, 123, 121, 119, 117, 116, 114, + 112, 110, 109, 107, 105, 104, 102, 100, + 99, 97, 96, 94, 93, 91, 90, 88, + 87, 85, 84, 83, 81, 80, 79, 77, + 76, 75, 74, 72, 71, 70, 69, 68, + 66, 65, 64, 63, 62, 61, 60, 59, + 58, 57, 56, 55, 54, 53, 52, 51, + 50, 49, 48, 47, 46, 45, 44, 43, + 42, 41, 40, 40, 39, 38, 37, 36, + 35, 35, 34, 33, 32, 31, 31, 30, + 29, 28, 28, 27, 26, 25, 25, 24, + 23, 23, 22, 21, 21, 20, 19, 19, + 18, 17, 17, 16, 15, 15, 14, 14, + 13, 12, 12, 11, 11, 10, 9, 9, + 8, 8, 7, 7, 6, 5, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0}; + + if (sub) { + while (extract64(sig, s - 1, 1) == 0) + exp--, sig <<= 1; + + sig = (sig << 1) & make_mask64(0 ,s); + + if (exp != 0 && exp != UINT64_MAX) { + *round_abnormal = true; + if (rm == 1 || + (rm == 2 && !sign) || + (rm == 3 && sign)) + return ((sign << (s+e)) | make_mask64(s, e)) - 1; + else + return (sign << (s+e)) | make_mask64(s, e); + } + } + + int idx = sig >> (s-p); + uint64_t out_sig = (uint64_t)(table[idx]) << (s-p); + uint64_t out_exp = 2 * make_mask64(0, e - 1) + ~exp; + if (out_exp == 0 || out_exp == UINT64_MAX) { + out_sig = (out_sig >> 1) | make_mask64(s - 1, 1); + if (out_exp == UINT64_MAX) { + out_sig >>= 1; + out_exp = 0; + } + } + + return (sign << (s+e)) | (out_exp << s) | out_sig; +} + +float16_t f16_recip7(float16_t in) +{ + union ui16_f16 uA; + + uA.f = in; + unsigned int ret = f16_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x8000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xfc00; + softfloat_exceptionFlags |= 
softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7c00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF16UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 5, 10, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} + +float32_t f32_recip7(float32_t in) +{ + union ui32_f32 uA; + + uA.f = in; + unsigned int ret = f32_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x80000000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xff800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7f800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF32UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 8, 23, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} + +float64_t f64_recip7(float64_t in) +{ + union ui64_f64 uA; + + uA.f = in; + unsigned int ret = f64_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x8000000000000000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xfff0000000000000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7ff0000000000000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF64UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 11, 52, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} diff --git a/c_emulator/SoftFloat-3e/source/include/softfloat.h b/c_emulator/SoftFloat-3e/source/include/softfloat.h index b33374cd6..a284fdc3a 100644 --- a/c_emulator/SoftFloat-3e/source/include/softfloat.h +++ b/c_emulator/SoftFloat-3e/source/include/softfloat.h @@ -137,8 +137,12 @@ void i64_to_f128M( int64_t, float128_t * ); /*---------------------------------------------------------------------------- | 16-bit (half-precision) floating-point operations. 
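| (The declarations added below extend this group with 8- and 16-bit integer
| conversions, classification via f16_classify, and the 7-bit reciprocal and
| reciprocal square-root estimates f16_recip7 and f16_rsqrte7, all in support
| of the RISC-V vector extension.)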
*----------------------------------------------------------------------------*/ +uint_fast8_t f16_to_ui8( float16_t, uint_fast8_t, bool ); +uint_fast16_t f16_to_ui16( float16_t, uint_fast8_t, bool ); uint_fast32_t f16_to_ui32( float16_t, uint_fast8_t, bool ); uint_fast64_t f16_to_ui64( float16_t, uint_fast8_t, bool ); +int_fast8_t f16_to_i8( float16_t, uint_fast8_t, bool ); +int_fast16_t f16_to_i16( float16_t, uint_fast8_t, bool ); int_fast32_t f16_to_i32( float16_t, uint_fast8_t, bool ); int_fast64_t f16_to_i64( float16_t, uint_fast8_t, bool ); uint_fast32_t f16_to_ui32_r_minMag( float16_t, bool ); @@ -168,12 +172,17 @@ bool f16_eq_signaling( float16_t, float16_t ); bool f16_le_quiet( float16_t, float16_t ); bool f16_lt_quiet( float16_t, float16_t ); bool f16_isSignalingNaN( float16_t ); +uint_fast16_t f16_classify( float16_t ); +float16_t f16_rsqrte7( float16_t ); +float16_t f16_recip7( float16_t ); /*---------------------------------------------------------------------------- | 32-bit (single-precision) floating-point operations. *----------------------------------------------------------------------------*/ +uint_fast16_t f32_to_ui16( float32_t, uint_fast8_t, bool ); uint_fast32_t f32_to_ui32( float32_t, uint_fast8_t, bool ); uint_fast64_t f32_to_ui64( float32_t, uint_fast8_t, bool ); +int_fast16_t f32_to_i16( float32_t, uint_fast8_t, bool ); int_fast32_t f32_to_i32( float32_t, uint_fast8_t, bool ); int_fast64_t f32_to_i64( float32_t, uint_fast8_t, bool ); uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool ); @@ -203,6 +212,9 @@ bool f32_eq_signaling( float32_t, float32_t ); bool f32_le_quiet( float32_t, float32_t ); bool f32_lt_quiet( float32_t, float32_t ); bool f32_isSignalingNaN( float32_t ); +uint_fast16_t f32_classify( float32_t ); +float32_t f32_rsqrte7( float32_t ); +float32_t f32_recip7( float32_t ); /*---------------------------------------------------------------------------- | 64-bit (double-precision) floating-point operations. @@ -238,6 +250,9 @@ bool f64_eq_signaling( float64_t, float64_t ); bool f64_le_quiet( float64_t, float64_t ); bool f64_lt_quiet( float64_t, float64_t ); bool f64_isSignalingNaN( float64_t ); +uint_fast16_t f64_classify( float64_t ); +float64_t f64_rsqrte7( float64_t ); +float64_t f64_recip7( float64_t ); /*---------------------------------------------------------------------------- | Rounding precision for 80-bit extended double-precision floating-point. 
diff --git a/c_emulator/riscv_softfloat.c b/c_emulator/riscv_softfloat.c index b3819b6a3..6834afc28 100644 --- a/c_emulator/riscv_softfloat.c +++ b/c_emulator/riscv_softfloat.c @@ -251,9 +251,177 @@ unit softfloat_f64sqrt(mach_bits rm, mach_bits v) { return UNIT; } +unit softfloat_f16rsqrte7(mach_bits rm, mach_bits v) { + SOFTFLOAT_PRELUDE(rm); + + float16_t a, res; + a.v = v; + res = f16_rsqrte7(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f32rsqrte7(mach_bits rm, mach_bits v) { + SOFTFLOAT_PRELUDE(rm); + + float32_t a, res; + a.v = v; + res = f32_rsqrte7(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f64rsqrte7(mach_bits rm, mach_bits v) { + SOFTFLOAT_PRELUDE(rm); + + float64_t a, res; + a.v = v; + res = f64_rsqrte7(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f16recip7(mach_bits rm, mach_bits v) { + SOFTFLOAT_PRELUDE(rm); + + float16_t a, res; + a.v = v; + res = f16_recip7(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f32recip7(mach_bits rm, mach_bits v) { + SOFTFLOAT_PRELUDE(rm); + + float32_t a, res; + a.v = v; + res = f32_recip7(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f64recip7(mach_bits rm, mach_bits v) { + SOFTFLOAT_PRELUDE(rm); + + float64_t a, res; + a.v = v; + res = f64_recip7(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f16class(mach_bits v) { + SOFTFLOAT_PRELUDE(0); + + float16_t a; + float64_t res; + a.v = v; + res.v = f16_classify(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f32class(mach_bits v) { + SOFTFLOAT_PRELUDE(0); + + float32_t a; + float64_t res; + a.v = v; + res.v = f32_classify(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + +unit softfloat_f64class(mach_bits v) { + SOFTFLOAT_PRELUDE(0); + + float64_t a, res; + a.v = v; + res.v = f64_classify(a); + + SOFTFLOAT_POSTLUDE(res); + + return UNIT; +} + // The boolean 'true' argument in the conversion calls below selects // 'exact' conversion, which sets the Inexact exception flag if // needed. 
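+// Illustrative example of the narrow conversions added below, assuming the
+// same saturating semantics as f32_to_ui16 earlier in this patch: with
+// round-to-nearest-even, f16_to_i8 of 3.5 returns 4 and raises only the
+// Inexact flag, while f16_to_i8 of 200.0 saturates to 127 and raises the
+// Invalid flag.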
+unit softfloat_f16toi8(mach_bits rm, mach_bits v) {
+  SOFTFLOAT_PRELUDE(rm);
+
+  float16_t a;
+  int_fast8_t res;
+  uint_fast8_t rm8 = uint8_of_rm(rm);
+  a.v = v;
+  res = f16_to_i8(a, rm8, true);
+
+  zfloat_result = res;
+  zfloat_fflags |= (mach_bits) softfloat_exceptionFlags;
+
+  return UNIT;
+}
+
+unit softfloat_f16toui8(mach_bits rm, mach_bits v) {
+  SOFTFLOAT_PRELUDE(rm);
+
+  float16_t a;
+  uint_fast8_t res;
+  uint_fast8_t rm8 = uint8_of_rm(rm);
+  a.v = v;
+  res = f16_to_ui8(a, rm8, true);
+
+  zfloat_result = res;
+  zfloat_fflags |= (mach_bits) softfloat_exceptionFlags;
+
+  return UNIT;
+}
+
+unit softfloat_f16toi16(mach_bits rm, mach_bits v) {
+  SOFTFLOAT_PRELUDE(rm);
+
+  float16_t a;
+  float16_t res;
+  uint_fast8_t rm8 = uint8_of_rm(rm);
+  a.v = v;
+  res.v = f16_to_i16(a, rm8, true);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
+unit softfloat_f16toui16(mach_bits rm, mach_bits v) {
+  SOFTFLOAT_PRELUDE(rm);
+
+  float16_t a;
+  float16_t res;
+  uint_fast8_t rm8 = uint8_of_rm(rm);
+  a.v = v;
+  res.v = f16_to_ui16(a, rm8, true);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
 unit softfloat_f16toi32(mach_bits rm, mach_bits v) {
   SOFTFLOAT_PRELUDE(rm);
@@ -310,6 +478,34 @@ unit softfloat_f16toui64(mach_bits rm, mach_bits v) {
   return UNIT;
 }
 
+unit softfloat_f32toi16(mach_bits rm, mach_bits v) {
+  SOFTFLOAT_PRELUDE(rm);
+
+  float32_t a;
+  float16_t res;
+  uint_fast8_t rm8 = uint8_of_rm(rm);
+  a.v = v;
+  res.v = f32_to_i16(a, rm8, true);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
+unit softfloat_f32toui16(mach_bits rm, mach_bits v) {
+  SOFTFLOAT_PRELUDE(rm);
+
+  float32_t a;
+  float16_t res;
+  uint_fast8_t rm8 = uint8_of_rm(rm);
+  a.v = v;
+  res.v = f32_to_ui16(a, rm8, true);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
 unit softfloat_f32toi32(mach_bits rm, mach_bits v) {
   SOFTFLOAT_PRELUDE(rm);
@@ -641,6 +837,19 @@ unit softfloat_f16lt(mach_bits v1, mach_bits v2) {
   return UNIT;
 }
 
+unit softfloat_f16lt_quiet(mach_bits v1, mach_bits v2) {
+  SOFTFLOAT_PRELUDE(0);
+
+  float16_t a, b, res;
+  a.v = v1;
+  b.v = v2;
+  res.v = f16_lt_quiet(a, b);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
 unit softfloat_f16le(mach_bits v1, mach_bits v2) {
   SOFTFLOAT_PRELUDE(0);
@@ -680,6 +889,19 @@ unit softfloat_f32lt(mach_bits v1, mach_bits v2) {
   return UNIT;
 }
 
+unit softfloat_f32lt_quiet(mach_bits v1, mach_bits v2) {
+  SOFTFLOAT_PRELUDE(0);
+
+  float32_t a, b, res;
+  a.v = v1;
+  b.v = v2;
+  res.v = f32_lt_quiet(a, b);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
 unit softfloat_f32le(mach_bits v1, mach_bits v2) {
   SOFTFLOAT_PRELUDE(0);
@@ -719,6 +941,19 @@ unit softfloat_f64lt(mach_bits v1, mach_bits v2) {
   return UNIT;
 }
 
+unit softfloat_f64lt_quiet(mach_bits v1, mach_bits v2) {
+  SOFTFLOAT_PRELUDE(0);
+
+  float64_t a, b, res;
+  a.v = v1;
+  b.v = v2;
+  res.v = f64_lt_quiet(a, b);
+
+  SOFTFLOAT_POSTLUDE(res);
+
+  return UNIT;
+}
+
 unit softfloat_f64le(mach_bits v1, mach_bits v2) {
   SOFTFLOAT_PRELUDE(0);
diff --git a/c_emulator/riscv_softfloat.h b/c_emulator/riscv_softfloat.h
index bd29d7f76..3af149011 100644
--- a/c_emulator/riscv_softfloat.h
+++ b/c_emulator/riscv_softfloat.h
@@ -23,11 +23,29 @@ unit softfloat_f16sqrt(mach_bits rm, mach_bits v);
 unit softfloat_f32sqrt(mach_bits rm, mach_bits v);
 unit softfloat_f64sqrt(mach_bits rm, mach_bits v);
 
+unit softfloat_f16toi8(mach_bits rm, mach_bits v);
+unit softfloat_f16toui8(mach_bits rm, mach_bits v);
+unit softfloat_f16toi16(mach_bits rm, mach_bits v);
+unit softfloat_f16toui16(mach_bits rm, mach_bits v);
 unit softfloat_f16toi32(mach_bits
rm, mach_bits v); unit softfloat_f16toui32(mach_bits rm, mach_bits v); unit softfloat_f16toi64(mach_bits rm, mach_bits v); unit softfloat_f16toui64(mach_bits rm, mach_bits v); +unit softfloat_f16rsqrte7(mach_bits rm, mach_bits v); +unit softfloat_f32rsqrte7(mach_bits rm, mach_bits v); +unit softfloat_f64rsqrte7(mach_bits rm, mach_bits v); + +unit softfloat_f16recip7(mach_bits rm, mach_bits v); +unit softfloat_f32recip7(mach_bits rm, mach_bits v); +unit softfloat_f64recip7(mach_bits rm, mach_bits v); + +unit softfloat_f16class(mach_bits v); +unit softfloat_f32class(mach_bits v); +unit softfloat_f64class(mach_bits v); + +unit softfloat_f32toi16(mach_bits rm, mach_bits v); +unit softfloat_f32toui16(mach_bits rm, mach_bits v); unit softfloat_f32toi32(mach_bits rm, mach_bits v); unit softfloat_f32toui32(mach_bits rm, mach_bits v); unit softfloat_f32toi64(mach_bits rm, mach_bits v); @@ -62,11 +80,14 @@ unit softfloat_f64tof16(mach_bits rm, mach_bits v); unit softfloat_f64tof32(mach_bits rm, mach_bits v); unit softfloat_f16lt(mach_bits v1, mach_bits v2); +unit softfloat_f16lt_quiet(mach_bits v1, mach_bits v2); unit softfloat_f16le(mach_bits v1, mach_bits v2); unit softfloat_f16eq(mach_bits v1, mach_bits v2); unit softfloat_f32lt(mach_bits v1, mach_bits v2); +unit softfloat_f32lt_quiet(mach_bits v1, mach_bits v2); unit softfloat_f32le(mach_bits v1, mach_bits v2); unit softfloat_f32eq(mach_bits v1, mach_bits v2); unit softfloat_f64lt(mach_bits v1, mach_bits v2); +unit softfloat_f64lt_quiet(mach_bits v1, mach_bits v2); unit softfloat_f64le(mach_bits v1, mach_bits v2); unit softfloat_f64eq(mach_bits v1, mach_bits v2); diff --git a/handwritten_support/0.11/riscv_extras_fdext.lem b/handwritten_support/0.11/riscv_extras_fdext.lem index 04b4d7e54..04c6785c0 100644 --- a/handwritten_support/0.11/riscv_extras_fdext.lem +++ b/handwritten_support/0.11/riscv_extras_fdext.lem @@ -66,6 +66,45 @@ let softfloat_f32_sqrt _ _ = () val softfloat_f64_sqrt : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit let softfloat_f64_sqrt _ _ = () +val softfloat_f16_rsqrte7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_rsqrte7 _ _ = () + +val softfloat_f32_rsqrte7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_rsqrte7 _ _ = () + +val softfloat_f64_rsqrte7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f64_rsqrte7 _ _ = () + +val softfloat_f16_recip7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_recip7 _ _ = () + +val softfloat_f32_recip7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_recip7 _ _ = () + +val softfloat_f64_recip7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f64_recip7 _ _ = () + +val softfloat_f16_class : forall 's. Size 's => bitvector 's -> unit +let softfloat_f16_class _ = () + +val softfloat_f32_class : forall 's. Size 's => bitvector 's -> unit +let softfloat_f32_class _ = () + +val softfloat_f64_class : forall 's. Size 's => bitvector 's -> unit +let softfloat_f64_class _ = () + + +val softfloat_f16_to_i8: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_i8 _ _ = () + +val softfloat_f16_to_ui8: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_ui8 _ _ = () + +val softfloat_f16_to_i16: forall 'rm 's. 
Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_i16 _ _ = () + +val softfloat_f16_to_ui16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_ui16 _ _ = () val softfloat_f16_to_i32: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit let softfloat_f16_to_i32 _ _ = () @@ -92,6 +131,12 @@ val softfloat_ui64_to_f16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> let softfloat_ui64_to_f16 _ _ = () +val softfloat_f32_to_i16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_to_i16 _ _ = () + +val softfloat_f32_to_ui16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_to_ui16 _ _ = () + val softfloat_f32_to_i32: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit let softfloat_f32_to_i32 _ _ = () @@ -164,6 +209,9 @@ let softfloat_f64_to_f32 _ _ = () val softfloat_f16_lt : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f16_lt _ _ = () +val softfloat_f16_lt_quiet : forall 's. Size 's => bitvector 's -> bitvector 's -> unit +let softfloat_f16_lt_quiet _ _ = () + val softfloat_f16_le : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f16_le _ _ = () @@ -173,6 +221,9 @@ let softfloat_f16_eq _ _ = () val softfloat_f32_lt : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f32_lt _ _ = () +val softfloat_f32_lt_quiet : forall 's. Size 's => bitvector 's -> bitvector 's -> unit +let softfloat_f32_lt_quiet _ _ = () + val softfloat_f32_le : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f32_le _ _ = () @@ -182,6 +233,9 @@ let softfloat_f32_eq _ _ = () val softfloat_f64_lt : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f64_lt _ _ = () +val softfloat_f64_lt_quiet : forall 's. Size 's => bitvector 's -> bitvector 's -> unit +let softfloat_f64_lt_quiet _ _ = () + val softfloat_f64_le : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f64_le _ _ = () diff --git a/handwritten_support/riscv_extras_fdext.lem b/handwritten_support/riscv_extras_fdext.lem index 893c39ae7..ead2806ea 100644 --- a/handwritten_support/riscv_extras_fdext.lem +++ b/handwritten_support/riscv_extras_fdext.lem @@ -134,6 +134,45 @@ let softfloat_f32_sqrt _ _ = () val softfloat_f64_sqrt : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit let softfloat_f64_sqrt _ _ = () +val softfloat_f16_rsqrte7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_rsqrte7 _ _ = () + +val softfloat_f32_rsqrte7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_rsqrte7 _ _ = () + +val softfloat_f64_rsqrte7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f64_rsqrte7 _ _ = () + +val softfloat_f16_recip7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_recip7 _ _ = () + +val softfloat_f32_recip7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_recip7 _ _ = () + +val softfloat_f64_recip7 : forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f64_recip7 _ _ = () + +val softfloat_f16_class : forall 's. Size 's => bitvector 's -> unit +let softfloat_f16_class _ = () + +val softfloat_f32_class : forall 's. 
Size 's => bitvector 's -> unit +let softfloat_f32_class _ = () + +val softfloat_f64_class : forall 's. Size 's => bitvector 's -> unit +let softfloat_f64_class _ = () + + +val softfloat_f16_to_i8: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_i8 _ _ = () + +val softfloat_f16_to_ui8: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_ui8 _ _ = () + +val softfloat_f16_to_i16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_i16 _ _ = () + +val softfloat_f16_to_ui16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f16_to_ui16 _ _ = () val softfloat_f16_to_i32: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit let softfloat_f16_to_i32 _ _ = () @@ -160,6 +199,12 @@ val softfloat_ui64_to_f16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> let softfloat_ui64_to_f16 _ _ = () +val softfloat_f32_to_i16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_to_i16 _ _ = () + +val softfloat_f32_to_ui16: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit +let softfloat_f32_to_ui16 _ _ = () + val softfloat_f32_to_i32: forall 'rm 's. Size 'rm, Size 's => bitvector 'rm -> bitvector 's -> unit let softfloat_f32_to_i32 _ _ = () @@ -232,6 +277,9 @@ let softfloat_f64_to_f32 _ _ = () val softfloat_f16_lt : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f16_lt _ _ = () +val softfloat_f16_lt_quiet : forall 's. Size 's => bitvector 's -> bitvector 's -> unit +let softfloat_f16_lt_quiet _ _ = () + val softfloat_f16_le : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f16_le _ _ = () @@ -241,6 +289,9 @@ let softfloat_f16_eq _ _ = () val softfloat_f32_lt : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f32_lt _ _ = () +val softfloat_f32_lt_quiet : forall 's. Size 's => bitvector 's -> bitvector 's -> unit +let softfloat_f32_lt_quiet _ _ = () + val softfloat_f32_le : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f32_le _ _ = () @@ -250,6 +301,9 @@ let softfloat_f32_eq _ _ = () val softfloat_f64_lt : forall 's. Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f64_lt _ _ = () +val softfloat_f64_lt_quiet : forall 's. Size 's => bitvector 's -> bitvector 's -> unit +let softfloat_f64_lt_quiet _ _ = () + val softfloat_f64_le : forall 's. 
Size 's => bitvector 's -> bitvector 's -> unit let softfloat_f64_le _ _ = () diff --git a/model/riscv_fdext_regs.sail b/model/riscv_fdext_regs.sail index e01a8053e..f8f7f9458 100644 --- a/model/riscv_fdext_regs.sail +++ b/model/riscv_fdext_regs.sail @@ -488,8 +488,12 @@ function ext_write_fcsr (frm, fflags) = { val write_fflags : (bits(5)) -> unit effect {rreg, wreg, escape} function write_fflags(fflags) = { if fcsr.FFLAGS() != fflags - then dirty_fd_context_if_present(); - fcsr->FFLAGS() = fflags; + then { + fcsr->FFLAGS() = fflags; + dirty_fd_context_if_present(); + if get_config_print_reg() + then print("fcsr.FFLAGS <- " ^ BitStr(fflags)) + } } /* called for non-softfloat paths (softfloat flags need updating) */ @@ -501,5 +505,7 @@ function accrue_fflags(flags) = { fcsr->FFLAGS() = f; update_softfloat_fflags(f); dirty_fd_context_if_present(); + if get_config_print_reg() + then print("fcsr.FFLAGS <- " ^ BitStr(flags)) } } diff --git a/model/riscv_insts_vext_arith.sail b/model/riscv_insts_vext_arith.sail index 946ddbe0e..2d3a3ecc3 100644 --- a/model/riscv_insts_vext_arith.sail +++ b/model/riscv_insts_vext_arith.sail @@ -1,8 +1,46 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + /* ******************************************************************************* */ /* This file implements part of the vector extension. 
*/ /* Chapter 11: Vector Integer Arithmetic Instructions */ /* Chapter 12: Vector Fixed-Point Arithmetic Instructions */ -/* Chapter 16: Vector Permutation Instructions */ +/* Chapter 16: Vector Permutation Instructions (integer part) */ /* ******************************************************************************* */ /* ******************************* OPIVV (VVTYPE) ******************************** */ @@ -42,7 +80,7 @@ function clause execute(VVTYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -176,8 +214,8 @@ function clause execute(NVSTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -243,8 +281,8 @@ function clause execute(NVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -408,7 +446,7 @@ function clause execute(VXTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -526,8 +564,8 @@ function clause execute(NXSTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -593,8 +631,8 @@ function clause execute(NXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -661,7 +699,7 @@ function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -820,7 +858,7 @@ function clause execute(VITYPE(funct6, vm, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, 
SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -914,8 +952,8 @@ function clause execute(NISTYPE(funct6, vm, vs2, simm, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -981,8 +1019,8 @@ function clause execute(NITYPE(funct6, vm, vs2, simm, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1049,7 +1087,7 @@ function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1194,7 +1232,7 @@ function clause execute(VMVRTYPE(vs2, simm, vd)) = { let imm_val = unsigned(EXTZ(sizeof(xlen), simm)); let EMUL = imm_val + 1; - if ~(EMUL == 1 | EMUL == 2 | EMUL == 4 | EMUL == 8) then { handle_illegal(); return RETIRE_FAIL }; + if not(EMUL == 1 | EMUL == 2 | EMUL == 4 | EMUL == 8) then { handle_illegal(); return RETIRE_FAIL }; let EMUL_pow = log2(EMUL); let num_elem = get_num_elem(EMUL_pow, SEW); @@ -1225,7 +1263,7 @@ mapping simm_string : bits(5) <-> string = { mapping clause assembly = VMVRTYPE(vs2, simm, vd) <-> "vmv" ^ simm_string(simm) ^ "r.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) -/* ******************************* OPMVV (MVVTYPE) ******************************* */ +/* ******************************* OPMVV (VVTYPE) ******************************** */ union clause ast = MVVTYPE : (mvvfunct6, bits(1), regidx, regidx, regidx) mapping encdec_mvvfunct6 : mvvfunct6 <-> bits(6) = { @@ -1251,7 +1289,7 @@ function clause execute(MVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1341,7 +1379,7 @@ mapping mvvtype_mnemonic : mvvfunct6 <-> string = { mapping clause assembly = MVVTYPE(funct6, vm, vs2, vs1, vd) <-> mvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) -/* ************************ OPMVV (MVVtype Multiply-Add) ************************* */ +/* ************************* OPMVV (VVtype Multiply-Add) ************************* */ /* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ union clause ast = MVVMATYPE : (mvvmafunct6, bits(1), regidx, regidx, regidx) @@ -1361,7 +1399,7 @@ function clause execute(MVVMATYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN = int_power(2, get_vlen_pow()); let num_elem = 
get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1424,8 +1462,8 @@ function clause execute(WVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1474,7 +1512,7 @@ mapping wvvtype_mnemonic : wvvfunct6 <-> string = { mapping clause assembly = WVVTYPE(funct6, vm, vs2, vs1, vd) <-> wvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) -/* ******************************* OPMVV (WVTYPE) ******************************** */ +/* *************************** OPMVV (WVTYPE Widening) *************************** */ union clause ast = WVTYPE : (wvfunct6, bits(1), regidx, regidx, regidx) mapping encdec_wvfunct6 : wvfunct6 <-> bits(6) = { @@ -1494,8 +1532,8 @@ function clause execute(WVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1538,7 +1576,7 @@ mapping wvtype_mnemonic : wvfunct6 <-> string = { mapping clause assembly = WVTYPE(funct6, vm, vs2, vs1, vd) <-> wvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) -/* ******************** OPMVV (MVVtype Widening Multiply-Add) ******************** */ +/* ******************** OPMVV (VVtype Widening Multiply-Add) ********************* */ /* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ union clause ast = WMVVTYPE : (wmvvfunct6, bits(1), regidx, regidx, regidx) @@ -1558,8 +1596,8 @@ function clause execute(WMVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1619,8 +1657,8 @@ function clause execute(VEXT2TYPE(funct6, vm, vs2, vd)) = { let SEW_half = SEW / 2; let LMUL_pow_half = LMUL_pow - 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_half, LMUL_pow)) | - ~(valid_eew_emul(SEW_half, LMUL_pow_half)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_half, LMUL_pow)) | + not(valid_eew_emul(SEW_half, LMUL_pow_half)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = 
num_elem; @@ -1677,8 +1715,8 @@ function clause execute(VEXT4TYPE(funct6, vm, vs2, vd)) = { let SEW_quart = SEW / 4; let LMUL_pow_quart = LMUL_pow - 2; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_quart, LMUL_pow)) | - ~(valid_eew_emul(SEW_quart, LMUL_pow_quart)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_quart, LMUL_pow)) | + not(valid_eew_emul(SEW_quart, LMUL_pow_quart)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1735,8 +1773,8 @@ function clause execute(VEXT8TYPE(funct6, vm, vs2, vd)) = { let SEW_eighth = SEW / 8; let LMUL_pow_eighth = LMUL_pow - 3; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow_eighth, LMUL_pow)) | - ~(valid_eew_emul(SEW_eighth, LMUL_pow_eighth)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_eighth, LMUL_pow)) | + not(valid_eew_emul(SEW_eighth, LMUL_pow_eighth)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1856,7 +1894,7 @@ function clause execute(MVVCOMPRESS(vs2, vs1, vd)) = { mapping clause assembly = MVVCOMPRESS(vs2, vs1, vd) <-> "vcompress.vm" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) -/* ******************************* OPMVX (MVXTYPE) ******************************* */ +/* ******************************* OPMVX (VXTYPE) ******************************** */ union clause ast = MVXTYPE : (mvxfunct6, bits(1), regidx, regidx, regidx) mapping encdec_mvxfunct6 : mvxfunct6 <-> bits(6) = { @@ -1884,7 +1922,7 @@ function clause execute(MVXTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1986,7 +2024,7 @@ mapping mvxtype_mnemonic : mvxfunct6 <-> string = { mapping clause assembly = MVXTYPE(funct6, vm, vs2, rs1, vd) <-> mvxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) -/* ************************ OPMVX (MVXtype Multiply-Add) ************************* */ +/* ************************* OPMVX (VXtype Multiply-Add) ************************* */ /* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ union clause ast = MVXMATYPE : (mvxmafunct6, bits(1), regidx, regidx, regidx) @@ -2006,7 +2044,7 @@ function clause execute(MVXMATYPE(funct6, vm, vs2, rs1, vd)) = { let VLEN = int_power(2, get_vlen_pow()); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -2070,8 +2108,8 @@ function clause execute(WVXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -2120,7 +2158,7 @@ mapping wvxtype_mnemonic : wvxfunct6 <-> string = { mapping clause assembly = WVXTYPE(funct6, vm, vs2, rs1, vd) <-> wvxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ 
reg_name(rs1) ^ maybe_vmask(vm) -/* ******************************* OPMVX (WXTYPE) ******************************** */ +/* *************************** OPMVX (WXTYPE Widening) *************************** */ union clause ast = WXTYPE : (wxfunct6, bits(1), regidx, regidx, regidx) mapping encdec_wxfunct6 : wxfunct6 <-> bits(6) = { @@ -2140,7 +2178,7 @@ function clause execute(WXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -2183,7 +2221,7 @@ mapping wxtype_mnemonic : wxfunct6 <-> string = { mapping clause assembly = WXTYPE(funct6, vm, vs2, rs1, vd) <-> wxtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) -/* ******************** OPMVX (MVXtype Widening Multiply-Add) ******************** */ +/* ******************** OPMVX (VXtype Widening Multiply-Add) ********************* */ /* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ union clause ast = WMVXTYPE : (wmvxfunct6, bits(1), regidx, regidx, regidx) @@ -2204,8 +2242,8 @@ function clause execute(WMVXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if ~(valid_rd_mask(vd, vm)) | ~(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | - ~(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; diff --git a/model/riscv_insts_vext_fp.sail b/model/riscv_insts_vext_fp.sail new file mode 100755 index 000000000..5830fcf20 --- /dev/null +++ b/model/riscv_insts_vext_fp.sail @@ -0,0 +1,1367 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + +/* ******************************************************************************* */ +/* This file implements part of the vector extension. */ +/* Chapter 13: Vector Floating-Point Instructions */ +/* Chapter 16: Vector Permutation Instructions (floating-point part) */ +/* ******************************************************************************* */ + +/* ******************************* OPFVV (VVTYPE) ******************************** */ +union clause ast = FVVTYPE : (fvvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fvvfunct6 : fvvfunct6 <-> bits(6) = { + FVV_VADD <-> 0b000000, + FVV_VSUB <-> 0b000010, + FVV_VMIN <-> 0b000100, + FVV_VMAX <-> 0b000110, + FVV_VSGNJ <-> 0b001000, + FVV_VSGNJN <-> 0b001001, + FVV_VSGNJX <-> 0b001010, + FVV_VDIV <-> 0b100000, + FVV_VMUL <-> 0b100100 +} + +mapping clause encdec = FVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_fvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FVV_VADD => fp_add(rm_3b, vs2_val[i], vs1_val[i]), + FVV_VSUB => fp_sub(rm_3b, vs2_val[i], vs1_val[i]), + FVV_VMIN => fp_min(vs2_val[i], vs1_val[i]), + FVV_VMAX => fp_max(vs2_val[i], vs1_val[i]), + FVV_VMUL => fp_mul(rm_3b, vs2_val[i], vs1_val[i]), + FVV_VDIV => fp_div(rm_3b, vs2_val[i], vs1_val[i]), + FVV_VSGNJ => vs1_val[i][('m - 1)..('m - 1)] @ vs2_val[i][('m - 2)..0], + FVV_VSGNJN => (0b1 ^ vs1_val[i][('m - 1)..('m - 1)]) @ vs2_val[i][('m - 2)..0], + FVV_VSGNJX => (vs2_val[i][('m - 1)..('m - 1)] ^ vs1_val[i][('m - 1)..('m - 1)]) @ vs2_val[i][('m - 2)..0] + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fvvtype_mnemonic : fvvfunct6 <-> string = { + FVV_VADD <-> "vfadd.vv", + FVV_VSUB <-> "vfsub.vv", + FVV_VMIN <-> "vfmin.vv", + FVV_VMAX <-> "vfmax.vv", + FVV_VSGNJ <-> "vfsgnj.vv", + FVV_VSGNJN <-> "vfsgnjn.vv", + FVV_VSGNJX <-> "vfsgnjx.vv", + FVV_VDIV <-> "vfdiv.vv", + 
FVV_VMUL <-> "vfmul.vv" +} + +mapping clause assembly = FVVTYPE(funct6, vm, vs2, vs1, vd) + <-> fvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ************************* OPFVV (VVtype Multiply-Add) ************************* */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = FVVMATYPE : (fvvmafunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fvvmafunct6 : fvvmafunct6 <-> bits(6) = { + FVV_VMADD <-> 0b101000, + FVV_VNMADD <-> 0b101001, + FVV_VMSUB <-> 0b101010, + FVV_VNMSUB <-> 0b101011, + FVV_VMACC <-> 0b101100, + FVV_VNMACC <-> 0b101101, + FVV_VMSAC <-> 0b101110, + FVV_VNMSAC <-> 0b101111 +} + +mapping clause encdec = FVVMATYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_fvvmafunct6(funct6) @ vm @ vs2 @ vs1 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FVVMATYPE(funct6, vm, vs2, vs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FVV_VMACC => fp_muladd(rm_3b, vs1_val[i], vs2_val[i], vd_val[i]), + FVV_VNMACC => fp_nmulsub(rm_3b, vs1_val[i], vs2_val[i], vd_val[i]), + FVV_VMSAC => fp_mulsub(rm_3b, vs1_val[i], vs2_val[i], vd_val[i]), + FVV_VNMSAC => fp_nmuladd(rm_3b, vs1_val[i], vs2_val[i], vd_val[i]), + FVV_VMADD => fp_muladd(rm_3b, vs1_val[i], vd_val[i], vs2_val[i]), + FVV_VNMADD => fp_nmulsub(rm_3b, vs1_val[i], vd_val[i], vs2_val[i]), + FVV_VMSUB => fp_mulsub(rm_3b, vs1_val[i], vd_val[i], vs2_val[i]), + FVV_VNMSUB => fp_nmuladd(rm_3b, vs1_val[i], vd_val[i], vs2_val[i]) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fvvmatype_mnemonic : fvvmafunct6 <-> string = { + FVV_VMADD <-> "vfmadd.vv", + FVV_VNMADD <-> "vfnmadd.vv", + FVV_VMSUB <-> "vfmsub.vv", + FVV_VNMSUB <-> "vfnmsub.vv", + FVV_VMACC <-> "vfmacc.vv", + FVV_VNMACC <-> "vfnmacc.vv", + FVV_VMSAC <-> "vfmsac.vv", + FVV_VNMSAC <-> "vfnmsac.vv" +} + +mapping clause assembly = FVVMATYPE(funct6, vm, vs2, vs1, vd) + <-> fvvmatype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPFVV (VVTYPE Widening) *************************** */ +union clause ast = FWVVTYPE : (fwvvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fwvvfunct6 : fwvvfunct6 <-> bits(6) = { + FWVV_VADD <-> 0b110000, + FWVV_VSUB <-> 0b110010, + FWVV_VMUL <-> 0b111000 +} + +mapping clause encdec = FWVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_fwvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FWVVTYPE(funct6, 
vm, vs2, vs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FWVV_VADD => fp_add(rm_3b, fp_widen(vs2_val[i]), fp_widen(vs1_val[i])), + FWVV_VSUB => fp_sub(rm_3b, fp_widen(vs2_val[i]), fp_widen(vs1_val[i])), + FWVV_VMUL => fp_mul(rm_3b, fp_widen(vs2_val[i]), fp_widen(vs1_val[i])) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fwvvtype_mnemonic : fwvvfunct6 <-> string = { + FWVV_VADD <-> "vfwadd.vv", + FWVV_VSUB <-> "vfwsub.vv", + FWVV_VMUL <-> "vfwmul.vv" +} + +mapping clause assembly = FWVVTYPE(funct6, vm, vs2, vs1, vd) + <-> fwvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ******************** OPFVV (VVtype Widening Multiply-Add) ********************* */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = FWVVMATYPE : (fwvvmafunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fwvvmafunct6 : fwvvmafunct6 <-> bits(6) = { + FWVV_VMACC <-> 0b111100, + FWVV_VNMACC <-> 0b111101, + FWVV_VMSAC <-> 0b111110, + FWVV_VNMSAC <-> 0b111111 +} + +mapping clause encdec = FWVVMATYPE(funct6, vm, vs1, vs2, vd) if haveRVV() + <-> encdec_fwvvmafunct6(funct6) @ vm @ vs1 @ vs2 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FWVVMATYPE(funct6, vm, vs1, vs2, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = 
undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FWVV_VMACC => fp_muladd(rm_3b, fp_widen(vs1_val[i]), fp_widen(vs2_val[i]), vd_val[i]), + FWVV_VNMACC => fp_nmulsub(rm_3b, fp_widen(vs1_val[i]), fp_widen(vs2_val[i]), vd_val[i]), + FWVV_VMSAC => fp_mulsub(rm_3b, fp_widen(vs1_val[i]), fp_widen(vs2_val[i]), vd_val[i]), + FWVV_VNMSAC => fp_nmuladd(rm_3b, fp_widen(vs1_val[i]), fp_widen(vs2_val[i]), vd_val[i]) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fwvvmatype_mnemonic : fwvvmafunct6 <-> string = { + FWVV_VMACC <-> "vfwmacc.vv", + FWVV_VNMACC <-> "vfwnmacc.vv", + FWVV_VMSAC <-> "vfwmsac.vv", + FWVV_VNMSAC <-> "vfwnmsac.vv" +} + +mapping clause assembly = FWVVMATYPE(funct6, vm, vs1, vs2, vd) + <-> fwvvmatype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPFVV (WVTYPE Widening) *************************** */ +union clause ast = FWVTYPE : (fwvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fwvfunct6 : fwvfunct6 <-> bits(6) = { + FWV_VADD <-> 0b110100, + FWV_VSUB <-> 0b110110 +} + +mapping clause encdec = FWVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_fwvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FWVTYPE(funct6, vm, vs2, vs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FWV_VADD => fp_add(rm_3b, vs2_val[i], fp_widen(vs1_val[i])), + FWV_VSUB => fp_sub(rm_3b, vs2_val[i], fp_widen(vs1_val[i])) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fwvtype_mnemonic : fwvfunct6 <-> string = { + FWV_VADD <-> "vfwadd.wv", + FWV_VSUB <-> "vfwsub.wv" +} + +mapping clause assembly = FWVTYPE(funct6, vm, vs2, vs1, vd) + <-> fwvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ****************************** OPFVV (VFUNARY0) ******************************* */ +union clause ast = VFUNARY0 : (bits(1), regidx, vfunary0, regidx) + +mapping encdec_vfunary0_vs1 : vfunary0 <-> bits(5) = { + FV_CVT_XU_F <-> 0b00000, + 
FV_CVT_X_F <-> 0b00001, + FV_CVT_F_XU <-> 0b00010, + FV_CVT_F_X <-> 0b00011, + FV_CVT_RTZ_XU_F <-> 0b00110, + FV_CVT_RTZ_X_F <-> 0b00111 +} + +mapping clause encdec = VFUNARY0(vm, vs2, vfunary0, vd) if haveRVV() + <-> 0b010010 @ vm @ vs2 @ encdec_vfunary0_vs1(vfunary0) @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFUNARY0(vm, vs2, vfunary0, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match vfunary0 { + FV_CVT_XU_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16ToUi16(rm_3b, vs2_val[i]), + 32 => riscv_f32ToUi32(rm_3b, vs2_val[i]), + 64 => riscv_f64ToUi64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FV_CVT_X_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16ToI16(rm_3b, vs2_val[i]), + 32 => riscv_f32ToI32(rm_3b, vs2_val[i]), + 64 => riscv_f64ToI64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FV_CVT_F_XU => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_ui32ToF16(rm_3b, EXTZ(vs2_val[i])), + 32 => riscv_ui32ToF32(rm_3b, vs2_val[i]), + 64 => riscv_ui64ToF64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FV_CVT_F_X => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_i32ToF16(rm_3b, EXTS(vs2_val[i])), + 32 => riscv_i32ToF32(rm_3b, vs2_val[i]), + 64 => riscv_i64ToF64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FV_CVT_RTZ_XU_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16ToUi16(0b001, vs2_val[i]), + 32 => riscv_f32ToUi32(0b001, vs2_val[i]), + 64 => riscv_f64ToUi64(0b001, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FV_CVT_RTZ_X_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16ToI16(0b001, vs2_val[i]), + 32 => riscv_f32ToI32(0b001, vs2_val[i]), + 64 => riscv_f64ToI64(0b001, vs2_val[i]) + }; + write_fflags(fflags); + elem + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vfunary0_mnemonic : vfunary0 <-> string = { + FV_CVT_XU_F <-> "vfcvt.xu.f.v", + FV_CVT_X_F <-> "vfcvt.x.f.v", + FV_CVT_F_XU <-> "vfcvt.f.xu.v", + FV_CVT_F_X <-> "vfcvt.f.x.v", + FV_CVT_RTZ_XU_F <-> "vfcvt.rtz.xu.f.v", + FV_CVT_RTZ_X_F <-> "vfcvt.rtz.x.f.v" +} + +mapping clause assembly = VFUNARY0(vm, vs2, vfunary0, vd) + <-> vfunary0_mnemonic(vfunary0) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************** OPFVV (VFUNARY0 Widening) ************************** */ +union clause ast = VFWUNARY0 : (bits(1), regidx, vfwunary0, regidx) + +mapping encdec_vfwunary0_vs1 : vfwunary0 <-> bits(5) = { + FWV_CVT_XU_F <-> 0b01000, + FWV_CVT_X_F <-> 0b01001, + FWV_CVT_F_XU <-> 0b01010, + FWV_CVT_F_X <-> 0b01011, 
+ FWV_CVT_F_F <-> 0b01100, + FWV_CVT_RTZ_XU_F <-> 0b01110, + FWV_CVT_RTZ_X_F <-> 0b01111 +} + +mapping clause encdec = VFWUNARY0(vm, vs2, vfwunary0, vd) if haveRVV() + <-> 0b010010 @ vm @ vs2 @ encdec_vfwunary0_vs1(vfwunary0) @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFWUNARY0(vm, vs2, vfwunary0, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 8 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match vfwunary0 { + FWV_CVT_XU_F => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f16ToUi32(rm_3b, vs2_val[i]), + 32 => riscv_f32ToUi64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FWV_CVT_X_F => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f16ToI32(rm_3b, vs2_val[i]), + 32 => riscv_f32ToI64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FWV_CVT_F_XU => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => riscv_ui32ToF16(rm_3b, EXTZ(vs2_val[i])), + 16 => riscv_ui32ToF32(rm_3b, EXTZ(vs2_val[i])), + 32 => riscv_ui32ToF64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FWV_CVT_F_X => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => riscv_i32ToF16(rm_3b, EXTS(vs2_val[i])), + 16 => riscv_i32ToF32(rm_3b, EXTS(vs2_val[i])), + 32 => riscv_i32ToF64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FWV_CVT_F_F => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f16ToF32(rm_3b, vs2_val[i]), + 32 => riscv_f32ToF64(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FWV_CVT_RTZ_XU_F => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f16ToUi32(0b001, vs2_val[i]), + 32 => riscv_f32ToUi64(0b001, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FWV_CVT_RTZ_X_F => { + let (fflags, elem) : (bits_fflags, bits('o)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f16ToI32(0b001, vs2_val[i]), + 32 => riscv_f32ToI64(0b001, vs2_val[i]) + }; + write_fflags(fflags); + elem + } + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vfwunary0_mnemonic : vfwunary0 <-> string = { + FWV_CVT_XU_F <-> "vfwcvt.xu.f.v", + FWV_CVT_X_F <-> "vfwcvt.x.f.v", + FWV_CVT_F_XU <-> "vfwcvt.f.xu.v", + FWV_CVT_F_X <-> "vfwcvt.f.x.v", + FWV_CVT_F_F <-> "vfwcvt.f.f.v", + FWV_CVT_RTZ_XU_F <-> 
"vfwcvt.rtz.xu.f.v", + FWV_CVT_RTZ_X_F <-> "vfwcvt.rtz.x.f.v" +} + +mapping clause assembly = VFWUNARY0(vm, vs2, vfwunary0, vd) + <-> vfwunary0_mnemonic(vfwunary0) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ************************* OPFVV (VFUNARY0 Narrowing) ************************** */ +union clause ast = VFNUNARY0 : (bits(1), regidx, vfnunary0, regidx) + +mapping encdec_vfnunary0_vs1 : vfnunary0 <-> bits(5) = { + FNV_CVT_XU_F <-> 0b10000, + FNV_CVT_X_F <-> 0b10001, + FNV_CVT_F_XU <-> 0b10010, + FNV_CVT_F_X <-> 0b10011, + FNV_CVT_F_F <-> 0b10100, + FNV_CVT_ROD_F_F <-> 0b10101, + FNV_CVT_RTZ_XU_F <-> 0b10110, + FNV_CVT_RTZ_X_F <-> 0b10111 +} + +mapping clause encdec = VFNUNARY0(vm, vs2, vfnunary0, vd) if haveRVV() + <-> 0b010010 @ vm @ vs2 @ encdec_vfnunary0_vs1(vfnunary0) @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFNUNARY0(vm, vs2, vfnunary0, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) + then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match vfnunary0 { + FNV_CVT_XU_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => riscv_f16ToUi8(rm_3b, vs2_val[i]), + 16 => riscv_f32ToUi16(rm_3b, vs2_val[i]), + 32 => riscv_f64ToUi32(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_X_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => riscv_f16ToI8(rm_3b, vs2_val[i]), + 16 => riscv_f32ToI16(rm_3b, vs2_val[i]), + 32 => riscv_f64ToI32(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_F_XU => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_ui32ToF16(rm_3b, vs2_val[i]), + 32 => riscv_ui64ToF32(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_F_X => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_i32ToF16(rm_3b, vs2_val[i]), + 32 => riscv_i64ToF32(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_F_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f32ToF16(rm_3b, vs2_val[i]), + 32 => riscv_f64ToF32(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_ROD_F_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => { handle_illegal(); return RETIRE_FAIL }, + 16 => riscv_f32ToF16(0b110, vs2_val[i]), + 32 => riscv_f64ToF32(0b110, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_RTZ_XU_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => riscv_f16ToUi8(0b001, vs2_val[i]), + 16 => 
riscv_f32ToUi16(0b001, vs2_val[i]), + 32 => riscv_f64ToUi32(0b001, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FNV_CVT_RTZ_X_F => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 8 => riscv_f16ToI8(0b001, vs2_val[i]), + 16 => riscv_f32ToI16(0b001, vs2_val[i]), + 32 => riscv_f64ToI32(0b001, vs2_val[i]) + }; + write_fflags(fflags); + elem + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vfnunary0_mnemonic : vfnunary0 <-> string = { + FNV_CVT_XU_F <-> "vfncvt.xu.f.w", + FNV_CVT_X_F <-> "vfncvt.x.f.w", + FNV_CVT_F_XU <-> "vfncvt.f.xu.w", + FNV_CVT_F_X <-> "vfncvt.f.x.w", + FNV_CVT_F_F <-> "vfncvt.f.f.w", + FNV_CVT_ROD_F_F <-> "vfncvt.rod.f.f.w", + FNV_CVT_RTZ_XU_F <-> "vfncvt.rtz.xu.f.w", + FNV_CVT_RTZ_X_F <-> "vfncvt.rtz.x.f.w" +} + +mapping clause assembly = VFNUNARY0(vm, vs2, vfnunary0, vd) + <-> vfnunary0_mnemonic(vfnunary0) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ****************************** OPFVV (VFUNARY1) ******************************* */ +union clause ast = VFUNARY1 : (bits(1), regidx, vfunary1, regidx) + +mapping encdec_vfunary1_vs1 : vfunary1 <-> bits(5) = { + FVV_VSQRT <-> 0b00000, + FVV_VRSQRT7 <-> 0b00100, + FVV_VREC7 <-> 0b00101, + FVV_VCLASS <-> 0b10000 +} + +mapping clause encdec = VFUNARY1(vm, vs2, vfunary1, vd) if haveRVV() + <-> 0b010011 @ vm @ vs2 @ encdec_vfunary1_vs1(vfunary1) @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFUNARY1(vm, vs2, vfunary1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match vfunary1 { + FVV_VSQRT => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16Sqrt(rm_3b, vs2_val[i]), + 32 => riscv_f32Sqrt(rm_3b, vs2_val[i]), + 64 => riscv_f64Sqrt(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FVV_VRSQRT7 => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16Rsqrte7(rm_3b, vs2_val[i]), + 32 => riscv_f32Rsqrte7(rm_3b, vs2_val[i]), + 64 => riscv_f64Rsqrte7(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FVV_VREC7 => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16Recip7(rm_3b, vs2_val[i]), + 32 => riscv_f32Recip7(rm_3b, vs2_val[i]), + 64 => riscv_f64Recip7(rm_3b, vs2_val[i]) + }; + write_fflags(fflags); + elem + }, + FVV_VCLASS => { + let (fflags, elem) : (bits_fflags, bits('m)) = match 'm { + 16 => riscv_f16Class(vs2_val[i]), + 32 => riscv_f32Class(vs2_val[i]), + 64 => riscv_f64Class(vs2_val[i]) + }; + write_fflags(fflags); + elem + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vfunary1_mnemonic : vfunary1 <-> string = { + FVV_VSQRT <-> "vfsqrt.v", + FVV_VRSQRT7 <-> "vfrsqrt7.v", + 
FVV_VREC7 <-> "vfrec7.v", + FVV_VCLASS <-> "vfclass.v" +} + +mapping clause assembly = VFUNARY1(vm, vs2, vfunary1, vd) + <-> vfunary1_mnemonic(vfunary1) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* ****************************** OPFVV (VWFUNARY0) ****************************** */ +union clause ast = VFMVFS : (regidx, regidx) + +mapping clause encdec = VFMVFS(vs2, rd) if haveRVV() + <-> 0b010000 @ 0b1 @ vs2 @ 0b00000 @ 0b001 @ rd @ 0b1010111 if haveRVV() + +function clause execute(VFMVFS(vs2, rd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let num_elem = get_num_elem(0, SEW); + + if not(valid_fp_op(SEW, rm_3b)) | SEW > sizeof(flen) then { handle_illegal(); return RETIRE_FAIL }; + assert(num_elem > 0 & SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vs2); + F(rd) = if sizeof(flen) == SEW then vs2_val[0] + else NaN_box(vs2_val[0]); + vstart = EXTZ(0b0); + + RETIRE_SUCCESS +} + +mapping clause assembly = VFMVFS(vs2, rd) + <-> "vfmv.f.s" ^ spc() ^ freg_name(rd) ^ sep() ^ vreg_name(vs2) + +/* ******************************* OPFVF (VFtype) ******************************** */ +union clause ast = FVFTYPE : (fvffunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fvffunct6 : fvffunct6 <-> bits(6) = { + VF_VADD <-> 0b000000, + VF_VSUB <-> 0b000010, + VF_VMIN <-> 0b000100, + VF_VMAX <-> 0b000110, + VF_VSGNJ <-> 0b001000, + VF_VSGNJN <-> 0b001001, + VF_VSGNJX <-> 0b001010, + VF_VSLIDE1UP <-> 0b001110, + VF_VSLIDE1DOWN <-> 0b001111, + VF_VDIV <-> 0b100000, + VF_VRDIV <-> 0b100001, + VF_VMUL <-> 0b100100, + VF_VRSUB <-> 0b100111 +} + +mapping clause encdec = FVFTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_fvffunct6(funct6) @ vm @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FVFTYPE(funct6, vm, vs2, rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VF_VADD => fp_add(rm_3b, vs2_val[i], rs1_val), + VF_VSUB => fp_sub(rm_3b, vs2_val[i], rs1_val), + VF_VRSUB => fp_sub(rm_3b, rs1_val, vs2_val[i]), + VF_VMIN => fp_min(vs2_val[i], rs1_val), + VF_VMAX => fp_max(vs2_val[i], rs1_val), + VF_VMUL => fp_mul(rm_3b, vs2_val[i], rs1_val), + VF_VDIV => fp_div(rm_3b, vs2_val[i], rs1_val), + VF_VRDIV => fp_div(rm_3b, rs1_val, vs2_val[i]), + VF_VSGNJ => rs1_val[('m - 1)..('m - 1)] @ vs2_val[i][('m - 2)..0], + VF_VSGNJN => (0b1 ^ rs1_val[('m - 1)..('m - 1)]) @ vs2_val[i][('m - 2)..0], + VF_VSGNJX => (vs2_val[i][('m - 1)..('m - 1)] ^ rs1_val[('m - 1)..('m - 1)]) @ vs2_val[i][('m - 2)..0], + VF_VSLIDE1UP => { + if vs2 == vd then { handle_illegal(); return RETIRE_FAIL }; + if i == 0 then rs1_val else vs2_val[i - 1] + }, + VF_VSLIDE1DOWN => { + if 
vs2 == vd then { handle_illegal(); return RETIRE_FAIL }; + let last_elem = get_end_element(); + assert(last_elem < num_elem); + if i < last_elem then vs2_val[i + 1] else rs1_val + } + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fvftype_mnemonic : fvffunct6 <-> string = { + VF_VADD <-> "vfadd.vf", + VF_VSUB <-> "vfsub.vf", + VF_VMIN <-> "vfmin.vf", + VF_VMAX <-> "vfmax.vf", + VF_VSGNJ <-> "vfsgnj.vf", + VF_VSGNJN <-> "vfsgnjn.vf", + VF_VSGNJX <-> "vfsgnjx.vf", + VF_VSLIDE1UP <-> "vfslide1up.vf", + VF_VSLIDE1DOWN <-> "vfslide1down.vf", + VF_VDIV <-> "vfdiv.vf", + VF_VRDIV <-> "vfrdiv.vf", + VF_VMUL <-> "vfmul.vf", + VF_VRSUB <-> "vfrsub.vf" +} + +mapping clause assembly = FVFTYPE(funct6, vm, vs2, rs1, vd) + <-> fvftype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ************************* OPFVF (VFtype Multiply-Add) ************************* */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = FVFMATYPE : (fvfmafunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fvfmafunct6 : fvfmafunct6 <-> bits(6) = { + VF_VMADD <-> 0b101000, + VF_VNMADD <-> 0b101001, + VF_VMSUB <-> 0b101010, + VF_VNMSUB <-> 0b101011, + VF_VMACC <-> 0b101100, + VF_VNMACC <-> 0b101101, + VF_VMSAC <-> 0b101110, + VF_VNMSAC <-> 0b101111 +} + +mapping clause encdec = FVFMATYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_fvfmafunct6(funct6) @ vm @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FVFMATYPE(funct6, vm, vs2, rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VF_VMACC => fp_muladd(rm_3b, rs1_val, vs2_val[i], vd_val[i]), + VF_VNMACC => fp_nmulsub(rm_3b, rs1_val, vs2_val[i], vd_val[i]), + VF_VMSAC => fp_mulsub(rm_3b, rs1_val, vs2_val[i], vd_val[i]), + VF_VNMSAC => fp_nmuladd(rm_3b, rs1_val, vs2_val[i], vd_val[i]), + VF_VMADD => fp_muladd(rm_3b, rs1_val, vd_val[i], vs2_val[i]), + VF_VNMADD => fp_nmulsub(rm_3b, rs1_val, vd_val[i], vs2_val[i]), + VF_VMSUB => fp_mulsub(rm_3b, rs1_val, vd_val[i], vs2_val[i]), + VF_VNMSUB => fp_nmuladd(rm_3b, rs1_val, vd_val[i], vs2_val[i]) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fvfmatype_mnemonic : fvfmafunct6 <-> string = { + VF_VMADD <-> "vfmadd.vf", + VF_VNMADD <-> "vfnmadd.vf", + VF_VMSUB <-> "vfmsub.vf", + VF_VNMSUB <-> "vfnmsub.vf", + VF_VMACC <-> "vfmacc.vf", + VF_VNMACC <-> "vfnmacc.vf", + VF_VMSAC <-> "vfmsac.vf", + VF_VNMSAC <-> "vfnmsac.vf" +} + +mapping clause assembly = FVFMATYPE(funct6, vm, vs2, rs1, vd) + <-> 
fvfmatype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPFVF (VFTYPE Widening) *************************** */ +union clause ast = FWVFTYPE : (fwvffunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fwvffunct6 : fwvffunct6 <-> bits(6) = { + FWVF_VADD <-> 0b110000, + FWVF_VSUB <-> 0b110010, + FWVF_VMUL <-> 0b111000 +} + +mapping clause encdec = FWVFTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_fwvffunct6(funct6) @ vm @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FWVFTYPE(funct6, vm, vs2, rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FWVF_VADD => fp_add(rm_3b, fp_widen(vs2_val[i]), fp_widen(rs1_val)), + FWVF_VSUB => fp_sub(rm_3b, fp_widen(vs2_val[i]), fp_widen(rs1_val)), + FWVF_VMUL => fp_mul(rm_3b, fp_widen(vs2_val[i]), fp_widen(rs1_val)) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fwvftype_mnemonic : fwvffunct6 <-> string = { + FWVF_VADD <-> "vfwadd.vf", + FWVF_VSUB <-> "vfwsub.vf", + FWVF_VMUL <-> "vfwmul.vf" +} + +mapping clause assembly = FWVFTYPE(funct6, vm, vs2, rs1, vd) + <-> fwvftype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ******************** OPFVF (VFtype Widening Multiply-Add) ********************* */ +/* Multiply-Add instructions switch the order of source operands in assembly (vs1/rs1 before vs2) */ +union clause ast = FWVFMATYPE : (fwvfmafunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fwvfmafunct6 : fwvfmafunct6 <-> bits(6) = { + FWVF_VMACC <-> 0b111100, + FWVF_VNMACC <-> 0b111101, + FWVF_VMSAC <-> 0b111110, + FWVF_VNMSAC <-> 0b111111 +} + +mapping clause encdec = FWVFMATYPE(funct6, vm, rs1, vs2, vd) if haveRVV() + <-> encdec_fwvfmafunct6(funct6) @ vm @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FWVFMATYPE(funct6, vm, rs1, vs2, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & 
SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FWVF_VMACC => fp_muladd(rm_3b, fp_widen(rs1_val), fp_widen(vs2_val[i]), vd_val[i]), + FWVF_VNMACC => fp_nmulsub(rm_3b, fp_widen(rs1_val), fp_widen(vs2_val[i]), vd_val[i]), + FWVF_VMSAC => fp_mulsub(rm_3b, fp_widen(rs1_val), fp_widen(vs2_val[i]), vd_val[i]), + FWVF_VNMSAC => fp_nmuladd(rm_3b, fp_widen(rs1_val), fp_widen(vs2_val[i]), vd_val[i]) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fwvfmatype_mnemonic : fwvfmafunct6 <-> string = { + FWVF_VMACC <-> "vfwmacc.vf", + FWVF_VNMACC <-> "vfwnmacc.vf", + FWVF_VMSAC <-> "vfwmsac.vf", + FWVF_VNMSAC <-> "vfwnmsac.vf" +} + +mapping clause assembly = FWVFMATYPE(funct6, vm, rs1, vs2, vd) + <-> fwvfmatype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ vreg_name(vs2) ^ maybe_vmask(vm) + +/* *************************** OPFVF (WFTYPE Widening) *************************** */ +union clause ast = FWFTYPE : (fwffunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fwffunct6 : fwffunct6 <-> bits(6) = { + FWF_VADD <-> 0b110100, + FWF_VSUB <-> 0b110110 +} + +mapping clause encdec = FWFTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_fwffunct6(funct6) @ vm @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FWFTYPE(funct6, vm, vs2, rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + FWF_VADD => fp_add(rm_3b, vs2_val[i], fp_widen(rs1_val)), + FWF_VSUB => fp_sub(rm_3b, vs2_val[i], fp_widen(rs1_val)) + } + } + }; + + write_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fwftype_mnemonic : fwffunct6 <-> string = { + FWF_VADD <-> "vfwadd.wf", + FWF_VSUB <-> "vfwsub.wf" +} + +mapping clause assembly = FWFTYPE(funct6, vm, vs2, rs1, vd) + <-> fwftype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ 
sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) + +/* ************************** OPFVF (Merge Instruction) ************************** */ +/* This instruction operates on all body elements regardless of mask value */ +union clause ast = VFMERGE : (regidx, regidx, regidx) + +mapping clause encdec = VFMERGE(vs2, rs1, vd) if haveRVV() + <-> 0b010111 @ 0b0 @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFMERGE(vs2, rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let start_element = get_start_element(); + let end_element = get_end_element(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ + let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ + + if vd == vreg_name("v0") | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + let tail_ag : agtype = get_vtype_vta(); + foreach (i from 0 to (num_elem - 1)) { + if i < start_element then { + result[i] = vd_val[i] + } else if i > end_element | i >= real_num_elem then { + if tail_ag == UNDISTURBED then { + result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + result[i] = vd_val[i] /* TODO: configuration support */ + } + } else { + /* the merge operates on all body elements */ + result[i] = if vm_val[i] then rs1_val else vs2_val[i] + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VFMERGE(vs2, rs1, vd) + <-> "vfmerge.vfm" ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ sep() ^ "v0" + +/* ************************** OPFVF (Move Instruction) *************************** */ +/* This instruction shares the encoding with vfmerge.vfm, but with vm=1 and vs2=v0 */ +union clause ast = VFMV : (regidx, regidx) + +mapping clause encdec = VFMV(rs1, vd) if haveRVV() + <-> 0b010111 @ 0b1 @ 0b00000 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFMV(rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then result[i] = rs1_val + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VFMV(rs1, vd) + <-> "vfmv.v.f" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) + +/* ****************************** OPFVF (VRFUNARY0) ****************************** */ +union clause ast = VFMVSF : (regidx, regidx) + 
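+/* Note (illustrative, based on the execute clause below): vfmv.s.f copies the
+   scalar FP operand into element 0 of vd only; elements 1..(num_elem-1) are
+   treated as tail elements, so with tail-undisturbed they keep their previous
+   values, and with tail-agnostic this model currently also leaves them
+   unchanged (see the TODO on configuration support). */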
+mapping clause encdec = VFMVSF(rs1, vd) if haveRVV() + <-> 0b010000 @ 0b1 @ 0b00000 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VFMVSF(rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let num_elem = get_num_elem(0, SEW); + + if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(num_elem > 0 & SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, 0, vd_val, vm_val); + + /* one body element */ + if mask[0] then result[0] = rs1_val; + + /* others treated as tail elements */ + let tail_ag : agtype = get_vtype_vta(); + if tail_ag == UNDISTURBED then { + foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] + } else if tail_ag == AGNOSTIC then { + foreach (i from 1 to (num_elem - 1)) result[i] = vd_val[i] /* TODO: configuration support */ + }; + + write_vreg(num_elem, SEW, 0, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping clause assembly = VFMVSF(rs1, vd) + <-> "vfmv.s.f" ^ spc() ^ vreg_name(vd) ^ sep() ^ freg_name(rs1) diff --git a/model/riscv_insts_vext_mask.sail b/model/riscv_insts_vext_mask.sail index cac369152..3230ed333 100755 --- a/model/riscv_insts_vext_mask.sail +++ b/model/riscv_insts_vext_mask.sail @@ -1,3 +1,41 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. 
*/ +/*=================================================================================*/ + /* ******************************************************************************* */ /* This file implements part of the vector extension. */ /* Chapter 15: vector mask instructions */ @@ -40,13 +78,13 @@ function clause execute(MMTYPE(funct6, vs2, vs1, vd)) = { if mask[i] then { let res : bool = match funct6 { MM_VMAND => vs2_val[i] & vs1_val[i], - MM_VMNAND => ~(vs2_val[i] & vs1_val[i]), - MM_VMANDNOT => vs2_val[i] & ~(vs1_val[i]), + MM_VMNAND => not(vs2_val[i] & vs1_val[i]), + MM_VMANDNOT => vs2_val[i] & not(vs1_val[i]), MM_VMXOR => vs2_val[i] ^ vs1_val[i], MM_VMOR => vs2_val[i] | vs1_val[i], - MM_VMNOR => ~(vs2_val[i] | vs1_val[i]), - MM_VMORNOT => vs2_val[i] | ~(vs1_val[i]), - MM_VMXNOR => ~(vs2_val[i] ^ vs1_val[i]) + MM_VMNOR => not(vs2_val[i] | vs1_val[i]), + MM_VMORNOT => vs2_val[i] | not(vs1_val[i]), + MM_VMXNOR => not(vs2_val[i] ^ vs1_val[i]) }; result[i] = res; } @@ -91,7 +129,7 @@ function clause execute(VCPOP_M(vm, vs2, rd)) = { mask : vector('n, dec, bool) = undefined; /* Value of vstart must be 0 */ - if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vs2_val, vm_val); @@ -128,7 +166,7 @@ function clause execute(VFIRST_M(vm, vs2, rd)) = { mask : vector('n, dec, bool) = undefined; /* Value of vstart must be 0 */ - if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vs2_val, vm_val); @@ -168,10 +206,10 @@ function clause execute(VMSBF_M(vm, vs2, vd)) = { mask : vector('n, dec, bool) = undefined; /* Value of vstart must be 0 */ - if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; /* If masking is enabled, then dest reg cannot be v0 */ - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; @@ -215,10 +253,10 @@ function clause execute(VMSIF_M(vm, vs2, vd)) = { mask : vector('n, dec, bool) = undefined; /* Value of vstart must be 0 */ - if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; /* If masking is enabled, then dest reg cannot be v0 */ - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; @@ -262,10 +300,10 @@ function clause execute(VMSOF_M(vm, vs2, vd)) = { mask : vector('n, dec, bool) = undefined; /* Value of vstart must be 0 */ - if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; /* If masking is enabled, then dest reg cannot be v0 */ - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return 
RETIRE_FAIL }; @@ -275,7 +313,7 @@ function clause execute(VMSOF_M(vm, vs2, vd)) = { found_elem : bool = false; foreach (i from 0 to (num_elem - 1)) { if mask[i] then { - if vs2_val[i] & ~(found_elem) then { + if vs2_val[i] & not(found_elem) then { result[i] = true; found_elem = true } else { @@ -313,10 +351,10 @@ function clause execute(VIOTA_M(vm, vs2, vd)) = { mask : vector('n, dec, bool) = undefined; /* Value of vstart must be 0 */ - if ~(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; /* If masking is enabled, then dest reg cannot be v0 */ - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; @@ -350,7 +388,7 @@ function clause execute(VID_V(vm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; diff --git a/model/riscv_insts_vext_mem.sail b/model/riscv_insts_vext_mem.sail index 0e50c0721..c38d1a7dd 100644 --- a/model/riscv_insts_vext_mem.sail +++ b/model/riscv_insts_vext_mem.sail @@ -1,7 +1,45 @@ -/* ************************************************************************ */ -/* This file implements part of the vector extension. */ -/* Chapter 7: Vector Loads and Stores */ -/* ************************************************************************ */ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. 
*/ +/*=================================================================================*/ + +/* ******************************************************************************* */ +/* This file implements part of the vector extension. */ +/* Chapter 7: Vector Loads and Stores */ +/* ******************************************************************************* */ mapping nfields_int : bits(3) <-> {|1, 2, 3, 4, 5, 6, 7, 8|} = { 0b000 <-> 1, @@ -115,7 +153,7 @@ function clause execute(VLETYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vle(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -128,7 +166,7 @@ mapping vletype_mnemonic : vlewidth <-> string = { } mapping clause assembly = VLETYPE(vm, rs1, width, vd) - <-> vletype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + <-> vletype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ maybe_vmask(vm) /* ******************** Vector Store Unit-Stride Normal (nf=0, mop=0, sumop=0) ******************* */ union clause ast = VSETYPE : (bits(1), regidx, vlewidth, regidx) @@ -167,7 +205,7 @@ function process_vse (vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) = { let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, vs3_val[i], false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -203,7 +241,7 @@ mapping vsetype_mnemonic : vlewidth <-> string = { } mapping clause assembly = VSETYPE(vm, rs1, width, vs3) - <-> vsetype_mnemonic(width) ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + <-> vsetype_mnemonic(width) ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ maybe_vmask(vm) /* ************************** Vector Load Strided Normal (nf=0, mop=10) ************************** */ union clause ast = VLSETYPE : (bits(1), regidx, regidx, vlewidth, regidx) @@ -261,7 +299,7 @@ function clause execute(VLSETYPE(vm, rs2, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlse(vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -274,7 +312,7 @@ mapping vlsetype_mnemonic : vlewidth <-> string = { } mapping clause assembly = VLSETYPE(vm, rs2, rs1, width, vd) - <-> vlsetype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2)^ sep() ^ maybe_vmask(vm) + <-> vlsetype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(rs2)^ maybe_vmask(vm) /* ************************** Vector Store Strided Normal (nf=0, mop=10) ************************* */ union clause ast = VSSETYPE : (bits(1), regidx, regidx, vlewidth, regidx) @@ -314,7 +352,7 @@ function process_vsse (vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, vs3_val[i], false, false, false); match 
(res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -350,7 +388,7 @@ mapping vssetype_mnemonic : vlewidth <-> string = { } mapping clause assembly = VSSETYPE(vm, rs2, rs1, width, vs3) - <-> vssetype_mnemonic(width) ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2)^ sep() ^ maybe_vmask(vm) + <-> vssetype_mnemonic(width) ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(rs2)^ maybe_vmask(vm) /* ************************ Vector Load Indexed Unordered (nf=0, mop=01) ************************* */ union clause ast = VLUXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) @@ -408,13 +446,13 @@ function clause execute(VLUXEITYPE(vm, vs2, rs1, width, vd)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } mapping clause assembly = VLUXEITYPE(vm, vs2, rs1, width, vd) - <-> "vluxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vluxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* ************************* Vector Load Indexed Ordered (nf=0, mop=11) ************************** */ union clause ast = VLOXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) @@ -431,13 +469,13 @@ function clause execute(VLOXEITYPE(vm, vs2, rs1, width, vd)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } mapping clause assembly = VLOXEITYPE(vm, vs2, rs1, width, vd) - <-> "vloxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vloxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* ************************ Vector Store Indexed Unordered (nf=0, mop=01) ************************ */ union clause ast = VSUXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) @@ -477,7 +515,7 @@ function process_vsxei (vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow let res : MemoryOpResult(bool) = mem_write_value(paddr, EEW_data_bytes, vs3_val[i], false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -506,7 +544,7 @@ function clause execute(VSUXEITYPE(vm, vs2, rs1, width, vs3)) = { } 
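+/* Worked example (illustrative) of the index/data EMUL relation used by the
+   indexed loads and stores above: EMUL_index_pow = EEW_index_pow - EEW_data_pow
+   + EMUL_data_pow, i.e. EMUL_index = (EEW_index / EEW_data) * EMUL_data.
+   For instance, with a data SEW of 32, LMUL = 1 and a 16-bit index EEW, the
+   index operand occupies EMUL = 1/2 of a register group. */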
mapping clause assembly = VSUXEITYPE(vm, vs2, rs1, width, vs3) - <-> "vsuxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vsuxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* ************************* Vector Store Indexed Ordered (nf=0, mop=11) ************************* */ union clause ast = VSOXEITYPE : (bits(1), regidx, regidx, vlewidth, regidx) @@ -527,7 +565,7 @@ function clause execute(VSOXEITYPE(vm, vs2, rs1, width, vs3)) = { } mapping clause assembly = VSOXEITYPE(vm, vs2, rs1, width, vs3) - <-> "vsoxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vsoxei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* ************* Vector Load Unit-Stride Fault-Only-First (nf=0, mop=0, lumop=10000) ************* */ union clause ast = VLEFFTYPE : (bits(1), regidx, vlewidth, regidx) @@ -613,7 +651,7 @@ function clause execute(VLEFFTYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vleff(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -626,7 +664,7 @@ mapping vlefftype_mnemonic : vlewidth <-> string = { } mapping clause assembly = VLEFFTYPE(vm, rs1, width, vd) - <-> vlefftype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + <-> vlefftype_mnemonic(width) ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ maybe_vmask(vm) /* ******************** Vector Load Unit-Stride Segment (mop=0, lumop=00000) ********************* */ union clause ast = VLSEGTYPE : (bits(3), bits(1), regidx, vlewidth, regidx) @@ -693,13 +731,13 @@ function clause execute(VLSEGTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); /* # of element of each register group */ let nf_int = nfields_int(nf); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlseg(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } mapping clause assembly = VLSEGTYPE(nf, vm, rs1, width, vd) - <-> "vlseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ maybe_vmask(vm) + <-> "vlseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ maybe_vmask(vm) /* ************ Vector Load Unit-Stride Segment Fault-Only-First (mop=0, lumop=10000) ************ */ union clause ast = VLSEGFFTYPE : (bits(3), bits(1), regidx, vlewidth, regidx) @@ -786,13 +824,13 @@ function clause execute(VLSEGFFTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlsegff(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } mapping clause assembly = VLSEGTYPE(nf, vm, rs1, width, vd) - <-> 
"vlseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ "ff.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + <-> "vlseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ "ff.v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ maybe_vmask(vm) /* ******************** Vector Store Unit-Stride Segment (mop=0, sumop=00000) ******************** */ union clause ast = VSSEGTYPE : (bits(3), bits(1), regidx, vlewidth, regidx) @@ -831,7 +869,7 @@ function process_vsseg (nf, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, one_elem_val, false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -862,7 +900,7 @@ function clause execute(VSSEGTYPE(nf, vm, rs1, width, vs3)) = { } mapping clause assembly = VSSEGTYPE(nf, vm, rs1, width, vs3) - <-> "vsseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ maybe_vmask(vm) + <-> "vsseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ maybe_vmask(vm) /* **************************** Vector Load Strided Segment (mop=10) ***************************** */ union clause ast = VLSSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) @@ -922,13 +960,13 @@ function clause execute(VLSSEGTYPE(nf, vm, rs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlsseg(nf_int, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } mapping clause assembly = VLSSEGTYPE(nf, vm, rs2, rs1, width, vd) - <-> "vlsseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2) ^ sep() ^ maybe_vmask(vm) + <-> "vlsseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(rs2) ^ maybe_vmask(vm) /* **************************** Vector Store Strided Segment (mop=10) **************************** */ union clause ast = VSSSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) @@ -970,7 +1008,7 @@ function process_vssseg (nf, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_ let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, one_elem_val, false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -1001,7 +1039,7 @@ function clause execute(VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3)) = { } mapping clause assembly = VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3) - <-> "vssseg" ^ nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2) ^ sep() ^ maybe_vmask(vm) + <-> "vssseg" ^ 
nfields_string(nf) ^ "e" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(rs2) ^ maybe_vmask(vm) /* *********************** Vector Load Indexed Unordered Segment (mop=01) ************************ */ union clause ast = VLUXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) @@ -1062,13 +1100,13 @@ function clause execute(VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } mapping clause assembly = VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd) - <-> "vluxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vluxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* ************************ Vector Load Indexed Ordered Segment (mop=11) ************************* */ union clause ast = VLOXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) @@ -1086,13 +1124,13 @@ function clause execute(VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); - if ~(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } mapping clause assembly = VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd) - <-> "vloxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vloxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* *********************** Vector Store Indexed Unordered Segment (mop=01) *********************** */ union clause ast = VSUXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) @@ -1135,7 +1173,7 @@ function process_vsxseg (nf, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_inde let res : MemoryOpResult(bool) = mem_write_value(paddr, EEW_data_bytes, one_elem_val, false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -1166,7 +1204,7 @@ function clause execute(VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = { } mapping clause assembly = VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3) - <-> "vsuxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vsuxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ 
reg_name(vs2) ^ maybe_vmask(vm) /* ************************ Vector Store Indexed Ordered Segment (mop=11) ************************ */ union clause ast = VSOXSEGTYPE : (bits(3), bits(1), regidx, regidx, vlewidth, regidx) @@ -1188,7 +1226,7 @@ function clause execute(VSOXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = { } mapping clause assembly = VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3) - <-> "vsoxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) ^ sep() ^ reg_name(vs2) ^ sep() ^ maybe_vmask(vm) + <-> "vsoxseg" ^ nfields_string(nf) ^ "ei" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" ^ sep() ^ reg_name(vs2) ^ maybe_vmask(vm) /* ************** Vector Load Unit-Stride Whole Register (vm=1, mop=0, lumop=01000) ************** */ union clause ast = VLRETYPE : (bits(3), regidx, vlewidth, regidx) @@ -1268,13 +1306,13 @@ function clause execute(VLRETYPE(nf, rs1, width, vd)) = { let nf_int = nfields_int(nf); assert(elem_per_reg >= 0); - if ~(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8) then { handle_illegal(); return RETIRE_FAIL }; + if not(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8) then { handle_illegal(); return RETIRE_FAIL }; process_vlre(nf_int, vd, load_width_bytes, rs1, elem_per_reg) } mapping clause assembly = VLRETYPE(nf, rs1, width, vd) - <-> "vl" ^ nfields_string(nf) ^ "re" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ reg_name(rs1) + <-> "vl" ^ nfields_string(nf) ^ "re" ^ vlewidth_bitsnumberstr(width) ^ ".v" ^ spc() ^ vreg_name(vd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" /* ************* Vector Store Unit-Stride Whole Register (vm=1, mop=0, lumop=01000) ************** */ union clause ast = VSRETYPE : (bits(3), regidx, regidx) @@ -1312,7 +1350,7 @@ function process_vsre (nf, load_width_bytes, rs1, vs3, elem_per_reg) = { let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, one_elem_val, false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -1347,7 +1385,7 @@ function process_vsre (nf, load_width_bytes, rs1, vs3, elem_per_reg) = { let res : MemoryOpResult(bool) = mem_write_value(paddr, load_width_bytes, vs_val[i], false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -1372,13 +1410,13 @@ function clause execute(VSRETYPE(nf, rs1, vs3)) = { let nf_int = nfields_int(nf); assert(elem_per_reg >= 0); - if ~(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8) then { handle_illegal(); return RETIRE_FAIL }; + if not(nf_int == 1 | nf_int == 2 | nf_int == 4 | nf_int == 8) then { handle_illegal(); return RETIRE_FAIL }; process_vsre(nf_int, load_width_bytes, rs1, vs3, elem_per_reg) } mapping clause assembly = VSRETYPE(nf, rs1, vs3) - <-> "vs" ^ nfields_string(nf) ^ "r.v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ reg_name(rs1) + <-> "vs" ^ nfields_string(nf) ^ "r.v" ^ spc() ^ vreg_name(vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" /* *********** Vector Mask Load/Store Unit-Stride 
(nf=0, mop=0, lumop or sumop=01011) ************ */ union clause ast = VMTYPE : (regidx, regidx, vmlsop) @@ -1442,7 +1480,7 @@ function process_vm(vd_or_vs3, rs1, EMUL_pow, num_elem, op) = { let res : MemoryOpResult(bool) = mem_write_value(paddr, 1, vd_or_vs3_val[i], false, false, false); match (res) { MemValue(true) => status = RETIRE_SUCCESS, - MemValue(false) => internal_error("store got false from mem_write_value"), + MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), MemException(e) => { handle_mem_exception(vaddr, e); status = RETIRE_FAIL } } } @@ -1477,4 +1515,4 @@ mapping vmtype_mnemonic : vmlsop <-> string = { } mapping clause assembly = VMTYPE(rs1, vd_or_vs3, op) - <-> vmtype_mnemonic(op) ^ spc() ^ vreg_name(vd_or_vs3) ^ sep() ^ reg_name(rs1) + <-> vmtype_mnemonic(op) ^ spc() ^ vreg_name(vd_or_vs3) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" diff --git a/model/riscv_insts_vext_utils.sail b/model/riscv_insts_vext_utils.sail index ff87d807a..a5c28916c 100755 --- a/model/riscv_insts_vext_utils.sail +++ b/model/riscv_insts_vext_utils.sail @@ -1,6 +1,44 @@ -/* ************************************************************************** */ -/* This file implements functions used by vector instructions. */ -/* ************************************************************************** */ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + +/* ******************************************************************************* */ +/* This file implements functions used by vector instructions. 
*/ +/* ******************************************************************************* */ /* Vector mask mapping */ mapping maybe_vmask : string <-> bits(1) = { @@ -12,13 +50,25 @@ mapping maybe_vmask : string <-> bits(1) = { val valid_eew_emul : (int, int) -> bool effect {rreg} function valid_eew_emul(EEW, EMUL_pow) = { let ELEN = int_power(2, get_elen_pow()); - EEW >= 8 & EEW <= ELEN & EMUL_pow >= -3 & EMUL_pow <= 3; + EEW >= 8 & EEW <= ELEN & EMUL_pow >= -3 & EMUL_pow <= 3 } /* Check for vstart value */ val assert_vstart : int -> bool effect {rreg} function assert_vstart(i) = { - unsigned(vstart) == i; + unsigned(vstart) == i +} + +/* Check for valid floating-point operation types + * 1. Valid element width of floating-point numbers + * 2. Valid floating-point rounding mode + */ +val valid_fp_op : ({|8, 16, 32, 64|}, bits(3)) -> bool +function valid_fp_op(SEW, rm_3b) = { + /* 128-bit floating-point values will be supported in future extensions */ + let valid_sew = (SEW >= 16 & SEW <= 128); + let valid_rm = not(rm_3b == 0b101 | rm_3b == 0b110 | rm_3b == 0b111); + valid_sew & valid_rm } /* Check for valid destination register when vector masking is enabled: @@ -55,13 +105,13 @@ function valid_reg_overlap(rs, rd, EMUL_pow_rs, EMUL_pow_rd) = { /* Scalar register shaping */ val get_scalar : forall 'n, 'n >= 8. (regidx, int('n)) -> bits('n) effect {escape, rreg} -function get_scalar(rs1, vsew_bits) = { - if sizeof(xlen) > vsew_bits then { +function get_scalar(rs1, SEW) = { + if sizeof(xlen) > SEW then { /* Least significant SEW bits */ - X(rs1)[vsew_bits - 1 .. 0] - } else if sizeof(xlen) < vsew_bits then { + X(rs1)[SEW - 1 .. 0] + } else if sizeof(xlen) < SEW then { /* Sign extend to SEW */ - EXTS(vsew_bits, X(rs1)) + EXTS(SEW, X(rs1)) } else { X(rs1) } @@ -131,7 +181,7 @@ function init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val) = { result[i] = vd_val[i]; /* TODO: configuration support */ }; mask[i] = false - } else if ~(vm_val[i]) then { + } else if not(vm_val[i]) then { /* Inactive body elements defined by vm */ if mask_ag == UNDISTURBED then { result[i] = vd_val[i] @@ -213,7 +263,7 @@ function init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val) = { /* Mask tail is always agnostic */ result[i] = vd_val[i]; /* TODO: configuration support */ mask[i] = false - } else if ~(vm_val[i]) then { + } else if not(vm_val[i]) then { /* Inactive body elements defined by vm */ if mask_ag == UNDISTURBED then { result[i] = vd_val[i] @@ -256,7 +306,7 @@ function NaN_unbox(regval, 'm) = { } /* Check if the floating point number is a signaling NaN */ -val f_is_SNaN : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +val f_is_SNaN : forall 'm, 'm in {16, 32, 64, 128}. bits('m) -> bool function f_is_SNaN xf = { match 'm { 16 => (xf[14..10] == ones()) & (xf[9..9] == zeros()) & (xf[8..0] != zeros()), @@ -267,7 +317,7 @@ function f_is_SNaN xf = { } /* Either QNaN or SNan */ -val f_is_NaN : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +val f_is_NaN : forall 'm, 'm in {16, 32, 64, 128}. bits('m) -> bool function f_is_NaN xf = { match 'm { 16 => (xf[14..10] == ones()) & (xf[9..0] != zeros()), @@ -277,7 +327,7 @@ function f_is_NaN xf = { } } -val f_is_neg_zero : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +val f_is_neg_zero : forall 'm, 'm in {16, 32, 64, 128}. 
bits('m) -> bool function f_is_neg_zero xf = { match 'm { 16 => (xf[15..15] == ones()) & (xf[14..0] == zeros()), @@ -287,7 +337,7 @@ function f_is_neg_zero xf = { } } -val f_is_pos_zero : forall 'm, ('m in {16, 32, 64, 128} & 'm <= flen). bits('m) -> bool +val f_is_pos_zero : forall 'm, 'm in {16, 32, 64, 128}. bits('m) -> bool function f_is_pos_zero xf = { match 'm { 16 => (xf == zeros()), @@ -299,19 +349,20 @@ function f_is_pos_zero xf = { /* Scalar register shaping for floating point operations */ val get_scalar_fp : forall 'n, 'n in {16, 32, 64, 128}. (regidx, int('n)) -> bits('n) effect {escape, rreg} -function get_scalar_fp(rs1, vsew_bits) = { - if sizeof(flen) > vsew_bits then { +function get_scalar_fp(rs1, SEW) = { + if sizeof(flen) >= SEW then { /* Least significant SEW bits */ - NaN_unbox(F(rs1), vsew_bits) + NaN_unbox(F(rs1), SEW) } else { - canonical_NaN(vsew_bits) + assert(false, "invalid vector floating-point type width: FLEN < SEW"); + zeros() } } /* Shift amounts */ val get_shift_amount : forall 'n 'm, 0 <= 'n & 'm in {8, 16, 32, 64}. (bits('n), int('m)) -> nat effect {escape} -function get_shift_amount(bit_val, vsew_bits) = { - let lowlog2bits = log2(vsew_bits); +function get_shift_amount(bit_val, SEW) = { + let lowlog2bits = log2(SEW); assert(0 < lowlog2bits & lowlog2bits < 'n); unsigned(bit_val[lowlog2bits - 1 .. 0]); } @@ -328,7 +379,7 @@ function get_fixed_rounding_incr(vec_elem, shift_amount) = { (slice(vec_elem, shift_amount - 1, 1) == 0b1) & (slice(vec_elem, 0, shift_amount - 1) != zeros() | slice(vec_elem, shift_amount, 1) == 0b1)), 0b10 => 0b0, 0b11 => bool_to_bits( - ~(slice(vec_elem, shift_amount, 1) == 0b1) & (slice(vec_elem, 0, shift_amount) != zeros())) + not(slice(vec_elem, shift_amount, 1) == 0b1) & (slice(vec_elem, 0, shift_amount) != zeros())) } } } @@ -409,6 +460,44 @@ function fp_sub(rm_3b, op1, op2) = { result_val } +val fp_min : forall 'n, 'n in {16, 32, 64}. (bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_min(op1, op2) = { + let (fflags, op1_lt_op2) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Lt_quiet(op1, op2), + 32 => riscv_f32Lt_quiet(op1, op2), + 64 => riscv_f64Lt_quiet(op1, op2) + }; + + let result_val = if (f_is_NaN(op1) & f_is_NaN(op2)) then canonical_NaN('n) + else if f_is_NaN(op1) then op2 + else if f_is_NaN(op2) then op1 + else if (f_is_neg_zero(op1) & f_is_pos_zero(op2)) then op1 + else if (f_is_neg_zero(op2) & f_is_pos_zero(op1)) then op2 + else if op1_lt_op2 then op1 + else op2; + write_fflags(fflags); + result_val +} + +val fp_max : forall 'n, 'n in {16, 32, 64}. (bits('n), bits('n)) -> bits('n) effect {escape, rreg, undef, wreg} +function fp_max(op1, op2) = { + let (fflags, op1_lt_op2) : (bits_fflags, bool) = match 'n { + 16 => riscv_f16Lt_quiet(op1, op2), + 32 => riscv_f32Lt_quiet(op1, op2), + 64 => riscv_f64Lt_quiet(op1, op2) + }; + + let result_val = if (f_is_NaN(op1) & f_is_NaN(op2)) then canonical_NaN('n) + else if f_is_NaN(op1) then op2 + else if f_is_NaN(op2) then op1 + else if (f_is_neg_zero(op1) & f_is_pos_zero(op2)) then op2 + else if (f_is_neg_zero(op2) & f_is_pos_zero(op1)) then op1 + else if op1_lt_op2 then op2 + else op1; + write_fflags(fflags); + result_val +} + val fp_eq : forall 'n, 'n in {16, 32, 64}. 
(bits('n), bits('n)) -> bool effect {escape, rreg, undef, wreg} function fp_eq(op1, op2) = { let (fflags, result_val) : (bits_fflags, bool) = match 'n { @@ -427,7 +516,7 @@ function fp_gt(op1, op2) = { 32 => riscv_f32Le(op1, op2), 64 => riscv_f64Le(op1, op2) }; - let result_val = (if fflags == 0b10000 then false else ~(temp_val)); + let result_val = (if fflags == 0b10000 then false else not(temp_val)); write_fflags(fflags); result_val } @@ -439,7 +528,7 @@ function fp_ge(op1, op2) = { 32 => riscv_f32Lt(op1, op2), 64 => riscv_f64Lt(op1, op2) }; - let result_val = (if fflags == 0b10000 then false else ~(temp_val)); + let result_val = (if fflags == 0b10000 then false else not(temp_val)); write_fflags(fflags); result_val } @@ -536,7 +625,7 @@ function fp_nmulsub(rm_3b, op1, op2, opsub) = { result_val } -val fp_widen : forall 'm, ('m in {16, 32} & 'm <= flen). bits('m) -> bits('m * 2) effect {escape, rreg, undef, wreg} +val fp_widen : forall 'm, 'm in {16, 32}. bits('m) -> bits('m * 2) effect {escape, rreg, undef, wreg} function fp_widen(nval) = { let rm_3b = fcsr.FRM(); let (fflags, wval) : (bits_fflags, bits('m * 2)) = match 'm { diff --git a/model/riscv_insts_vext_vm.sail b/model/riscv_insts_vext_vm.sail index 19b34c824..93e6cd3b0 100755 --- a/model/riscv_insts_vext_vm.sail +++ b/model/riscv_insts_vext_vm.sail @@ -1,3 +1,41 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + /* ******************************************************************************* */ /* This file implements part of the vector extension. 
*/ /* Mask instructions from Chap 11 (integer arithmetic) and 13 (floating-point) */ diff --git a/model/riscv_insts_vext_vset.sail b/model/riscv_insts_vext_vset.sail index 248855bb0..072fad38b 100644 --- a/model/riscv_insts_vext_vset.sail +++ b/model/riscv_insts_vext_vset.sail @@ -1,7 +1,45 @@ -/* ************************************************************************ */ -/* This file implements part of the vector extension. */ -/* Chapter 6: Configuration-Setting Instructions */ -/* ************************************************************************ */ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + +/* ******************************************************************************* */ +/* This file implements part of the vector extension. 
*/ +/* Chapter 6: Configuration-Setting Instructions */ +/* ******************************************************************************* */ mapping sew_flag : string <-> bits(3) = { "e8" <-> 0b000, @@ -37,7 +75,7 @@ mapping maybe_ma_flag : string <-> bits(1) = { sep() ^ "mu" <-> 0b0 } -/* ******************** vsetvli & vsetvl *********************** */ +/* ****************************** vsetvli & vsetvl ******************************* */ union clause ast = VSET_TYPE : (vsetop, bits(1), bits(1), bits(3), bits(3), regidx, regidx) mapping encdec_vsetop : vsetop <-> bits(4) ={ @@ -107,8 +145,7 @@ mapping vsettype_mnemonic : vsetop <-> string ={ mapping clause assembly = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) <-> vsettype_mnemonic(op) ^ spc() ^ reg_name(rd) ^ sep() ^ reg_name(rs1) ^ sep() ^ sew_flag(sew) ^ maybe_lmul_flag(lmul) ^ maybe_ta_flag(ta) ^ maybe_ma_flag(ma) - -/* ******************** vsetivli *********************** */ +/* ********************************* vsetivli ************************************ */ union clause ast = VSETI_TYPE : ( bits(1), bits(1), bits(3), bits(3), regidx, regidx) mapping clause encdec = VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) if haveRVV() diff --git a/model/riscv_softfloat_interface.sail b/model/riscv_softfloat_interface.sail index 4706e92b8..6a3c06ec4 100644 --- a/model/riscv_softfloat_interface.sail +++ b/model/riscv_softfloat_interface.sail @@ -87,7 +87,7 @@ type bits_rm = bits(3) /* Rounding mode */ type bits_fflags = bits(5) /* Accrued exceptions: NV,DZ,OF,UF,NX */ -type bits_H = bits(16) /* Half-precision float value */ +type bits_H = bits(16) /* Half-precision float value */ type bits_S = bits(32) /* Single-precision float value */ type bits_D = bits(64) /* Double-precision float value */ @@ -245,9 +245,109 @@ function riscv_f64Sqrt (rm, v) = { (float_fflags[4 .. 0], float_result) } +/* **************************************************************** */ +/* RECIPROCAL SQUARE ROOT ESTIMATE */ + +val extern_f16Rsqrte7 = {c: "softfloat_f16rsqrte7", ocaml: "Softfloat.f16_rsqrte7", lem: "softfloat_f16_rsqrte7"} : (bits_rm, bits_H) -> unit +val riscv_f16Rsqrte7 : (bits_rm, bits_H) -> (bits_fflags, bits_H) effect {rreg} +function riscv_f16Rsqrte7 (rm, v) = { + extern_f16Rsqrte7(rm, v); + (float_fflags[4 .. 0], float_result[15 .. 0]) +} + +val extern_f32Rsqrte7 = {c: "softfloat_f32rsqrte7", ocaml: "Softfloat.f32_rsqrte7", lem: "softfloat_f32_rsqrte7"} : (bits_rm, bits_S) -> unit +val riscv_f32Rsqrte7 : (bits_rm, bits_S) -> (bits_fflags, bits_S) effect {rreg} +function riscv_f32Rsqrte7 (rm, v) = { + extern_f32Rsqrte7(rm, v); + (float_fflags[4 .. 0], float_result[31 .. 0]) +} + +val extern_f64Rsqrte7 = {c: "softfloat_f64rsqrte7", ocaml: "Softfloat.f64_rsqrte7", lem: "softfloat_f64_rsqrte7"} : (bits_rm, bits_D) -> unit +val riscv_f64Rsqrte7 : (bits_rm, bits_D) -> (bits_fflags, bits_D) effect {rreg} +function riscv_f64Rsqrte7 (rm, v) = { + extern_f64Rsqrte7(rm, v); + (float_fflags[4 .. 0], float_result) +} + +/* **************************************************************** */ +/* RECIPROCAL ESTIMATE */ + +val extern_f16Recip7 = {c: "softfloat_f16recip7", ocaml: "Softfloat.f16_recip7", lem: "softfloat_f16_recip7"} : (bits_rm, bits_H) -> unit +val riscv_f16Recip7 : (bits_rm, bits_H) -> (bits_fflags, bits_H) effect {rreg} +function riscv_f16Recip7 (rm, v) = { + extern_f16Recip7(rm, v); + (float_fflags[4 .. 0], float_result[15 .. 
0]) +} + +val extern_f32Recip7 = {c: "softfloat_f32recip7", ocaml: "Softfloat.f32_recip7", lem: "softfloat_f32_recip7"} : (bits_rm, bits_S) -> unit +val riscv_f32Recip7 : (bits_rm, bits_S) -> (bits_fflags, bits_S) effect {rreg} +function riscv_f32Recip7 (rm, v) = { + extern_f32Recip7(rm, v); + (float_fflags[4 .. 0], float_result[31 .. 0]) +} + +val extern_f64Recip7 = {c: "softfloat_f64recip7", ocaml: "Softfloat.f64_recip7", lem: "softfloat_f64_recip7"} : (bits_rm, bits_D) -> unit +val riscv_f64Recip7 : (bits_rm, bits_D) -> (bits_fflags, bits_D) effect {rreg} +function riscv_f64Recip7 (rm, v) = { + extern_f64Recip7(rm, v); + (float_fflags[4 .. 0], float_result) +} + +/* **************************************************************** */ +/* CLASSIFY */ + +val extern_f16Class = {c: "softfloat_f16class", ocaml: "Softfloat.f16_class", lem: "softfloat_f16_class"} : bits_H -> unit +val riscv_f16Class : bits_H -> (bits_fflags, bits_H) effect {rreg} +function riscv_f16Class (v) = { + extern_f16Class(v); + (float_fflags[4 .. 0], float_result[15 .. 0]) +} + +val extern_f32Class = {c: "softfloat_f32class", ocaml: "Softfloat.f32_class", lem: "softfloat_f32_class"} : bits_S -> unit +val riscv_f32Class : bits_S -> (bits_fflags, bits_S) effect {rreg} +function riscv_f32Class (v) = { + extern_f32Class(v); + (float_fflags[4 .. 0], float_result[31 .. 0]) +} + +val extern_f64Class = {c: "softfloat_f64class", ocaml: "Softfloat.f64_class", lem: "softfloat_f64_class"} : bits_D -> unit +val riscv_f64Class : bits_D -> (bits_fflags, bits_D) effect {rreg} +function riscv_f64Class (v) = { + extern_f64Class(v); + (float_fflags[4 .. 0], float_result) +} + /* **************************************************************** */ /* CONVERSIONS */ +val extern_f16ToI8 = {c: "softfloat_f16toi8", ocaml: "Softfloat.f16_to_i8", lem: "softfloat_f16_to_i8"} : (bits_rm, bits_H) -> unit +val riscv_f16ToI8 : (bits_rm, bits_H) -> (bits_fflags, bits(8)) effect {rreg} +function riscv_f16ToI8 (rm, v) = { + extern_f16ToI8(rm, v); + (float_fflags[4 .. 0], float_result[7 .. 0]) +} + +val extern_f16ToUi8 = {c: "softfloat_f16toui8", ocaml: "Softfloat.f16_to_ui8", lem: "softfloat_f16_to_ui8"} : (bits_rm, bits_H) -> unit +val riscv_f16ToUi8 : (bits_rm, bits_H) -> (bits_fflags, bits(8)) effect {rreg} +function riscv_f16ToUi8 (rm, v) = { + extern_f16ToUi8(rm, v); + (float_fflags[4 .. 0], float_result[7 .. 0]) +} + +val extern_f16ToI16 = {c: "softfloat_f16toi16", ocaml: "Softfloat.f16_to_i16", lem: "softfloat_f16_to_i16"} : (bits_rm, bits_H) -> unit +val riscv_f16ToI16 : (bits_rm, bits_H) -> (bits_fflags, bits(16)) effect {rreg} +function riscv_f16ToI16 (rm, v) = { + extern_f16ToI16(rm, v); + (float_fflags[4 .. 0], float_result[15 .. 0]) +} + +val extern_f16ToUi16 = {c: "softfloat_f16toui16", ocaml: "Softfloat.f16_to_ui16", lem: "softfloat_f16_to_ui16"} : (bits_rm, bits_H) -> unit +val riscv_f16ToUi16 : (bits_rm, bits_H) -> (bits_fflags, bits(16)) effect {rreg} +function riscv_f16ToUi16 (rm, v) = { + extern_f16ToUi16(rm, v); + (float_fflags[4 .. 0], float_result[15 .. 0]) +} + val extern_f16ToI32 = {c: "softfloat_f16toi32", ocaml: "Softfloat.f16_to_i32", lem: "softfloat_f16_to_i32"} : (bits_rm, bits_H) -> unit val riscv_f16ToI32 : (bits_rm, bits_H) -> (bits_fflags, bits_W) effect {rreg} function riscv_f16ToI32 (rm, v) = { @@ -304,6 +404,20 @@ function riscv_ui64ToF16 (rm, v) = { (float_fflags[4 .. 0], float_result[15 .. 
0]) } +val extern_f32ToI16 = {c: "softfloat_f32toi16", ocaml: "Softfloat.f32_to_i16", lem: "softfloat_f32_to_i16"} : (bits_rm, bits_S) -> unit +val riscv_f32ToI16 : (bits_rm, bits_S) -> (bits_fflags, bits(16)) effect {rreg} +function riscv_f32ToI16 (rm, v) = { + extern_f32ToI16(rm, v); + (float_fflags[4 .. 0], float_result[15 .. 0]) +} + +val extern_f32ToUi16 = {c: "softfloat_f32toui16", ocaml: "Softfloat.f32_to_ui16", lem: "softfloat_f32_to_ui16"} : (bits_rm, bits_S) -> unit +val riscv_f32ToUi16 : (bits_rm, bits_S) -> (bits_fflags, bits(16)) effect {rreg} +function riscv_f32ToUi16 (rm, v) = { + extern_f32ToUi16(rm, v); + (float_fflags[4 .. 0], float_result[15 .. 0]) +} + val extern_f32ToI32 = {c: "softfloat_f32toi32", ocaml: "Softfloat.f32_to_i32", lem: "softfloat_f32_to_i32"} : (bits_rm, bits_S) -> unit val riscv_f32ToI32 : (bits_rm, bits_S) -> (bits_fflags, bits_W) effect {rreg} function riscv_f32ToI32 (rm, v) = { @@ -468,6 +582,13 @@ function riscv_f16Lt (v1, v2) = { (float_fflags[4 .. 0], bit_to_bool(float_result[0])) } +val extern_f16Lt_quiet = {c: "softfloat_f16lt_quiet", ocaml: "Softfloat.f16_lt_quiet", lem: "softfloat_f16_lt_quiet"} : (bits_H, bits_H) -> unit +val riscv_f16Lt_quiet : (bits_H, bits_H) -> (bits_fflags, bool) effect {rreg} +function riscv_f16Lt_quiet (v1, v2) = { + extern_f16Lt_quiet(v1, v2); + (float_fflags[4 .. 0], bit_to_bool(float_result[0])) +} + val extern_f16Le = {c: "softfloat_f16le", ocaml: "Softfloat.f16_le", lem: "softfloat_f16_le"} : (bits_H, bits_H) -> unit val riscv_f16Le : (bits_H, bits_H) -> (bits_fflags, bool) effect {rreg} function riscv_f16Le (v1, v2) = { @@ -489,6 +610,13 @@ function riscv_f32Lt (v1, v2) = { (float_fflags[4 .. 0], bit_to_bool(float_result[0])) } +val extern_f32Lt_quiet = {c: "softfloat_f32lt_quiet", ocaml: "Softfloat.f32_lt_quiet", lem: "softfloat_f32_lt_quiet"} : (bits_S, bits_S) -> unit +val riscv_f32Lt_quiet : (bits_S, bits_S) -> (bits_fflags, bool) effect {rreg} +function riscv_f32Lt_quiet (v1, v2) = { + extern_f32Lt_quiet(v1, v2); + (float_fflags[4 .. 0], bit_to_bool(float_result[0])) +} + val extern_f32Le = {c: "softfloat_f32le", ocaml: "Softfloat.f32_le", lem: "softfloat_f32_le"} : (bits_S, bits_S) -> unit val riscv_f32Le : (bits_S, bits_S) -> (bits_fflags, bool) effect {rreg} function riscv_f32Le (v1, v2) = { @@ -510,6 +638,13 @@ function riscv_f64Lt (v1, v2) = { (float_fflags[4 .. 0], bit_to_bool(float_result[0])) } +val extern_f64Lt_quiet = {c: "softfloat_f64lt_quiet", ocaml: "Softfloat.f64_lt_quiet", lem: "softfloat_f64_lt_quiet"} : (bits_D, bits_D) -> unit +val riscv_f64Lt_quiet : (bits_D, bits_D) -> (bits_fflags, bool) effect {rreg} +function riscv_f64Lt_quiet (v1, v2) = { + extern_f64Lt_quiet(v1, v2); + (float_fflags[4 .. 
0], bit_to_bool(float_result[0])) +} + val extern_f64Le = {c: "softfloat_f64le", ocaml: "Softfloat.f64_le", lem: "softfloat_f64_le"} : (bits_D, bits_D) -> unit val riscv_f64Le : (bits_D, bits_D) -> (bits_fflags, bool) effect {rreg} function riscv_f64Le (v1, v2) = { diff --git a/model/riscv_vext_control.sail b/model/riscv_vext_control.sail index b2a68c2ee..9e47e9dc9 100755 --- a/model/riscv_vext_control.sail +++ b/model/riscv_vext_control.sail @@ -1,3 +1,41 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + function clause ext_is_CSR_defined (0x008, _) = true function clause ext_is_CSR_defined (0xC20, _) = true function clause ext_is_CSR_defined (0xC21, _) = true diff --git a/model/riscv_vext_regs.sail b/model/riscv_vext_regs.sail index ed8ca6b4f..bbd52868c 100644 --- a/model/riscv_vext_regs.sail +++ b/model/riscv_vext_regs.sail @@ -1,3 +1,41 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + /* vector registers */ register vr0 : vregtype register vr1 : vregtype diff --git a/model/riscv_vlen.sail b/model/riscv_vlen.sail index a411bf16b..f8ba1f46c 100644 --- a/model/riscv_vlen.sail +++ b/model/riscv_vlen.sail @@ -1,3 +1,41 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + register elen : bits(1) val get_elen_pow : unit -> {|5, 6|} effect {rreg} @@ -27,3 +65,13 @@ function get_vlen_pow() = match vlen { } type vlenmax : Int = 65536 + +/* Note: At present, the values of elen and vlen need to be manually speficied + * in the init_sys() function of riscv_sys_control.sail before compiling the emulators, + * e.g., + * vlen = 0b0101; + * elen = 0b1; + * means VLEN = 1024 and ELEN = 64, + * and the CSR vlenb is also not used. + * They will be configurable when user-specified configuration is supported in Sail. 
+ */ diff --git a/model/riscv_vreg_type.sail b/model/riscv_vreg_type.sail index 025664656..de44a5c56 100755 --- a/model/riscv_vreg_type.sail +++ b/model/riscv_vreg_type.sail @@ -1,3 +1,41 @@ +/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. 
*/ +/*=================================================================================*/ + /* Definitions for vector registers (V extension) */ type vreglenbits = bits(vlenmax) /* use the largest possible register length */ diff --git a/ocaml_emulator/softfloat.ml b/ocaml_emulator/softfloat.ml index 01aaa2a4f..314e0dc18 100644 --- a/ocaml_emulator/softfloat.ml +++ b/ocaml_emulator/softfloat.ml @@ -53,6 +53,45 @@ let f32_sqrt rm v = let f64_sqrt rm v = () +let f16_rsqrte7 rm v = + () + +let f32_rsqrte7 rm v = + () + +let f64_rsqrte7 rm v = + () + +let f16_recip7 rm v = + () + +let f32_recip7 rm v = + () + +let f64_recip7 rm v = + () + +let f16_class v = + () + +let f32_class v = + () + +let f64_class v = + () + +let f16_to_i8 rm v = + () + +let f16_to_ui8 rm v = + () + +let f16_to_i16 rm v = + () + +let f16_to_ui16 rm v = + () + let f16_to_i32 rm v = () @@ -77,6 +116,12 @@ let i64_to_f16 rm v = let ui64_to_f16 rm v = () +let f32_to_i16 rm v = + () + +let f32_to_ui16 rm v = + () + let f32_to_i32 rm v = () @@ -146,6 +191,9 @@ let f64_to_f32 rm v = let f16_lt v1 v2 = () +let f16_lt_quiet v1 v2 = + () + let f16_le v1 v2 = () @@ -155,6 +203,9 @@ let f16_eq v1 v2 = let f32_lt v1 v2 = () +let f32_lt_quiet v1 v2 = + () + let f32_le v1 v2 = () @@ -164,6 +215,9 @@ let f32_eq v1 v2 = let f64_lt v1 v2 = () +let f64_lt_quiet v1 v2 = + () + let f64_le v1 v2 = () From 3a3b1c0454a8bccc1e99e72f6a0ce292e15818d9 Mon Sep 17 00:00:00 2001 From: Xinlai Wan Date: Tue, 13 Jun 2023 03:28:50 +0800 Subject: [PATCH 05/11] Vector reduction and mask instructions (#259) * Add vector mask and reduction instructions * Fix register overlap check in vector mask instructions --------- Co-authored-by: xwan --- Makefile | 1 + model/riscv_insts_vext_mask.sail | 24 +- model/riscv_insts_vext_red.sail | 280 +++++++++++++ model/riscv_insts_vext_vm.sail | 661 ++++++++++++++++++++++++++++++- 4 files changed, 936 insertions(+), 30 deletions(-) create mode 100755 model/riscv_insts_vext_red.sail diff --git a/Makefile b/Makefile index 523152667..9a5f1cc13 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,7 @@ SAIL_DEFAULT_INST += riscv_insts_vext_fp.sail SAIL_DEFAULT_INST += riscv_insts_vext_mem.sail SAIL_DEFAULT_INST += riscv_insts_vext_mask.sail SAIL_DEFAULT_INST += riscv_insts_vext_vm.sail +SAIL_DEFAULT_INST += riscv_insts_vext_red.sail SAIL_SEQ_INST = $(SAIL_DEFAULT_INST) riscv_jalr_seq.sail SAIL_RMEM_INST = $(SAIL_DEFAULT_INST) riscv_jalr_rmem.sail riscv_insts_rmem.sail diff --git a/model/riscv_insts_vext_mask.sail b/model/riscv_insts_vext_mask.sail index 3230ed333..587fbdd63 100755 --- a/model/riscv_insts_vext_mask.sail +++ b/model/riscv_insts_vext_mask.sail @@ -61,7 +61,7 @@ mapping clause encdec = MMTYPE(funct6, vs2, vs1, vd) if haveRVV() function clause execute(MMTYPE(funct6, vs2, vs1, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let num_elem = get_num_elem(LMUL_pow, SEW); + let num_elem = int_power(2, get_vlen_pow()); let 'n = num_elem; let 'm = SEW; @@ -72,7 +72,7 @@ function clause execute(MMTYPE(funct6, vs2, vs1, vd)) = { result : vector('n, dec, bool) = undefined; mask : vector('n, dec, bool) = undefined; - (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + (result, mask) = init_masked_result_carry(num_elem, SEW, 0, vd_val); foreach (i from 0 to (num_elem - 1)) { if mask[i] then { @@ -118,7 +118,7 @@ mapping clause encdec = VCPOP_M(vm, vs2, rd) if haveRVV() function clause execute(VCPOP_M(vm, vs2, rd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let num_elem = 
get_num_elem(LMUL_pow, SEW); + let num_elem = int_power(2, get_vlen_pow()); let 'n = num_elem; let 'm = SEW; @@ -131,7 +131,7 @@ function clause execute(VCPOP_M(vm, vs2, rd)) = { /* Value of vstart must be 0 */ if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vs2_val, vm_val); + (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vs2_val, vm_val); count : nat = 0; foreach (i from 0 to (num_elem - 1)) { @@ -155,7 +155,7 @@ mapping clause encdec = VFIRST_M(vm, vs2, rd) if haveRVV() function clause execute(VFIRST_M(vm, vs2, rd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let num_elem = get_num_elem(LMUL_pow, SEW); + let num_elem = int_power(2, get_vlen_pow()); let 'n = num_elem; let 'm = SEW; @@ -168,7 +168,7 @@ function clause execute(VFIRST_M(vm, vs2, rd)) = { /* Value of vstart must be 0 */ if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vs2_val, vm_val); + (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vs2_val, vm_val); index : int = -1; foreach (i from 0 to (num_elem - 1)) { @@ -194,7 +194,7 @@ mapping clause encdec = VMSBF_M(vm, vs2, vd) if haveRVV() function clause execute(VMSBF_M(vm, vs2, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let num_elem = get_num_elem(LMUL_pow, SEW); + let num_elem = int_power(2, get_vlen_pow()); let 'n = num_elem; let 'm = SEW; @@ -214,7 +214,7 @@ function clause execute(VMSBF_M(vm, vs2, vd)) = { /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vd_val, vm_val); found_elem : bool = false; foreach (i from 0 to (num_elem - 1)) { @@ -241,7 +241,7 @@ mapping clause encdec = VMSIF_M(vm, vs2, vd) if haveRVV() function clause execute(VMSIF_M(vm, vs2, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let num_elem = get_num_elem(LMUL_pow, SEW); + let num_elem = int_power(2, get_vlen_pow()); let 'n = num_elem; let 'm = SEW; @@ -261,7 +261,7 @@ function clause execute(VMSIF_M(vm, vs2, vd)) = { /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vd_val, vm_val); found_elem : bool = false; foreach (i from 0 to (num_elem - 1)) { @@ -288,7 +288,7 @@ mapping clause encdec = VMSOF_M(vm, vs2, vd) if haveRVV() function clause execute(VMSOF_M(vm, vs2, vd)) = { let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); - let num_elem = get_num_elem(LMUL_pow, SEW); + let num_elem = int_power(2, get_vlen_pow()); let 'n = num_elem; let 'm = SEW; @@ -308,7 +308,7 @@ function clause execute(VMSOF_M(vm, vs2, vd)) = { /* Dest reg cannot be the same as source reg */ if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vd_val, vm_val); found_elem : bool = false; foreach (i from 0 to (num_elem - 1)) { diff --git a/model/riscv_insts_vext_red.sail b/model/riscv_insts_vext_red.sail new file mode 100755 index 000000000..362fcb5d7 --- /dev/null +++ b/model/riscv_insts_vext_red.sail @@ -0,0 +1,280 @@ 
+/*=================================================================================*/ +/* Copyright (c) 2021-2023 */ +/* Authors from RIOS Lab, Tsinghua University: */ +/* Xinlai Wan */ +/* Xi Wang */ +/* Yifei Zhu */ +/* Shenwei Hu */ +/* Kalvin Vu */ +/* Other contributors: */ +/* Jessica Clarke */ +/* Victor Moya */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in */ +/* the documentation and/or other materials provided with the */ +/* distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' */ +/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ +/* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A */ +/* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR */ +/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF */ +/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF */ +/* SUCH DAMAGE. */ +/*=================================================================================*/ + +/* ******************************************************************************* */ +/* This file implements part of the vector extension. 
*/ +/* Chapter 14: Vector Reduction Instructions */ +/* ******************************************************************************* */ + +/* ********************* OPIVV (Widening Integer Reduction) ********************** */ +union clause ast = RIVVTYPE : (rivvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_rivvfunct6 : rivvfunct6 <-> bits(6) = { + IVV_VWREDSUMU <-> 0b110000, + IVV_VWREDSUM <-> 0b110001 +} + +mapping clause encdec = RIVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_rivvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(RIVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + assert(8 <= SEW_widen & SEW_widen <= 64); + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + let scalar : bits('o) = read_single_element(SEW_widen, 0, LMUL_pow_widen, vs1); + sum : bits('o) = match funct6 { + IVV_VWREDSUMU => to_bits(SEW_widen, unsigned(scalar)), + IVV_VWREDSUM => to_bits(SEW_widen, signed(scalar)) + }; + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + sum = match funct6 { + IVV_VWREDSUMU => to_bits(SEW_widen, unsigned(vs2_val[i]) + unsigned(sum)), + IVV_VWREDSUM => to_bits(SEW_widen, signed(vs2_val[i]) + signed(sum)) + } + } + }; + + write_single_element(SEW_widen, 0, LMUL_pow_widen, vd, sum); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping rivvtype_mnemonic : rivvfunct6 <-> string = { + IVV_VWREDSUMU <-> "vwredsumu.vs", + IVV_VWREDSUM <-> "vwredsum.vs" +} + +mapping clause assembly = RIVVTYPE(funct6, vm, vs2, vs1, vd) + <-> rivvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ******************* OPMVV (Single-Width Integer Reduction) ******************** */ +union clause ast = RMVVTYPE : (rmvvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_rmvvfunct6 : rmvvfunct6 <-> bits(6) = { + MVV_VREDSUM <-> 0b000000, + MVV_VREDAND <-> 0b000001, + MVV_VREDOR <-> 0b000010, + MVV_VREDXOR <-> 0b000011, + MVV_VREDMINU <-> 0b000100, + MVV_VREDMIN <-> 0b000101, + MVV_VREDMAXU <-> 0b000110, + MVV_VREDMAX <-> 0b000111 +} + +mapping clause encdec = RMVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_rmvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b010 @ vd @ 0b1010111 if haveRVV() + +function clause execute(RMVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : 
vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + sum : bits('m) = read_single_element(SEW, 0, LMUL_pow, vs1); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + sum = match funct6 { + MVV_VREDSUM => sum + vs2_val[i], + MVV_VREDAND => sum & vs2_val[i], + MVV_VREDOR => sum | vs2_val[i], + MVV_VREDXOR => sum ^ vs2_val[i], + MVV_VREDMIN => to_bits(SEW, min(signed(vs2_val[i]), signed(sum))), + MVV_VREDMINU => to_bits(SEW, min(unsigned(vs2_val[i]), unsigned(sum))), + MVV_VREDMAX => to_bits(SEW, max(signed(vs2_val[i]), signed(sum))), + MVV_VREDMAXU => to_bits(SEW, max(unsigned(vs2_val[i]), unsigned(sum))) + } + } + }; + + write_single_element(SEW, 0, LMUL_pow, vd, sum); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping rmvvtype_mnemonic : rmvvfunct6 <-> string = { + MVV_VREDSUM <-> "vredsum.vs", + MVV_VREDAND <-> "vredand.vs", + MVV_VREDOR <-> "vredor.vs", + MVV_VREDXOR <-> "vredxor.vs", + MVV_VREDMINU <-> "vredminu.vs", + MVV_VREDMIN <-> "vredmin.vs", + MVV_VREDMAXU <-> "vredmaxu.vs", + MVV_VREDMAX <-> "vredmax.vs" +} + +mapping clause assembly = RMVVTYPE(funct6, vm, vs2, vs1, vd) + <-> rmvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ********************** OPFVV (Floating-Point Reduction) *********************** */ +union clause ast = RFVVTYPE : (rfvvfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_rfvvfunct6 : rfvvfunct6 <-> bits(6) = { + FVV_VFREDOSUM <-> 0b000011, + FVV_VFREDUSUM <-> 0b000001, + FVV_VFREDMAX <-> 0b000111, + FVV_VFREDMIN <-> 0b000101, + FVV_VFWREDOSUM <-> 0b110011, + FVV_VFWREDUSUM <-> 0b110001 +} + +mapping clause encdec = RFVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_rfvvfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +val process_rfvv_single: forall 'n 'm 'p, 'm in {8, 16, 32, 64}. (rfvvfunct6, bits(1), regidx, regidx, regidx, int('n), int('m), int('p)) -> Retired effect {escape, rreg, undef, wreg} +function process_rfvv_single(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = { + let rm_3b = fcsr.FRM(); + if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + sum : bits('m) = read_single_element(SEW, 0, LMUL_pow, vs1); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + sum = match funct6 { + FVV_VFREDOSUM => fp_add(rm_3b, sum, vs2_val[i]), + FVV_VFREDUSUM => fp_add(rm_3b, sum, vs2_val[i]), + FVV_VFREDMAX => fp_max(sum, vs2_val[i]), + FVV_VFREDMIN => fp_min(sum, vs2_val[i]) + } + } + }; + + write_single_element(SEW, 0, LMUL_pow, vd, sum); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +val process_rfvv_widen: forall 'n 'm 'p, 'm in {8, 16, 32, 64}. 
(rfvvfunct6, bits(1), regidx, regidx, regidx, int('n), int('m), int('p)) -> Retired effect {escape, rreg, undef, wreg} +function process_rfvv_widen(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = { + let rm_3b = fcsr.FRM(); + let SEW_widen = SEW * 2; + let LMUL_pow_widen = LMUL_pow + 1; + + if not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW >= 16 & SEW_widen <= 64); + + let 'n = num_elem; + let 'm = SEW; + let 'o = SEW_widen; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + result : vector('n, dec, bits('o)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW_widen, LMUL_pow_widen, vd_val, vm_val); + + sum : bits('o) = read_single_element(SEW_widen, 0, LMUL_pow_widen, vs1); + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + /* Currently ordered/unordered sum reductions do the same operations */ + sum = fp_add(rm_3b, sum, fp_widen(vs2_val[i])) + } + }; + + write_single_element(SEW_widen, 0, LMUL_pow_widen, vd, sum); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +function clause execute(RFVVTYPE(funct6, vm, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if funct6 == FVV_VFWREDOSUM | funct6 == FVV_VFWREDUSUM then + process_rfvv_widen(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) + else + process_rfvv_single(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) +} + +mapping rfvvtype_mnemonic : rfvvfunct6 <-> string = { + FVV_VFREDOSUM <-> "vfredosum.vs", + FVV_VFREDUSUM <-> "vfredusum.vs", + FVV_VFREDMAX <-> "vfredmax.vs", + FVV_VFREDMIN <-> "vfredmin.vs", + FVV_VFWREDOSUM <-> "vfwredosum.vs", + FVV_VFWREDUSUM <-> "vfwredusum.vs" +} + +mapping clause assembly = RFVVTYPE(funct6, vm, vs2, vs1, vd) + <-> rfvvtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) diff --git a/model/riscv_insts_vext_vm.sail b/model/riscv_insts_vext_vm.sail index 93e6cd3b0..aa681152f 100755 --- a/model/riscv_insts_vext_vm.sail +++ b/model/riscv_insts_vext_vm.sail @@ -41,6 +41,174 @@ /* Mask instructions from Chap 11 (integer arithmetic) and 13 (floating-point) */ /* ******************************************************************************* */ +/* ******************************* OPIVV (VVMTYPE) ******************************* */ +/* VVM instructions' destination is a mask register (e.g. carry out) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VVMTYPE : (vvmfunct6, regidx, regidx, regidx) + +mapping encdec_vvmfunct6 : vvmfunct6 <-> bits(6) = { + VVM_VMADC <-> 0b010001, /* carry in, carry out */ + VVM_VMSBC <-> 0b010011 +} + +mapping clause encdec = VVMTYPE(funct6, vs2, vs1, vd) if haveRVV() + <-> encdec_vvmfunct6(funct6) @ 0b0 @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VVMTYPE(funct6, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VVM_VMADC => unsigned(vs2_val[i]) + unsigned(vs1_val[i]) + unsigned(bool_to_bits(vm_val[i])) > 2 ^ SEW - 1, + VVM_VMSBC => unsigned(vs2_val[i]) - unsigned(vs1_val[i]) - unsigned(bool_to_bits(vm_val[i])) < 0 + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vvmtype_mnemonic : vvmfunct6 <-> string = { + VVM_VMADC <-> "vmadc.vvm", /* carry in, carry out */ + VVM_VMSBC <-> "vmsbc.vvm" +} + +mapping clause assembly = VVMTYPE(funct6, vs2, vs1, vd) + <-> vvmtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ sep() ^ "v0" + +/* ****************************** OPIVV (VVMCTYPE) ******************************* */ +/* VVMC instructions' destination is a mask register (e.g. carry out) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VVMCTYPE : (vvmcfunct6, regidx, regidx, regidx) + +mapping encdec_vvmcfunct6 : vvmcfunct6 <-> bits(6) = { + VVMC_VMADC <-> 0b010001, /* no carry in, carry out */ + VVMC_VMSBC <-> 0b010011 +} + +mapping clause encdec = VVMCTYPE(funct6, vs2, vs1, vd) if haveRVV() + <-> encdec_vvmcfunct6(funct6) @ 0b1 @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VVMCTYPE(funct6, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VVMC_VMADC => unsigned(vs2_val[i]) + unsigned(vs1_val[i]) > 2 ^ SEW - 1, + VVMC_VMSBC => unsigned(vs2_val[i]) - unsigned(vs1_val[i]) < 0 + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vvmctype_mnemonic : vvmcfunct6 <-> string = { + VVMC_VMADC <-> "vmadc.vv", /* no carry in, carry out */ + VVMC_VMSBC <-> "vmsbc.vv" +} + +mapping clause assembly = VVMCTYPE(funct6, vs2, vs1, vd) + <-> vvmctype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) + +/* ****************************** OPIVV (VVMSTYPE) ******************************* */ +/* VVMS instructions' destination is a vector register (e.g. actual sum) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VVMSTYPE : (vvmsfunct6, regidx, regidx, regidx) + +mapping encdec_vvmsfunct6 : vvmsfunct6 <-> bits(6) = { + VVMS_VADC <-> 0b010000, /* carry in, no carry out */ + VVMS_VSBC <-> 0b010010 +} + +mapping clause encdec = VVMSTYPE(funct6, vs2, vs1, vd) if haveRVV() + <-> encdec_vvmsfunct6(funct6) @ 0b0 @ vs2 @ vs1 @ 0b000 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VVMSTYPE(funct6, vs2, vs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + /* for bypassing normal masking in init_masked_result */ + vec_trues : vector('n, dec, bool) = undefined; + foreach (i from 0 to (num_elem - 1)) { + vec_trues[i] = true + }; + + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vec_trues); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VVMS_VADC => to_bits(SEW, unsigned(vs2_val[i]) + unsigned(vs1_val[i]) + unsigned(bool_to_bits(vm_val[i]))), + VVMS_VSBC => to_bits(SEW, unsigned(vs2_val[i]) - unsigned(vs1_val[i]) - unsigned(bool_to_bits(vm_val[i]))) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vvmstype_mnemonic : vvmsfunct6 <-> string = { + VVMS_VADC <-> "vadc.vvm", /* carry in, no carry out */ + VVMS_VSBC <-> "vsbc.vvm" +} + +mapping clause assembly = VVMSTYPE(funct6, vs2, vs1, vd) + <-> vvmstype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ sep() ^ "v0" + /* ***************** OPIVV (Vector Integer Compare Instructions) ***************** */ /* VVCMP instructions' destination is a mask register */ union clause ast = VVCMPTYPE : (vvcmpfunct6, bits(1), regidx, regidx, regidx) @@ -65,12 +233,12 @@ function clause execute(VVCMPTYPE(funct6, vm, vs2, vs1, vd)) = { let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); - let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); - let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); - result : vector('n, dec, bool) = undefined; - mask : vector('n, dec, bool) = undefined; + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); @@ -105,6 +273,174 @@ mapping vvcmptype_mnemonic : vvcmpfunct6 <-> string = { mapping clause assembly = VVCMPTYPE(funct6, vm, vs2, vs1, vd) <-> vvcmptype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ 
sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) +/* ******************************* OPIVX (VXMTYPE) ******************************* */ +/* VXM instructions' destination is a mask register (e.g. carry out) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. carry in) */ +union clause ast = VXMTYPE : (vxmfunct6, regidx, regidx, regidx) + +mapping encdec_vxmfunct6 : vxmfunct6 <-> bits(6) = { + VXM_VMADC <-> 0b010001, /* carry in, carry out */ + VXM_VMSBC <-> 0b010011 +} + +mapping clause encdec = VXMTYPE(funct6, vs2, rs1, vd) if haveRVV() + <-> encdec_vxmfunct6(funct6) @ 0b0 @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VXMTYPE(funct6, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VXM_VMADC => unsigned(vs2_val[i]) + unsigned(rs1_val) + unsigned(bool_to_bits(vm_val[i])) > 2 ^ SEW - 1, + VXM_VMSBC => unsigned(vs2_val[i]) - unsigned(rs1_val) - unsigned(bool_to_bits(vm_val[i])) < 0 + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vxmtype_mnemonic : vxmfunct6 <-> string = { + VXM_VMADC <-> "vmadc.vxm", /* carry in, carry out */ + VXM_VMSBC <-> "vmsbc.vxm" +} + +mapping clause assembly = VXMTYPE(funct6, vs2, rs1, vd) + <-> vxmtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ sep() ^ "v0" + +/* ****************************** OPIVX (VXMCTYPE) ******************************* */ +/* VXMC instructions' destination is a mask register (e.g. carry out) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VXMCTYPE : (vxmcfunct6, regidx, regidx, regidx) + +mapping encdec_vxmcfunct6 : vxmcfunct6 <-> bits(6) = { + VXMC_VMADC <-> 0b010001, /* carry in, carry out */ + VXMC_VMSBC <-> 0b010011 +} + +mapping clause encdec = VXMCTYPE(funct6, vs2, rs1, vd) if haveRVV() + <-> encdec_vxmcfunct6(funct6) @ 0b1 @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VXMCTYPE(funct6, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VXMC_VMADC => unsigned(vs2_val[i]) + unsigned(rs1_val) > 2 ^ SEW - 1, + VXMC_VMSBC => unsigned(vs2_val[i]) - unsigned(rs1_val) < 0 + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vxmctype_mnemonic : vxmcfunct6 <-> string = { + VXMC_VMADC <-> "vmadc.vx", /* carry in, carry out */ + VXMC_VMSBC <-> "vmsbc.vx" +} + +mapping clause assembly = VXMCTYPE(funct6, vs2, rs1, vd) + <-> vxmctype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) + +/* ****************************** OPIVX (VXMSTYPE) ******************************* */ +/* VXMS instructions' destination is a vector register (e.g. actual sum) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VXMSTYPE : (vxmsfunct6, regidx, regidx, regidx) + +mapping encdec_vxmsfunct6 : vxmsfunct6 <-> bits(6) = { + VXMS_VADC <-> 0b010000, /* carry in, no carry out */ + VXMS_VSBC <-> 0b010010 +} + +mapping clause encdec = VXMSTYPE(funct6, vs2, rs1, vd) if haveRVV() + <-> encdec_vxmsfunct6(funct6) @ 0b0 @ vs2 @ rs1 @ 0b100 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VXMSTYPE(funct6, vs2, rs1, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + /* for bypassing normal masking in init_masked_result */ + vec_trues : vector('n, dec, bool) = undefined; + foreach (i from 0 to (num_elem - 1)) { + vec_trues[i] = true + }; + + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vec_trues); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VXMS_VADC => to_bits(SEW, unsigned(vs2_val[i]) + unsigned(rs1_val) + unsigned(bool_to_bits(vm_val[i]))), + VXMS_VSBC => to_bits(SEW, unsigned(vs2_val[i]) - unsigned(rs1_val) - unsigned(bool_to_bits(vm_val[i]))) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vxmstype_mnemonic : vxmsfunct6 <-> string = { + VXMS_VADC <-> "vadc.vxm", /* carry in, no carry out */ + VXMS_VSBC <-> "vsbc.vxm" +} + +mapping clause assembly = VXMSTYPE(funct6, vs2, rs1, vd) + <-> vxmstype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ sep() ^ "v0" + /* ***************** OPIVX (Vector Integer Compare Instructions) ***************** */ /* VXCMP instructions' destination is a mask register */ union clause ast = VXCMPTYPE : (vxcmpfunct6, bits(1), regidx, regidx, regidx) @@ -131,12 +467,12 @@ function clause execute(VXCMPTYPE(funct6, vm, vs2, rs1, vd)) = { let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); - let rs1_val : bits('m) = get_scalar(rs1, SEW); - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); - let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); - result : vector('n, dec, bool) = undefined; - mask : vector('n, dec, bool) = undefined; + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar(rs1, SEW); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); @@ -175,6 +511,165 @@ mapping vxcmptype_mnemonic : vxcmpfunct6 <-> string = { mapping clause assembly = VXCMPTYPE(funct6, vm, vs2, rs1, vd) <-> vxcmptype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) +/* ******************************* OPIVI (VIMTYPE) 
******************************* */ +/* VIM instructions' destination is a mask register (e.g. carry out) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. carry in) */ +union clause ast = VIMTYPE : (vimfunct6, regidx, regidx, regidx) + +mapping encdec_vimfunct6 : vimfunct6 <-> bits(6) = { + VIM_VMADC <-> 0b010001 /* carry in, carry out */ +} + +mapping clause encdec = VIMTYPE(funct6, vs2, simm, vd) if haveRVV() + <-> encdec_vimfunct6(funct6) @ 0b0 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VIMTYPE(funct6, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VIM_VMADC => unsigned(vs2_val[i]) + unsigned(imm_val) + unsigned(bool_to_bits(vm_val[i])) > 2 ^ SEW - 1 + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vimtype_mnemonic : vimfunct6 <-> string = { + VIM_VMADC <-> "vmadc.vim" /* carry in, carry out */ +} + +mapping clause assembly = VIMTYPE(funct6, vs2, simm, vd) + <-> vimtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ sep() ^ "v0" + +/* ****************************** OPIVI (VIMCTYPE) ******************************* */ +/* VIMC instructions' destination is a mask register (e.g. carry out) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VIMCTYPE : (vimcfunct6, regidx, regidx, regidx) + +mapping encdec_vimcfunct6 : vimcfunct6 <-> bits(6) = { + VIMC_VMADC <-> 0b010001 /* carry in, carry out */ +} + +mapping clause encdec = VIMCTYPE(funct6, vs2, simm, vd) if haveRVV() + <-> encdec_vimcfunct6(funct6) @ 0b1 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VIMCTYPE(funct6, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + let 'n = num_elem; + let 'm = SEW; + + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_carry(num_elem, SEW, LMUL_pow, vd_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VIMC_VMADC => unsigned(vs2_val[i]) + unsigned(imm_val) > 2 ^ SEW - 1 + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vimctype_mnemonic : vimcfunct6 <-> string = { + VIMC_VMADC <-> "vmadc.vi" /* Carry in, carry out */ +} + +mapping clause assembly = VIMCTYPE(funct6, vs2, simm, vd) + <-> vimctype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) + +/* ****************************** OPIVI (VIMSTYPE) ******************************* */ +/* VIMS instructions' destination is a vector register (e.g. actual sum) */ +/* Instructions with no carry out will set mask result to current mask value */ +/* May or may not read from source mask register (e.g. 
carry in) */ +union clause ast = VIMSTYPE : (vimsfunct6, regidx, regidx, regidx) + +mapping encdec_vimsfunct6 : vimsfunct6 <-> bits(6) = { + VIMS_VADC <-> 0b010000 /* Carry in, no carry out */ +} + +mapping clause encdec = VIMSTYPE(funct6, vs2, simm, vd) if haveRVV() + <-> encdec_vimsfunct6(funct6) @ 0b0 @ vs2 @ simm @ 0b011 @ vd @ 0b1010111 if haveRVV() + +function clause execute(VIMSTYPE(funct6, vs2, simm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + + let 'n = num_elem; + let 'm = SEW; + + /* for bypassing normal masking in init_masked_result */ + vec_trues : vector('n, dec, bool) = undefined; + foreach (i from 0 to (num_elem - 1)) { + vec_trues[i] = true + }; + + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vec_trues); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + result[i] = match funct6 { + VIMS_VADC => to_bits(SEW, unsigned(vs2_val[i]) + unsigned(imm_val) + unsigned(bool_to_bits(vm_val[i]))) + } + } + }; + + write_vreg(num_elem, SEW, LMUL_pow, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping vimstype_mnemonic : vimsfunct6 <-> string = { + VIMS_VADC <-> "vadc.vim" /* Carry in, no carry out */ +} + +mapping clause assembly = VIMSTYPE(funct6, vs2, simm, vd) + <-> vimstype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ sep() ^ "v0" + /* ***************** OPIVI (Vector Integer Compare Instructions) ***************** */ /* VICMP instructions' destination is a mask register */ union clause ast = VICMPTYPE : (vicmpfunct6, bits(1), regidx, regidx, regidx) @@ -199,12 +694,12 @@ function clause execute(VICMPTYPE(funct6, vm, vs2, simm, vd)) = { let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); - let imm_val : bits('m) = EXTS(simm); - let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); - let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); - result : vector('n, dec, bool) = undefined; - mask : vector('n, dec, bool) = undefined; + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let imm_val : bits('m) = EXTS(simm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); @@ -238,3 +733,133 @@ mapping vicmptype_mnemonic : vicmpfunct6 <-> string = { mapping clause assembly = VICMPTYPE(funct6, vm, vs2, simm, vd) <-> vicmptype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ hex_bits_5(simm) ^ maybe_vmask(vm) + +/* ******************************* OPFVV (VVMTYPE) ******************************* */ +/* FVVM instructions' destination is a mask register */ +union clause ast = FVVMTYPE : (fvvmfunct6, bits(1), regidx, regidx, regidx) + 
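+/* Example (illustrative only, not part of the instruction semantics): under the
+ * encdec and assembly mappings below, the form "vmfeq.vv v1, v2, v3, v0.t"
+ * corresponds to FVVMTYPE(FVVM_VMFEQ, 0b0, v2, v3, v1), i.e. vd = v1, vs2 = v2,
+ * vs1 = v3, assuming maybe_vmask renders the "v0.t" suffix when vm == 0b0.
+ * As with the integer compare instructions above, only elements whose mask bit
+ * is set are compared; the remaining elements of the destination mask are
+ * initialised by init_masked_result_cmp. */
+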
+mapping encdec_fvvmfunct6 : fvvmfunct6 <-> bits(6) = { + FVVM_VMFEQ <-> 0b011000, + FVVM_VMFLE <-> 0b011001, + FVVM_VMFLT <-> 0b011011, + FVVM_VMFNE <-> 0b011100 +} + +mapping clause encdec = FVVMTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() + <-> encdec_fvvmfunct6(funct6) @ vm @ vs2 @ vs1 @ 0b001 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FVVMTYPE(funct6, vm, vs2, vs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + FVVM_VMFEQ => fp_eq(vs2_val[i], vs1_val[i]), + FVVM_VMFNE => ~(fp_eq(vs2_val[i], vs1_val[i])), + FVVM_VMFLE => fp_le(vs2_val[i], vs1_val[i]), + FVVM_VMFLT => fp_lt(vs2_val[i], vs1_val[i]) + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fvvmtype_mnemonic : fvvmfunct6 <-> string = { + FVVM_VMFEQ <-> "vmfeq.vv", + FVVM_VMFLE <-> "vmfle.vv", + FVVM_VMFLT <-> "vmflt.vv", + FVVM_VMFNE <-> "vmfne.vv" +} + +mapping clause assembly = FVVMTYPE(funct6, vm, vs2, vs1, vd) + <-> fvvmtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ vreg_name(vs1) ^ maybe_vmask(vm) + +/* ******************************* OPFVF (VFMTYPE) ******************************* */ +/* VFM instructions' destination is a mask register */ +union clause ast = FVFMTYPE : (fvfmfunct6, bits(1), regidx, regidx, regidx) + +mapping encdec_fvfmfunct6 : fvfmfunct6 <-> bits(6) = { + VFM_VMFEQ <-> 0b011000, + VFM_VMFLE <-> 0b011001, + VFM_VMFLT <-> 0b011011, + VFM_VMFNE <-> 0b011100, + VFM_VMFGT <-> 0b011101, + VFM_VMFGE <-> 0b011111 +} + +mapping clause encdec = FVFMTYPE(funct6, vm, vs2, rs1, vd) if haveRVV() + <-> encdec_fvfmfunct6(funct6) @ vm @ vs2 @ rs1 @ 0b101 @ vd @ 0b1010111 if haveRVV() + +function clause execute(FVFMTYPE(funct6, vm, vs2, rs1, vd)) = { + let rm_3b = fcsr.FRM(); + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + assert(SEW != 8); + + let 'n = num_elem; + let 'm = SEW; + + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); + result : vector('n, dec, bool) = undefined; + mask : vector('n, dec, bool) = undefined; + + (result, mask) = init_masked_result_cmp(num_elem, SEW, LMUL_pow, vd_val, vm_val); + + foreach (i from 0 to (num_elem - 1)) { + if mask[i] then { + let res : bool = match funct6 { + VFM_VMFEQ => fp_eq(vs2_val[i], rs1_val), + VFM_VMFNE => ~(fp_eq(vs2_val[i], rs1_val)), + VFM_VMFLE => fp_le(vs2_val[i], 
rs1_val), + VFM_VMFLT => fp_lt(vs2_val[i], rs1_val), + VFM_VMFGE => fp_ge(vs2_val[i], rs1_val), + VFM_VMFGT => fp_gt(vs2_val[i], rs1_val) + }; + result[i] = res + } + }; + + write_vmask(num_elem, vd, result); + vstart = EXTZ(0b0); + RETIRE_SUCCESS +} + +mapping fvfmtype_mnemonic : fvfmfunct6 <-> string = { + VFM_VMFEQ <-> "vmfeq.vf", + VFM_VMFLE <-> "vmfle.vf", + VFM_VMFLT <-> "vmflt.vf", + VFM_VMFNE <-> "vmfne.vf", + VFM_VMFGT <-> "vmfgt.vf", + VFM_VMFGE <-> "vmfge.vf" +} + +mapping clause assembly = FVFMTYPE(funct6, vm, vs2, rs1, vd) + <-> fvfmtype_mnemonic(funct6) ^ spc() ^ vreg_name(vd) ^ sep() ^ vreg_name(vs2) ^ sep() ^ reg_name(rs1) ^ maybe_vmask(vm) From 15364c079112152034c246dc36d4cc0ee1e90d4c Mon Sep 17 00:00:00 2001 From: BrighterW <16307110083@fudan.edu.cn> Date: Sun, 18 Jun 2023 15:27:44 +0800 Subject: [PATCH 06/11] update vector CSR vtype.vill setting and judgement --- model/riscv_insts_vext_arith.sail | 176 ++++++++++++++++-------------- model/riscv_insts_vext_fp.sail | 89 ++++++++------- model/riscv_insts_vext_mask.sail | 76 +++++-------- model/riscv_insts_vext_mem.sail | 66 +++++++---- model/riscv_insts_vext_red.sail | 19 ++-- model/riscv_insts_vext_utils.sail | 11 +- model/riscv_insts_vext_vm.sail | 50 ++++++--- model/riscv_insts_vext_vset.sail | 38 +++++-- model/riscv_sys_control.sail | 4 +- model/riscv_sys_regs.sail | 2 - 10 files changed, 300 insertions(+), 231 deletions(-) diff --git a/model/riscv_insts_vext_arith.sail b/model/riscv_insts_vext_arith.sail index 2d3a3ecc3..68a986b3a 100644 --- a/model/riscv_insts_vext_arith.sail +++ b/model/riscv_insts_vext_arith.sail @@ -80,12 +80,12 @@ function clause execute(VVTYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -214,15 +214,15 @@ function clause execute(NVSTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -281,15 +281,15 @@ function clause execute(NVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | 
not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -298,9 +298,10 @@ function clause execute(NVTYPE(funct6, vm, vs2, vs1, vd)) = { (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + assert(SEW_widen <= 64); foreach (i from 0 to (num_elem - 1)) { if mask[i] then { - let shift_amount = get_shift_amount(vs1_val[i], SEW); + let shift_amount = get_shift_amount(vs1_val[i], SEW_widen); let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); result[i] = match funct6 { NV_VNCLIPU => { @@ -343,12 +344,12 @@ function clause execute(MASKTYPEV(vs2, vs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -389,10 +390,12 @@ function clause execute(MOVETYPEV(vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; @@ -446,12 +449,12 @@ function clause execute(VXTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -564,15 +567,15 @@ function clause execute(NXSTYPE(funct6, vm, vs2, rs1, vd)) = { let 
SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -631,15 +634,15 @@ function clause execute(NXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -648,9 +651,10 @@ function clause execute(NXTYPE(funct6, vm, vs2, rs1, vd)) = { (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); + assert(SEW_widen <= 64); foreach (i from 0 to (num_elem - 1)) { if mask[i] then { - let shift_amount = get_shift_amount(rs1_val, SEW); + let shift_amount = get_shift_amount(rs1_val, SEW_widen); let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); result[i] = match funct6 { NX_VNCLIPU => { @@ -699,12 +703,12 @@ function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : nat = unsigned(X(rs1)); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -763,12 +767,12 @@ function clause execute(MASKTYPEX(vs2, rs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, 
0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -809,11 +813,13 @@ function clause execute(MOVETYPEX(rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; let rs1_val : bits('m) = get_scalar(rs1, 'm); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -858,12 +864,12 @@ function clause execute(VITYPE(funct6, vm, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -952,15 +958,15 @@ function clause execute(NISTYPE(funct6, vm, vs2, simm, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -1019,15 +1025,15 @@ function clause execute(NITYPE(funct6, vm, vs2, simm, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -1036,9 +1042,10 @@ function clause execute(NITYPE(funct6, vm, vs2, simm, vd)) = { (result, mask) = init_masked_result(num_elem, 
SEW, LMUL_pow, vd_val, vm_val); + assert(SEW_widen <= 64); foreach (i from 0 to (num_elem - 1)) { if mask[i] then { - let shift_amount = get_shift_amount(imm_val, SEW); + let shift_amount = get_shift_amount(imm_val, SEW_widen); let rounding_incr = get_fixed_rounding_incr(vs2_val[i], shift_amount); result[i] = match funct6 { NI_VNCLIPU => { @@ -1087,12 +1094,12 @@ function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let imm_val : nat = unsigned(EXTZ(sizeof(xlen), simm)); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1151,12 +1158,12 @@ function clause execute(MASKTYPEI(vs2, simm, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, 0b00000); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1197,10 +1204,12 @@ function clause execute(MOVETYPEI(vd, simm)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let imm_val : bits('m) = EXTS(simm); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; @@ -1239,7 +1248,7 @@ function clause execute(VMVRTYPE(vs2, simm, vd)) = { let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, EMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, EMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; @@ -1289,12 +1298,12 @@ function clause execute(MVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, 
vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1399,12 +1408,12 @@ function clause execute(MVVMATYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN = int_power(2, get_vlen_pow()); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1462,15 +1471,16 @@ function clause execute(WVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -1532,15 +1542,15 @@ function clause execute(WVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -1596,15 +1606,16 @@ function clause execute(WMVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | + 
not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -1657,15 +1668,15 @@ function clause execute(VEXT2TYPE(funct6, vm, vs2, vd)) = { let SEW_half = SEW / 2; let LMUL_pow_half = LMUL_pow - 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_half, LMUL_pow)) | - not(valid_eew_emul(SEW_half, LMUL_pow_half)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_half, LMUL_pow_half)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_half, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_half; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_half, LMUL_pow_half, vs2); result : vector('n, dec, bits('m)) = undefined; @@ -1715,15 +1726,15 @@ function clause execute(VEXT4TYPE(funct6, vm, vs2, vd)) = { let SEW_quart = SEW / 4; let LMUL_pow_quart = LMUL_pow - 2; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_quart, LMUL_pow)) | - not(valid_eew_emul(SEW_quart, LMUL_pow_quart)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_quart, LMUL_pow_quart)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_quart, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_quart; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_quart, LMUL_pow_quart, vs2); result : vector('n, dec, bits('m)) = undefined; @@ -1773,15 +1784,15 @@ function clause execute(VEXT8TYPE(funct6, vm, vs2, vd)) = { let SEW_eighth = SEW / 8; let LMUL_pow_eighth = LMUL_pow - 3; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_eighth, LMUL_pow)) | - not(valid_eew_emul(SEW_eighth, LMUL_pow_eighth)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_eighth, LMUL_pow_eighth)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow_eighth, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_eighth; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_eighth, LMUL_pow_eighth, vs2); result : vector('n, dec, bits('m)) = undefined; @@ -1822,6 +1833,8 @@ function clause execute(VMVXS(vs2, rd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); + if not(valid_vtype()) then { handle_illegal(); return 
RETIRE_FAIL }; + assert(num_elem > 0); let 'n = num_elem; let 'm = SEW; @@ -1847,14 +1860,14 @@ mapping clause encdec = MVVCOMPRESS(vs2, vs1, vd) if haveRVV() function clause execute(MVVCOMPRESS(vs2, vs1, vd)) = { let start_element = get_start_element(); let end_element = get_end_element(); - - /* vcompress should always be executed with a vstart of 0 */ - if (start_element != 0 | vs1 == vd | vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; - let SEW = get_sew(); let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + /* vcompress should always be executed with a vstart of 0 */ + if start_element != 0 | vs1 == vd | vs2 == vd | not(valid_vtype()) + then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; @@ -1922,12 +1935,12 @@ function clause execute(MVXTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1964,7 +1977,6 @@ function clause execute(MVXTYPE(funct6, vm, vs2, rs1, vd)) = { if i == 0 then rs1_val else vs2_val[i - 1] }, MVX_VSLIDE1DOWN => { - if (vs2 == vd) then { handle_illegal(); return RETIRE_FAIL }; let last_elem = get_end_element(); assert(last_elem < num_elem); if i < last_elem then vs2_val[i + 1] else rs1_val @@ -2044,12 +2056,12 @@ function clause execute(MVXMATYPE(funct6, vm, vs2, rs1, vd)) = { let VLEN = int_power(2, get_vlen_pow()); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -2108,15 +2120,15 @@ function clause execute(WVXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -2178,14 +2190,14 @@ 
function clause execute(WXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -2242,15 +2254,15 @@ function clause execute(WMVXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -2296,11 +2308,13 @@ function clause execute(VMVSX(rs1, vd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + assert(num_elem > 0); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let rs1_val : bits('m) = get_scalar(rs1, 'm); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vd); result : vector('n, dec, bits('m)) = undefined; diff --git a/model/riscv_insts_vext_fp.sail b/model/riscv_insts_vext_fp.sail index 5830fcf20..293c606c9 100755 --- a/model/riscv_insts_vext_fp.sail +++ b/model/riscv_insts_vext_fp.sail @@ -66,13 +66,14 @@ function clause execute(FVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -141,13 +142,14 @@ function clause execute(FVVMATYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) | 
not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -210,7 +212,8 @@ function clause execute(FWVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -220,7 +223,7 @@ function clause execute(FWVVTYPE(funct6, vm, vs2, vs1, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -275,7 +278,8 @@ function clause execute(FWVVMATYPE(funct6, vm, vs1, vs2, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -285,7 +289,7 @@ function clause execute(FWVVMATYPE(funct6, vm, vs1, vs2, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -339,8 +343,9 @@ function clause execute(FWVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_fp_op(SEW, rm_3b)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -348,7 +353,7 @@ function clause execute(FWVTYPE(funct6, vm, vs2, vs1, vd)) = { let 'm = SEW; let 'o 
= SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -400,12 +405,13 @@ function clause execute(VFUNARY0(vm, vs2, vfunary0, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; @@ -515,7 +521,8 @@ function clause execute(VFWUNARY0(vm, vs2, vfwunary0, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 8 & SEW_widen <= 64); @@ -524,7 +531,7 @@ function clause execute(VFWUNARY0(vm, vs2, vfwunary0, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); result : vector('n, dec, bits('o)) = undefined; @@ -645,7 +652,8 @@ function clause execute(VFNUNARY0(vm, vs2, vfnunary0, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -653,7 +661,7 @@ function clause execute(VFNUNARY0(vm, vs2, vfnunary0, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; @@ -778,12 +786,13 @@ function clause execute(VFUNARY1(vm, vs2, vfunary1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, 
rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; @@ -860,7 +869,8 @@ function clause execute(VFMVFS(vs2, rd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); - if not(valid_fp_op(SEW, rm_3b)) | SEW > sizeof(flen) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) | SEW > sizeof(flen) + then { handle_illegal(); return RETIRE_FAIL }; assert(num_elem > 0 & SEW != 8); let 'n = num_elem; @@ -905,13 +915,14 @@ function clause execute(FVFTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -939,7 +950,6 @@ function clause execute(FVFTYPE(funct6, vm, vs2, rs1, vd)) = { if i == 0 then rs1_val else vs2_val[i - 1] }, VF_VSLIDE1DOWN => { - if vs2 == vd then { handle_illegal(); return RETIRE_FAIL }; let last_elem = get_end_element(); assert(last_elem < num_elem); if i < last_elem then vs2_val[i + 1] else rs1_val @@ -996,13 +1006,14 @@ function clause execute(FVFMATYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1065,7 +1076,8 @@ function clause execute(FWVFTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -1074,7 +1086,7 @@ function clause execute(FWVFTYPE(funct6, vm, vs2, rs1, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 
vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -1129,7 +1141,8 @@ function clause execute(FWVFMATYPE(funct6, vm, rs1, vs2, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -1138,7 +1151,7 @@ function clause execute(FWVFMATYPE(funct6, vm, rs1, vs2, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); @@ -1192,7 +1205,8 @@ function clause execute(FWFTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_fp_op(SEW, rm_3b)) + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -1200,7 +1214,7 @@ function clause execute(FWFTYPE(funct6, vm, vs2, rs1, vd)) = { let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vs2); @@ -1247,13 +1261,14 @@ function clause execute(VFMERGE(vs2, rs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if vd == vreg_name("v0") | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, 0b00000); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -1296,14 +1311,14 @@ function clause execute(VFMV(rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return 
RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -1333,13 +1348,13 @@ function clause execute(VFMVSF(rs1, vd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); - if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; assert(num_elem > 0 & SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, 0b1, 0b00000); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, 0, vd); result : vector('n, dec, bits('m)) = undefined; diff --git a/model/riscv_insts_vext_mask.sail b/model/riscv_insts_vext_mask.sail index 587fbdd63..bdfe20ae4 100755 --- a/model/riscv_insts_vext_mask.sail +++ b/model/riscv_insts_vext_mask.sail @@ -63,6 +63,8 @@ function clause execute(MMTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; @@ -120,17 +122,16 @@ function clause execute(VCPOP_M(vm, vs2, rd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); + if not(valid_vtype()) | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); result : vector('n, dec, bool) = undefined; mask : vector('n, dec, bool) = undefined; - /* Value of vstart must be 0 */ - if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vs2_val, vm_val); count : nat = 0; @@ -157,17 +158,16 @@ function clause execute(VFIRST_M(vm, vs2, rd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); + if not(valid_vtype()) | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); result : vector('n, dec, bool) = undefined; mask : vector('n, dec, bool) = undefined; - /* Value of vstart must be 0 */ - if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vs2_val, vm_val); index : int = -1; @@ -196,24 +196,18 @@ function clause execute(VMSBF_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = 
read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); result : vector('n, dec, bool) = undefined; mask : vector('n, dec, bool) = undefined; - /* Value of vstart must be 0 */ - if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - - /* If masking is enabled, then dest reg cannot be v0 */ - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; - - /* Dest reg cannot be the same as source reg */ - if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vd_val, vm_val); found_elem : bool = false; @@ -243,24 +237,18 @@ function clause execute(VMSIF_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); result : vector('n, dec, bool) = undefined; mask : vector('n, dec, bool) = undefined; - /* Value of vstart must be 0 */ - if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - - /* If masking is enabled, then dest reg cannot be v0 */ - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; - - /* Dest reg cannot be the same as source reg */ - if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vd_val, vm_val); found_elem : bool = false; @@ -290,24 +278,18 @@ function clause execute(VMSOF_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); result : vector('n, dec, bool) = undefined; mask : vector('n, dec, bool) = undefined; - /* Value of vstart must be 0 */ - if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - - /* If masking is enabled, then dest reg cannot be v0 */ - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; - - /* Dest reg cannot be the same as source reg */ - if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result_cmp(num_elem, SEW, 0, vd_val, vm_val); found_elem : bool = false; @@ -341,24 +323,18 @@ function clause execute(VIOTA_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : 
vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs2_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; - /* Value of vstart must be 0 */ - if not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; - - /* If masking is enabled, then dest reg cannot be v0 */ - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; - - /* Dest reg cannot be the same as source reg */ - if vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; - (result, mask) = init_masked_result(num_elem, SEW, LMUL_pow, vd_val, vm_val); sum : int = 0; @@ -388,12 +364,12 @@ function clause execute(VID_V(vm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); result : vector('n, dec, bits('m)) = undefined; mask : vector('n, dec, bool) = undefined; diff --git a/model/riscv_insts_vext_mem.sail b/model/riscv_insts_vext_mem.sail index c38d1a7dd..a001cfb56 100644 --- a/model/riscv_insts_vext_mem.sail +++ b/model/riscv_insts_vext_mem.sail @@ -107,7 +107,7 @@ mapping clause encdec = VLETYPE(vm, rs1, width, vd) if haveRVV() val process_vle : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). (bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vle (vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); total : vector('n, dec, bits('b * 8)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -153,7 +153,7 @@ function clause execute(VLETYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vle(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -177,7 +177,7 @@ mapping clause encdec = VSETYPE(vm, rs1, width, vs3) if haveRVV() val process_vse : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
(bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vse (vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) = { let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs3_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vs3); total : vector('n, dec, bits('b * 8)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -230,6 +230,8 @@ function clause execute(VSETYPE(vm, rs1, width, vs3)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vse(vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -252,7 +254,7 @@ mapping clause encdec = VLSETYPE(vm, rs2, rs1, width, vd) if haveRVV() val process_vlse : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). (bits(1), regidx, int('b), regidx, regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vlse (vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); let rs2_val : int = signed(get_scalar(rs2, sizeof(xlen))); total : vector('n, dec, bits('b * 8)) = undefined; @@ -299,7 +301,7 @@ function clause execute(VLSETYPE(vm, rs2, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlse(vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -323,7 +325,7 @@ mapping clause encdec = VSSETYPE(vm, rs2, rs1, width, vs3) if haveRVV() val process_vsse : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). (bits(1), regidx, int('b), regidx, regidx, int('p), int('n)) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vsse (vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs3_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vs3); let rs2_val : int = signed(get_scalar(rs2, sizeof(xlen))); total : vector('n, dec, bits('b * 8)) = undefined; @@ -377,6 +379,8 @@ function clause execute(VSSETYPE(vm, rs2, rs1, width, vs3)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vsse(vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -399,7 +403,7 @@ mapping clause encdec = VLUXEITYPE(vm, vs2, rs1, width, vd) if haveRVV() val process_vlxei : forall 'ib 'db 'ip 'dp 'n, ('ib in {1, 2, 4, 8}) & ('db in {1, 2, 4, 8}) & ('n >= 0). 
(bits(1), regidx, int('ib), int('db), int('ip), int('dp), regidx, regidx, int('n), int) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vlxei (vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { let width_type : word_width = bytes_wordwidth(EEW_data_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd); let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); total : vector('n, dec, bits('db * 8)) = undefined; @@ -446,7 +450,7 @@ function clause execute(VLUXEITYPE(vm, vs2, rs1, width, vd)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -469,7 +473,7 @@ function clause execute(VLOXEITYPE(vm, vs2, rs1, width, vd)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -486,7 +490,7 @@ mapping clause encdec = VSUXEITYPE(vm, vs2, rs1, width, vs3) if haveRVV() val process_vsxei : forall 'ib 'db 'ip 'dp 'n, ('ib in {1, 2, 4, 8}) & ('db in {1, 2, 4, 8}) & ('n >= 0). 
(bits(1), regidx, int('ib), int('db), int('ip), int('dp), regidx, regidx, int('n), int) -> Retired effect {eamem, escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vsxei (vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { let width_type : word_width = bytes_wordwidth(EEW_data_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs3_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vs3); let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); total : vector('n, dec, bits('db * 8)) = undefined; /* just used to generate mask */ @@ -540,6 +544,8 @@ function clause execute(VSUXEITYPE(vm, vs2, rs1, width, vs3)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vsxei(vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -561,6 +567,8 @@ function clause execute(VSOXEITYPE(vm, vs2, rs1, width, vs3)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vsxei(vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -576,7 +584,7 @@ mapping clause encdec = VLEFFTYPE(vm, rs1, width, vd) if haveRVV() val process_vleff : forall 'b 'n 'p, ('b in {1, 2, 4, 8}) & ('n >= 0). 
(bits(1), regidx, int('b), regidx, int('p), int('n)) -> Retired effect {escape, rmem, rmemt, rreg, undef, wmv, wmvt, wreg} function process_vleff (vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); total : vector('n, dec, bits('b * 8)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -651,7 +659,7 @@ function clause execute(VLEFFTYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vleff(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -676,7 +684,7 @@ val process_vlseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8} function process_vlseg (nf, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = { let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); status : Retired = RETIRE_SUCCESS; vd_a = vd; vd_t = vd; @@ -731,7 +739,7 @@ function clause execute(VLSEGTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); /* # of element of each register group */ let nf_int = nfields_int(nf); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlseg(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -750,7 +758,7 @@ function process_vlsegff (nf, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); let width_type : word_width = bytes_wordwidth(load_width_bytes); let start_element = get_start_element(); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); status : Retired = RETIRE_SUCCESS; if start_element >= num_elem then return status; @@ -824,7 +832,7 @@ function clause execute(VLSEGFFTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlsegff(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -843,7 +851,7 @@ function process_vsseg (nf, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); let width_type : word_width = bytes_wordwidth(load_width_bytes); let start_element = get_start_element(); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); status : Retired = RETIRE_SUCCESS; if start_element >= num_elem then return status; @@ -896,6 +904,8 @@ function clause execute(VSSEGTYPE(nf, vm, rs1, width, 
vs3)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vsseg(nf_int, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -912,7 +922,7 @@ val process_vlsseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8 function process_vlsseg (nf, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vd); /* only to generate mask */ result : vector('n, dec, bits('b * 8)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -960,7 +970,7 @@ function clause execute(VLSSEGTYPE(nf, vm, rs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlsseg(nf_int, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -978,7 +988,7 @@ val process_vssseg : forall 'f 'b 'n 'p, (0 < 'f & 'f <= 8) & ('b in {1, 2, 4, 8 function process_vssseg (nf, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) = { let EMUL_reg : int = if EMUL_pow <= 0 then 1 else int_power(2, EMUL_pow); let width_type : word_width = bytes_wordwidth(load_width_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs3_val : vector('n, dec, bits('b * 8)) = read_vreg(num_elem, load_width_bytes * 8, EMUL_pow, vs3); /* only to generate mask */ result : vector('n, dec, bits('b * 8)) = undefined; mask : vector('n, dec, bool) = undefined; @@ -1035,6 +1045,8 @@ function clause execute(VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vssseg(nf_int, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -1051,7 +1063,7 @@ val process_vlxseg : forall 'f 'ib 'db 'ip 'dp 'n, (0 < 'f & 'f <= 8) & ('ib in function process_vlxseg (nf, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { let EMUL_data_reg : int = if EMUL_data_pow <= 0 then 1 else int_power(2, EMUL_data_pow); let width_type : word_width = bytes_wordwidth(EEW_data_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vd); let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); total : vector('n, dec, bits('db * 8)) = undefined; @@ -1100,7 +1112,7 @@ function clause execute(VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); 
return RETIRE_FAIL }; process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -1124,7 +1136,7 @@ function clause execute(VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -1142,7 +1154,7 @@ val process_vsxseg : forall 'f 'ib 'db 'ip 'dp 'n, (0 < 'f & 'f <= 8) & ('ib in function process_vsxseg (nf, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, mop) = { let EMUL_data_reg : int = if EMUL_data_pow <= 0 then 1 else int_power(2, EMUL_data_pow); let width_type : word_width = bytes_wordwidth(EEW_data_bytes); - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs3_val : vector('n, dec, bits('db * 8)) = read_vreg(num_elem, EEW_data_bytes * 8, EMUL_data_pow, vs3); let vs2_val : vector('n, dec, bits('ib * 8)) = read_vreg(num_elem, EEW_index_bytes * 8, EMUL_index_pow, vs2); total : vector('n, dec, bits('db * 8)) = undefined; @@ -1200,6 +1212,8 @@ function clause execute(VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ let nf_int = nfields_int(nf); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vsxseg(nf_int, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -1222,6 +1236,8 @@ function clause execute(VSOXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ let nf_int = nfields_int(nf); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + process_vsxseg(nf_int, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -1504,6 +1520,8 @@ function clause execute(VMTYPE(rs1, vd_or_vs3, op)) = { let tmp = unsigned(vl); let num_elem : int = if tmp % 8 == 0 then tmp / 8 else tmp / 8 + 1; + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + /* unmask vle8 except that the effective vector length is evl=ceil(vl/8) */ assert(num_elem >= 0); process_vm(vd_or_vs3, rs1, EMUL_pow, num_elem, op) diff --git a/model/riscv_insts_vext_red.sail b/model/riscv_insts_vext_red.sail index 362fcb5d7..714cf805b 100755 --- a/model/riscv_insts_vext_red.sail +++ b/model/riscv_insts_vext_red.sail @@ -59,13 +59,14 @@ function clause execute(RIVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, 
LMUL_pow_widen, vd); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); result : vector('n, dec, bits('o)) = undefined; @@ -124,12 +125,12 @@ function clause execute(RMVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); result : vector('n, dec, bits('m)) = undefined; @@ -190,13 +191,14 @@ mapping clause encdec = RFVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() val process_rfvv_single: forall 'n 'm 'p, 'm in {8, 16, 32, 64}. (rfvvfunct6, bits(1), regidx, regidx, regidx, int('n), int('m), int('p)) -> Retired effect {escape, rreg, undef, wreg} function process_rfvv_single(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = { let rm_3b = fcsr.FRM(); - if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_fp_op(SEW, rm_3b)) + then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); result : vector('n, dec, bits('m)) = undefined; @@ -227,7 +229,8 @@ function process_rfvv_widen(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | not(valid_fp_op(SEW, rm_3b)) + if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -235,7 +238,7 @@ function process_rfvv_widen(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = let 'm = SEW; let 'o = SEW_widen; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vd_val : vector('n, dec, bits('o)) = read_vreg(num_elem, SEW_widen, LMUL_pow_widen, vd); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); result : vector('n, dec, bits('o)) = undefined; diff --git a/model/riscv_insts_vext_utils.sail b/model/riscv_insts_vext_utils.sail index a5c28916c..d6afec730 100755 --- a/model/riscv_insts_vext_utils.sail +++ b/model/riscv_insts_vext_utils.sail @@ -53,6 +53,15 @@ function valid_eew_emul(EEW, EMUL_pow) = { EEW >= 8 & EEW <= ELEN & EMUL_pow >= -3 & EMUL_pow <= 3 } +/* Check for valid vtype setting + * 1. If the vill bit is set, then any attempt to execute a vector instruction that depends upon vtype will raise an illegal instruction exception. + * 2. vset{i}vl{i} and whole-register loads, stores, and moves do not depend upon vtype. 
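+ * Note: in this model, vset{i}vl{i} sets vtype.vill (and clears vl) when an
+ *       unsupported configuration is requested, and init_sys() now starts with
+ *       vill set, so vtype-dependent instructions guard their execute clauses with:
+ *         if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL };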
+ */ +val valid_vtype : unit -> bool effect {rreg} +function valid_vtype() = { + vtype.vill() == 0b0 +} + /* Check for vstart value */ val assert_vstart : int -> bool effect {rreg} function assert_vstart(i) = { @@ -79,7 +88,7 @@ function valid_fp_op(SEW, rm_3b) = { */ val valid_rd_mask : (regidx, bits(1)) -> bool function valid_rd_mask(rd, vm) = { - vm != 0b0 | rd != vreg_name("v0") + vm != 0b0 | rd != 0b00000 } /* Check for valid register overlap in vector widening/narrowing instructions: diff --git a/model/riscv_insts_vext_vm.sail b/model/riscv_insts_vext_vm.sail index aa681152f..47af56626 100755 --- a/model/riscv_insts_vext_vm.sail +++ b/model/riscv_insts_vext_vm.sail @@ -60,10 +60,12 @@ function clause execute(VVMTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -114,6 +116,8 @@ function clause execute(VVMCTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; @@ -167,7 +171,7 @@ function clause execute(VVMSTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -178,7 +182,7 @@ function clause execute(VVMSTYPE(funct6, vs2, vs1, vd)) = { vec_trues[i] = true }; - let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -230,10 +234,12 @@ function clause execute(VVCMPTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -292,10 +298,12 @@ function clause execute(VXMTYPE(funct6, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, 
0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -346,6 +354,8 @@ function clause execute(VXMCTYPE(funct6, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; @@ -399,7 +409,7 @@ function clause execute(VXMSTYPE(funct6, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -410,7 +420,7 @@ function clause execute(VXMSTYPE(funct6, vs2, rs1, vd)) = { vec_trues[i] = true }; - let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, 0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -464,10 +474,12 @@ function clause execute(VXCMPTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar(rs1, SEW); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -529,10 +541,12 @@ function clause execute(VIMTYPE(funct6, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, 0b00000); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -580,6 +594,8 @@ function clause execute(VIMCTYPE(funct6, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; @@ -630,7 +646,7 @@ function clause execute(VIMSTYPE(funct6, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if vd == vreg_name("v0") then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -641,7 +657,7 @@ function clause execute(VIMSTYPE(funct6, vs2, simm, vd)) = { vec_trues[i] = true }; - let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask_carry(num_elem, 0b0, 0b00000); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let 
vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); @@ -691,10 +707,12 @@ function clause execute(VICMPTYPE(funct6, vm, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); + if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let imm_val : bits('m) = EXTS(simm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -754,13 +772,13 @@ function clause execute(FVVMTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let vs1_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs1); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); @@ -818,13 +836,13 @@ function clause execute(FVFMTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; let 'm = SEW; - let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, vreg_name("v0")); + let vm_val : vector('n, dec, bool) = read_vmask(num_elem, vm, 0b00000); let rs1_val : bits('m) = get_scalar_fp(rs1, 'm); let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); let vd_val : vector('n, dec, bool) = read_vmask(num_elem, 0b0, vd); diff --git a/model/riscv_insts_vext_vset.sail b/model/riscv_insts_vext_vset.sail index 072fad38b..d84adc62d 100644 --- a/model/riscv_insts_vext_vset.sail +++ b/model/riscv_insts_vext_vset.sail @@ -88,11 +88,12 @@ mapping clause encdec = VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) if haveRVV() function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { let VLEN_pow = get_vlen_pow(); + let ELEN_pow = get_elen_pow(); let LMUL_pow_ori = get_lmul_pow(); let SEW_pow_ori = get_sew_pow(); let ratio_pow_ori = SEW_pow_ori - LMUL_pow_ori; - /* set vtype and calculate VLMAX */ + /* set vtype */ match op { VSETVLI => { vtype->bits() = 0b0 @ zeros(sizeof(xlen) - 9) @ ma @ ta @ sew @ lmul @@ -102,10 +103,18 @@ function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { vtype->bits() = X(rs2) } }; - print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + + /* check legal SEW and LMUL and calculate VLMAX */ let LMUL_pow_new = get_lmul_pow(); let SEW_pow_new = get_sew_pow(); - let VLMAX = int_power(2, VLEN_pow + LMUL_pow_new - SEW_pow_new); + if SEW_pow_new > LMUL_pow_new + ELEN_pow then { + vtype->bits() = 0b1 @ zeros(sizeof(xlen) - 1); /* set vtype.vill */ + vl = EXTZ(0b0); + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + print_reg("CSR vl <- " ^ BitStr(vl)); + return RETIRE_SUCCESS + }; + let VLMAX = 
int_power(2, VLEN_pow + LMUL_pow_new - SEW_pow_new); /* set vl according to VLMAX and AVL */ if (rs1 != 0b00000) then { /* normal stripmining */ @@ -115,20 +124,20 @@ function clause execute VSET_TYPE(op, ma, ta, sew, lmul, rs1, rd) = { else if AVL < 2 * VLMAX then to_bits(sizeof(xlen), (AVL + 1) / 2) /* ceil(AVL / 2) ≤ vl ≤ VLMAX */ else to_bits(sizeof(xlen), VLMAX); X(rd) = vl; - print_reg("CSR vl <- " ^ BitStr(vl)) } else if (rd != 0b00000) then { /* set vl to VLMAX */ let AVL = unsigned(ones(sizeof(xlen))); vl = to_bits(sizeof(xlen), VLMAX); X(rd) = vl; - print_reg("CSR vl <- " ^ BitStr(vl)) } else { /* keep existing vl */ let AVL = unsigned(vl); let ratio_pow_new = SEW_pow_new - LMUL_pow_new; if (ratio_pow_new != ratio_pow_ori) then { vtype->bits() = 0b1 @ zeros(sizeof(xlen) - 1); /* set vtype.vill */ - print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + vl = EXTZ(0b0); } }; + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + print_reg("CSR vl <- " ^ BitStr(vl)); /* reset vstart to 0 */ vstart = EXTZ(0b0); @@ -153,24 +162,33 @@ mapping clause encdec = VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) if haveRVV() function clause execute VSETI_TYPE(ma, ta, sew, lmul, uimm, rd) = { let VLEN_pow = get_vlen_pow(); + let ELEN_pow = get_elen_pow(); let LMUL_pow_ori = get_lmul_pow(); let SEW_pow_ori = get_sew_pow(); let ratio_pow_ori = SEW_pow_ori - LMUL_pow_ori; - /* set vtype and calculate VLMAX */ + /* set vtype */ vtype->bits() = 0b0 @ zeros(sizeof(xlen) - 9) @ ma @ ta @ sew @ lmul; - print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + /* check legal SEW and LMUL and calculate VLMAX */ let LMUL_pow_new = get_lmul_pow(); let SEW_pow_new = get_sew_pow(); - let VLMAX = int_power(2, VLEN_pow + LMUL_pow_new - SEW_pow_new); - let AVL = unsigned(uimm); /* AVL is encoded as 5-bit zero-extended imm in the rs1 field */ + if SEW_pow_new > LMUL_pow_new + ELEN_pow then { + vtype->bits() = 0b1 @ zeros(sizeof(xlen) - 1); /* set vtype.vill */ + vl = EXTZ(0b0); + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); + print_reg("CSR vl <- " ^ BitStr(vl)); + return RETIRE_SUCCESS + }; + let VLMAX = int_power(2, VLEN_pow + LMUL_pow_new - SEW_pow_new); + let AVL = unsigned(uimm); /* AVL is encoded as 5-bit zero-extended imm in the rs1 field */ /* set vl according to VLMAX and AVL */ vl = if AVL <= VLMAX then to_bits(sizeof(xlen), AVL) else if AVL < 2 * VLMAX then to_bits(sizeof(xlen), (AVL + 1) / 2) /* ceil(AVL / 2) ≤ vl ≤ VLMAX */ else to_bits(sizeof(xlen), VLMAX); X(rd) = vl; + print_reg("CSR vtype <- " ^ BitStr(vtype.bits())); print_reg("CSR vl <- " ^ BitStr(vl)); /* reset vstart to 0 */ diff --git a/model/riscv_sys_control.sail b/model/riscv_sys_control.sail index 6f95e9c5e..7e3bff11b 100644 --- a/model/riscv_sys_control.sail +++ b/model/riscv_sys_control.sail @@ -603,8 +603,8 @@ function init_sys() -> unit = { vxrm = 0b00; vcsr->vxrm() = vxrm; vcsr->vxsat() = vxsat; - vl = EXTZ(0b10000); /* the default value is 16 */ - vtype->vill() = 0b0; + vl = EXTZ(0b0); + vtype->vill() = 0b1; vtype->reserved() = EXTZ(0b0); vtype->vma() = 0b0; vtype->vta() = 0b0; diff --git a/model/riscv_sys_regs.sail b/model/riscv_sys_regs.sail index a96a9e3f8..5169e73f9 100644 --- a/model/riscv_sys_regs.sail +++ b/model/riscv_sys_regs.sail @@ -849,7 +849,6 @@ register vtype : Vtype /* this returns the power of 2 for SEW */ val get_sew_pow : unit -> {|3, 4, 5, 6|} effect {escape, rreg} function get_sew_pow() = { - let ELEN_pow = get_elen_pow(); let SEW_pow : {|3, 4, 5, 6|} = match vtype.vsew() { 0b000 => 3, 0b001 => 4, @@ -857,7 +856,6 @@ 
function get_sew_pow() = { 0b011 => 6, _ => {assert(false, "invalid vsew field in vtype"); 0} }; - assert(SEW_pow <= ELEN_pow); SEW_pow } /* this returns the actual value of SEW */ From 58729080dc4d27dfe15a6450c0ac7cf9796d3bc8 Mon Sep 17 00:00:00 2001 From: BrighterW <16307110083@fudan.edu.cn> Date: Tue, 27 Jun 2023 16:24:24 +0800 Subject: [PATCH 07/11] Summarize patterns of vector illegal instruction check --- model/riscv_insts_vext_arith.sail | 66 +++++++++++------------ model/riscv_insts_vext_fp.sail | 51 +++++++----------- model/riscv_insts_vext_mask.sail | 16 +++--- model/riscv_insts_vext_mem.sail | 38 ++++++------- model/riscv_insts_vext_red.sail | 12 ++--- model/riscv_insts_vext_utils.sail | 90 +++++++++++++++++++++++++++++++ model/riscv_insts_vext_vm.sail | 28 +++++----- 7 files changed, 186 insertions(+), 115 deletions(-) diff --git a/model/riscv_insts_vext_arith.sail b/model/riscv_insts_vext_arith.sail index 68a986b3a..915a9fdfa 100644 --- a/model/riscv_insts_vext_arith.sail +++ b/model/riscv_insts_vext_arith.sail @@ -80,7 +80,7 @@ function clause execute(VVTYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -214,7 +214,7 @@ function clause execute(NVSTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -281,7 +281,7 @@ function clause execute(NVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -344,7 +344,7 @@ function clause execute(MASKTYPEV(vs2, vs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_masked(vd) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -390,7 +390,7 @@ function clause execute(MOVETYPEV(vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -449,7 +449,7 @@ function clause execute(VXTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -567,7 +567,7 @@ function clause execute(NXSTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | 
not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -634,7 +634,7 @@ function clause execute(NXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -703,7 +703,7 @@ function clause execute(VXSG(funct6, vm, vs2, rs1, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -767,7 +767,7 @@ function clause execute(MASKTYPEX(vs2, rs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_masked(vd) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -813,7 +813,7 @@ function clause execute(MOVETYPEX(rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -864,7 +864,7 @@ function clause execute(VITYPE(funct6, vm, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -958,7 +958,7 @@ function clause execute(NISTYPE(funct6, vm, vs2, simm, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1025,7 +1025,7 @@ function clause execute(NITYPE(funct6, vm, vs2, simm, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1094,7 +1094,7 @@ function clause execute(VISG(funct6, vm, vs2, simm, vd)) = { let VLEN_pow = get_vlen_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1158,7 +1158,7 @@ function clause execute(MASKTYPEI(vs2, simm, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* 
VLMAX */ - if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_masked(vd) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1204,7 +1204,7 @@ function clause execute(MOVETYPEI(vd, simm)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1298,7 +1298,7 @@ function clause execute(MVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1408,7 +1408,7 @@ function clause execute(MVVMATYPE(funct6, vm, vs2, vs1, vd)) = { let VLEN = int_power(2, get_vlen_pow()); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -1471,7 +1471,7 @@ function clause execute(WVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1542,7 +1542,7 @@ function clause execute(WVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1606,7 +1606,7 @@ function clause execute(WMVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1668,7 +1668,7 @@ function clause execute(VEXT2TYPE(funct6, vm, vs2, vd)) = { let SEW_half = SEW / 2; let LMUL_pow_half = LMUL_pow - 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_half, LMUL_pow_half)) | + if illegal_variable_width(vd, vm, SEW_half, LMUL_pow_half) | not(valid_reg_overlap(vs2, vd, LMUL_pow_half, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1726,7 +1726,7 @@ function clause execute(VEXT4TYPE(funct6, vm, vs2, vd)) = { let SEW_quart = SEW / 4; let LMUL_pow_quart = LMUL_pow - 2; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_quart, LMUL_pow_quart)) | + if illegal_variable_width(vd, vm, SEW_quart, LMUL_pow_quart) | not(valid_reg_overlap(vs2, vd, LMUL_pow_quart, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1784,7 +1784,7 @@ function clause execute(VEXT8TYPE(funct6, vm, vs2, vd)) 
= { let SEW_eighth = SEW / 8; let LMUL_pow_eighth = LMUL_pow - 3; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_eighth, LMUL_pow_eighth)) | + if illegal_variable_width(vd, vm, SEW_eighth, LMUL_pow_eighth) | not(valid_reg_overlap(vs2, vd, LMUL_pow_eighth, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -1833,7 +1833,7 @@ function clause execute(VMVXS(vs2, rd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; assert(num_elem > 0); let 'n = num_elem; @@ -1865,7 +1865,7 @@ function clause execute(MVVCOMPRESS(vs2, vs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* vcompress should always be executed with a vstart of 0 */ - if start_element != 0 | vs1 == vd | vs2 == vd | not(valid_vtype()) + if start_element != 0 | vs1 == vd | vs2 == vd | illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -1935,7 +1935,7 @@ function clause execute(MVXTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -2056,7 +2056,7 @@ function clause execute(MVXMATYPE(funct6, vm, vs2, rs1, vd)) = { let VLEN = int_power(2, get_vlen_pow()); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -2120,7 +2120,7 @@ function clause execute(WVXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -2190,7 +2190,7 @@ function clause execute(WXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -2254,7 +2254,7 @@ function clause execute(WMVXTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_variable_width(vd, vm, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -2308,7 +2308,7 @@ function clause execute(VMVSX(rs1, vd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; assert(num_elem > 0); let 'n = num_elem; diff --git a/model/riscv_insts_vext_fp.sail b/model/riscv_insts_vext_fp.sail index 293c606c9..2e0e2570d 100755 --- a/model/riscv_insts_vext_fp.sail +++ b/model/riscv_insts_vext_fp.sail @@ -66,8 +66,7 @@ function clause 
execute(FVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_normal(vd, vm, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -142,8 +141,7 @@ function clause execute(FVVMATYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_normal(vd, vm, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -212,8 +210,7 @@ function clause execute(FWVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -278,8 +275,7 @@ function clause execute(FWVVMATYPE(funct6, vm, vs1, vs2, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; @@ -343,8 +339,7 @@ function clause execute(FWVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs1, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -405,8 +400,7 @@ function clause execute(VFUNARY0(vm, vs2, vfunary0, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_normal(vd, vm, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -521,8 +515,7 @@ function clause execute(VFWUNARY0(vm, vs2, vfwunary0, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 8 & SEW_widen <= 64); @@ -652,8 +645,7 @@ function clause execute(VFNUNARY0(vm, vs2, vfnunary0, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + 
if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow_widen, LMUL_pow)) then { handle_illegal(); return RETIRE_FAIL }; @@ -786,8 +778,7 @@ function clause execute(VFUNARY1(vm, vs2, vfunary1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_normal(vd, vm, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -869,7 +860,7 @@ function clause execute(VFMVFS(vs2, rd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); - if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) | SEW > sizeof(flen) + if illegal_fp_vd_unmasked(SEW, rm_3b) | SEW > sizeof(flen) then { handle_illegal(); return RETIRE_FAIL }; assert(num_elem > 0 & SEW != 8); @@ -915,8 +906,7 @@ function clause execute(FVFTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_normal(vd, vm, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -1006,8 +996,7 @@ function clause execute(FVFMATYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_normal(vd, vm, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -1076,8 +1065,7 @@ function clause execute(FWVFTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -1141,8 +1129,7 @@ function clause execute(FWVFMATYPE(funct6, vm, rs1, vs2, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) | + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) | not(valid_reg_overlap(vs2, vd, LMUL_pow, LMUL_pow_widen)) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -1205,8 +1192,7 @@ function clause execute(FWFTYPE(funct6, vm, vs2, rs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) + if illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_widen, LMUL_pow_widen) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); @@ -1261,8 +1247,7 @@ function clause execute(VFMERGE(vs2, rs1, vd)) = { let num_elem = get_num_elem(LMUL_pow, SEW); /* max(VLMAX,VLEN/SEW)) */ let real_num_elem = if LMUL_pow >= 0 then num_elem else num_elem / (0 - LMUL_pow); /* VLMAX */ - if not(valid_vtype()) | vd == 0b00000 | not(valid_fp_op(SEW, rm_3b)) - then { 
handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_vd_masked(vd, SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -1311,7 +1296,7 @@ function clause execute(VFMV(rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_vd_unmasked(SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -1348,7 +1333,7 @@ function clause execute(VFMVSF(rs1, vd)) = { let SEW = get_sew(); let num_elem = get_num_elem(0, SEW); - if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_vd_unmasked(SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(num_elem > 0 & SEW != 8); let 'n = num_elem; diff --git a/model/riscv_insts_vext_mask.sail b/model/riscv_insts_vext_mask.sail index bdfe20ae4..5f1be7af6 100755 --- a/model/riscv_insts_vext_mask.sail +++ b/model/riscv_insts_vext_mask.sail @@ -63,7 +63,7 @@ function clause execute(MMTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -122,7 +122,7 @@ function clause execute(VCPOP_M(vm, vs2, rd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); - if not(valid_vtype()) | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -158,7 +158,7 @@ function clause execute(VFIRST_M(vm, vs2, rd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); - if not(valid_vtype()) | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -196,7 +196,7 @@ function clause execute(VMSBF_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + if illegal_normal(vd, vm) | not(assert_vstart(0)) | vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -237,7 +237,7 @@ function clause execute(VMSIF_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + if illegal_normal(vd, vm) | not(assert_vstart(0)) | vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -278,7 +278,7 @@ function clause execute(VMSOF_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = int_power(2, get_vlen_pow()); - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + if illegal_normal(vd, vm) | not(assert_vstart(0)) | vd == vs2 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -323,7 +323,7 @@ function clause execute(VIOTA_M(vm, vs2, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_rd_mask(vd, vm)) | vd == vs2 + if illegal_normal(vd, vm) | not(assert_vstart(0)) | vd == 
vs2 then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; @@ -364,7 +364,7 @@ function clause execute(VID_V(vm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; diff --git a/model/riscv_insts_vext_mem.sail b/model/riscv_insts_vext_mem.sail index a001cfb56..796ea4ad8 100644 --- a/model/riscv_insts_vext_mem.sail +++ b/model/riscv_insts_vext_mem.sail @@ -153,7 +153,7 @@ function clause execute(VLETYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vle(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -230,7 +230,7 @@ function clause execute(VSETYPE(vm, rs1, width, vs3)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vse(vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -301,7 +301,7 @@ function clause execute(VLSETYPE(vm, rs2, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlse(vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -379,7 +379,7 @@ function clause execute(VSSETYPE(vm, rs2, rs1, width, vs3)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vsse(vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -450,7 +450,7 @@ function clause execute(VLUXEITYPE(vm, vs2, rs1, width, vd)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -473,7 +473,7 @@ function clause execute(VLOXEITYPE(vm, vs2, rs1, width, vd)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlxei(vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -544,7 +544,7 @@ function clause execute(VSUXEITYPE(vm, vs2, rs1, width, vs3)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if 
illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vsxei(vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -567,7 +567,7 @@ function clause execute(VSOXEITYPE(vm, vs2, rs1, width, vs3)) = { let EMUL_index_pow = EEW_index_pow - EEW_data_pow + EMUL_data_pow; let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vsxei(vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -659,7 +659,7 @@ function clause execute(VLEFFTYPE(vm, rs1, width, vd)) = { let EMUL_pow = EEW_pow - SEW_pow + LMUL_pow; let num_elem = get_num_elem(EMUL_pow, EEW); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vleff(vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -739,7 +739,7 @@ function clause execute(VLSEGTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); /* # of element of each register group */ let nf_int = nfields_int(nf); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlseg(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -832,7 +832,7 @@ function clause execute(VLSEGFFTYPE(nf, vm, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlsegff(nf_int, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -904,7 +904,7 @@ function clause execute(VSSEGTYPE(nf, vm, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vsseg(nf_int, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) } @@ -970,7 +970,7 @@ function clause execute(VLSSEGTYPE(nf, vm, rs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlsseg(nf_int, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -1045,7 +1045,7 @@ function clause execute(VSSSEGTYPE(nf, vm, rs2, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_pow, EEW); let nf_int = nfields_int(nf); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vssseg(nf_int, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_elem) } @@ -1112,7 +1112,7 @@ function clause execute(VLUXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlxseg(nf_int, vm, vd, 
EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -1136,7 +1136,7 @@ function clause execute(VLOXSEGTYPE(nf, vm, vs2, rs1, width, vd)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); let nf_int = nfields_int(nf); - if not(valid_vtype()) | not(valid_rd_mask(vd, vm)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_normal(vd, vm) then { handle_illegal(); return RETIRE_FAIL }; process_vlxseg(nf_int, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -1212,7 +1212,7 @@ function clause execute(VSUXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ let nf_int = nfields_int(nf); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vsxseg(nf_int, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 1) } @@ -1236,7 +1236,7 @@ function clause execute(VSOXSEGTYPE(nf, vm, vs2, rs1, width, vs3)) = { let num_elem = get_num_elem(EMUL_data_pow, EEW_data_bytes * 8); /* number of data and indices are the same */ let nf_int = nfields_int(nf); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; process_vsxseg(nf_int, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_index_pow, EMUL_data_pow, rs1, vs2, num_elem, 3) } @@ -1520,7 +1520,7 @@ function clause execute(VMTYPE(rs1, vd_or_vs3, op)) = { let tmp = unsigned(vl); let num_elem : int = if tmp % 8 == 0 then tmp / 8 else tmp / 8 + 1; - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; /* unmask vle8 except that the effective vector length is evl=ceil(vl/8) */ assert(num_elem >= 0); diff --git a/model/riscv_insts_vext_red.sail b/model/riscv_insts_vext_red.sail index 714cf805b..afa0b95ea 100755 --- a/model/riscv_insts_vext_red.sail +++ b/model/riscv_insts_vext_red.sail @@ -59,8 +59,7 @@ function clause execute(RIVVTYPE(funct6, vm, vs2, vs1, vd)) = { let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_reduction_widen(SEW_widen, LMUL_pow_widen) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -125,7 +124,7 @@ function clause execute(RMVVTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(assert_vstart(0)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_reduction() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -191,8 +190,7 @@ mapping clause encdec = RFVVTYPE(funct6, vm, vs2, vs1, vd) if haveRVV() val process_rfvv_single: forall 'n 'm 'p, 'm in {8, 16, 32, 64}. 
(rfvvfunct6, bits(1), regidx, regidx, regidx, int('n), int('m), int('p)) -> Retired effect {escape, rreg, undef, wreg} function process_rfvv_single(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = { let rm_3b = fcsr.FRM(); - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_fp_op(SEW, rm_3b)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_reduction(SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -229,9 +227,7 @@ function process_rfvv_widen(funct6, vm, vs2, vs1, vd, num_elem, SEW, LMUL_pow) = let SEW_widen = SEW * 2; let LMUL_pow_widen = LMUL_pow + 1; - if not(valid_vtype()) | not(assert_vstart(0)) | not(valid_fp_op(SEW, rm_3b)) | - not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) - then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_reduction_widen(SEW, rm_3b, SEW_widen, LMUL_pow_widen) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW >= 16 & SEW_widen <= 64); let 'n = num_elem; diff --git a/model/riscv_insts_vext_utils.sail b/model/riscv_insts_vext_utils.sail index d6afec730..d498cbe01 100755 --- a/model/riscv_insts_vext_utils.sail +++ b/model/riscv_insts_vext_utils.sail @@ -112,6 +112,96 @@ function valid_reg_overlap(rs, rd, EMUL_pow_rs, EMUL_pow_rd) = { is_valid } +/* ******************************************************************************* */ +/* The following functions summarize patterns of illegal instruction check. */ +/* ******************************************************************************* */ + +/* a. Normal check including vtype.vill field and vd/v0 overlap if vm = 0 */ +val illegal_normal : (regidx, bits(1)) -> bool +function illegal_normal(vd, vm) = { + not(valid_vtype()) | not(valid_rd_mask(vd, vm)) +} + +/* b. Masked check for instructions encoded with vm = 0 */ +val illegal_vd_masked : regidx -> bool +function illegal_vd_masked(vd) = { + not(valid_vtype()) | vd == 0b00000 +} + +/* c. Unmasked check for: + * 1. instructions encoded with vm = 1 + * 2. instructions with scalar rd: vcpop.m, vfirst.m + * 3. instructions with vs3 rather than vd: vector stores + * 4. vd as mask register (eew = 1): + * vmadc.vvm/vxm/vim, vmsbc.vvm/vxm, mask logical, integer compare, vlm.v, vsm.v + */ +val illegal_vd_unmasked : unit -> bool +function illegal_vd_unmasked() = { + not(valid_vtype()) +} + +/* d. Variable width check for: + * 1. integer/fixed-point widening/narrowing instructions + * 2. vector integer extension: vzext, vsext + */ +val illegal_variable_width : (regidx, bits(1), int, int) -> bool +function illegal_variable_width(vd, vm, SEW_new, LMUL_pow_new) = { + not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_eew_emul(SEW_new, LMUL_pow_new)) +} + +/* e. Normal check for reduction instructions: + * The destination vector register can overlap the source operands, including the mask register. + * Vector reduction operations raise an illegal instruction exception if vstart is non-zero. + */ +val illegal_reduction : unit -> bool +function illegal_reduction() = { + not(valid_vtype()) | not(assert_vstart(0)) +} + +/* f. Variable width check for widening reduction instructions */ +val illegal_reduction_widen : (int, int) -> bool +function illegal_reduction_widen(SEW_widen, LMUL_pow_widen) = { + not(valid_vtype()) | not(assert_vstart(0)) | not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) +} + +/* g. 
Normal check for floating-point instructions */ +val illegal_fp_normal : (regidx, bits(1), {|8, 16, 32, 64|}, bits(3)) -> bool +function illegal_fp_normal(vd, vm, SEW, rm_3b) = { + not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) +} + +/* h. Masked check for floating-point instructions encoded with vm = 0 */ +val illegal_fp_vd_masked : (regidx, {|8, 16, 32, 64|}, bits(3)) -> bool +function illegal_fp_vd_masked(vd, SEW, rm_3b) = { + not(valid_vtype()) | vd == 0b00000 | not(valid_fp_op(SEW, rm_3b)) +} + +/* i. Unmasked check for floating-point instructions encoded with vm = 1 */ +val illegal_fp_vd_unmasked : ({|8, 16, 32, 64|}, bits(3)) -> bool +function illegal_fp_vd_unmasked(SEW, rm_3b) = { + not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) +} + +/* j. Variable width check for floating-point widening/narrowing instructions */ +val illegal_fp_variable_width : (regidx, bits(1), {|8, 16, 32, 64|}, bits(3), int, int) -> bool +function illegal_fp_variable_width(vd, vm, SEW, rm_3b, SEW_new, LMUL_pow_new) = { + not(valid_vtype()) | not(valid_rd_mask(vd, vm)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_new, LMUL_pow_new)) +} + +/* k. Normal check for floating-point reduction instructions */ +val illegal_fp_reduction : ({|8, 16, 32, 64|}, bits(3)) -> bool +function illegal_fp_reduction(SEW, rm_3b) = { + not(valid_vtype()) | not(assert_vstart(0)) | not(valid_fp_op(SEW, rm_3b)) +} + +/* l. Variable width check for floating-point widening reduction instructions */ +val illegal_fp_reduction_widen : ({|8, 16, 32, 64|}, bits(3), int, int) -> bool +function illegal_fp_reduction_widen(SEW, rm_3b, SEW_widen, LMUL_pow_widen) = { + not(valid_vtype()) | not(assert_vstart(0)) | not(valid_fp_op(SEW, rm_3b)) | + not(valid_eew_emul(SEW_widen, LMUL_pow_widen)) +} + /* Scalar register shaping */ val get_scalar : forall 'n, 'n >= 8. 
(regidx, int('n)) -> bits('n) effect {escape, rreg} function get_scalar(rs1, SEW) = { diff --git a/model/riscv_insts_vext_vm.sail b/model/riscv_insts_vext_vm.sail index 47af56626..6784ce7e4 100755 --- a/model/riscv_insts_vext_vm.sail +++ b/model/riscv_insts_vext_vm.sail @@ -60,7 +60,7 @@ function clause execute(VVMTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -116,7 +116,7 @@ function clause execute(VVMCTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -171,7 +171,7 @@ function clause execute(VVMSTYPE(funct6, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_masked(vd) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -234,7 +234,7 @@ function clause execute(VVCMPTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -298,7 +298,7 @@ function clause execute(VXMTYPE(funct6, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -354,7 +354,7 @@ function clause execute(VXMCTYPE(funct6, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -409,7 +409,7 @@ function clause execute(VXMSTYPE(funct6, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_masked(vd) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -474,7 +474,7 @@ function clause execute(VXCMPTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -541,7 +541,7 @@ function clause execute(VIMTYPE(funct6, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -594,7 +594,7 @@ function clause execute(VIMCTYPE(funct6, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if 
illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -646,7 +646,7 @@ function clause execute(VIMSTYPE(funct6, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | vd == 0b00000 then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_masked(vd) then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -707,7 +707,7 @@ function clause execute(VICMPTYPE(funct6, vm, vs2, simm, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_vd_unmasked() then { handle_illegal(); return RETIRE_FAIL }; let 'n = num_elem; let 'm = SEW; @@ -772,7 +772,7 @@ function clause execute(FVVMTYPE(funct6, vm, vs2, vs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_vd_unmasked(SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; @@ -836,7 +836,7 @@ function clause execute(FVFMTYPE(funct6, vm, vs2, rs1, vd)) = { let LMUL_pow = get_lmul_pow(); let num_elem = get_num_elem(LMUL_pow, SEW); - if not(valid_vtype()) | not(valid_fp_op(SEW, rm_3b)) then { handle_illegal(); return RETIRE_FAIL }; + if illegal_fp_vd_unmasked(SEW, rm_3b) then { handle_illegal(); return RETIRE_FAIL }; assert(SEW != 8); let 'n = num_elem; From 09701e16e00b774f3354ced9e36e1704698552ae Mon Sep 17 00:00:00 2001 From: Charalampos Mitrodimas Date: Tue, 28 Mar 2023 19:46:50 +0200 Subject: [PATCH 08/11] Zvksed: Add infrastructure for Zvksed To support the implementation of the Zvksed extension in SAIL, this creates the necessary infrastructure (i.e., a file to hold it, and the existence macro), preparing the tree for the Zvksed implementation. Signed-off-by: Charalampos Mitrodimas --- Makefile | 2 ++ model/riscv_insts_zvksed.sail | 53 +++++++++++++++++++++++++++++++++++ model/riscv_sys_regs.sail | 1 + 3 files changed, 56 insertions(+) create mode 100644 model/riscv_insts_zvksed.sail diff --git a/Makefile b/Makefile index 9a5f1cc13..86678ce2a 100644 --- a/Makefile +++ b/Makefile @@ -46,6 +46,8 @@ SAIL_DEFAULT_INST += riscv_insts_vext_mask.sail SAIL_DEFAULT_INST += riscv_insts_vext_vm.sail SAIL_DEFAULT_INST += riscv_insts_vext_red.sail +SAIL_DEFAULT_INST += riscv_insts_zvksed.sail + SAIL_SEQ_INST = $(SAIL_DEFAULT_INST) riscv_jalr_seq.sail SAIL_RMEM_INST = $(SAIL_DEFAULT_INST) riscv_jalr_rmem.sail riscv_insts_rmem.sail diff --git a/model/riscv_insts_zvksed.sail b/model/riscv_insts_zvksed.sail new file mode 100644 index 000000000..7dcfc6fbf --- /dev/null +++ b/model/riscv_insts_zvksed.sail @@ -0,0 +1,53 @@ +/* + * Vector Cryptography Extension - Vector GCM/GMAC + * ---------------------------------------------------------------------- + */ + +/* + * Helper functions. + * ---------------------------------------------------------------------- + */ + +val rol32 : forall 'm, 32 - 'm >= 0 & 'm >= 0. 
(bits(32), int('m)) -> bits(32) +function rol32(X,N) = (X << N) | (X >> (32 - N)) + +val round_key : (bits(32), bits(32)) -> bits(32) +function round_key(X, S) = ((X) ^ ((S) ^ rol32((S), 13) ^ rol32((S), 23))) + +// SM4 Constant Key (CK) +let ck : list(bits(32)) = [| + 0x00070E15, 0x1C232A31, 0x383F464D, 0x545B6269, + 0x70777E85, 0x8C939AA1, 0xA8AFB6BD, 0xC4CBD2D9, + 0xE0E7EEF5, 0xFC030A11, 0x181F262D, 0x343B4249, + 0x50575E65, 0x6C737A81, 0x888F969D, 0xA4ABB2B9, + 0xC0C7CED5, 0xDCE3EAF1, 0xF8FF060D, 0x141B2229, + 0x30373E45, 0x4C535A61, 0x686F767D, 0x848B9299, + 0xA0A7AEB5, 0xBCC3CAD1, 0xD8DFE6ED, 0xF4FB0209, + 0x10171E25, 0x2C333A41, 0x484F565D, 0x646B7279 +|] + +/* Lookup function for Zvksed SM4 Contant Key;- takes an index and a list, and retrieves the + * x'th element of that list. + */ +val zvksed_box_lookup : (bits(32), list(bits(32))) -> bits(32) +function zvksed_box_lookup(x, table) = { + match (x, table) { + (0x00000000, t0::tn) => t0, + ( _, t0::tn) => zvksed_box_lookup(x - 0x00000001, tn) + } +} + +val zvksed_sm4_sbox : (bits(32)) -> bits(32) +function zvksed_sm4_sbox(x) = zvksed_box_lookup(x, ck) + +val sm4_subword : bits(32) -> bits(32) +function sm4_subword(x) = { + sm4_sbox(x[31..24]) @ + sm4_sbox(x[23..16]) @ + sm4_sbox(x[15.. 8]) @ + sm4_sbox(x[ 7.. 0]) +} + +val sm4_round : (bits(32), bits(32)) -> bits(32) +function sm4_round(X, S) = + ((X) ^ ((S) ^ rol32((S), 2) ^ rol32((S), 10) ^ rol32((S), 18) ^ rol32((S), 24))) diff --git a/model/riscv_sys_regs.sail b/model/riscv_sys_regs.sail index 5169e73f9..6cf975954 100644 --- a/model/riscv_sys_regs.sail +++ b/model/riscv_sys_regs.sail @@ -209,6 +209,7 @@ function haveZknd() -> bool = true function haveZmmul() -> bool = true function haveRVV() -> bool = misa.V() == 0b1 +function haveZvksed() -> bool = true /* see below for F and D extension tests */ bitfield Mstatush : bits(32) = { From 1a8dd85a23b60ee7f6cf40be2cb9c5b33d995865 Mon Sep 17 00:00:00 2001 From: Charalampos Mitrodimas Date: Tue, 28 Mar 2023 22:26:41 +0200 Subject: [PATCH 09/11] Zvksed: add "vsm4k.vi" instruction Vector SM4 KeyExpansion, four rounds of the SM4 Key expansion are performed. Note: zvksed_box_lookup & zvksed_sm4_sbox are created to work with 32bit values since sbox_lookup & sm4_sbox are used for 8bit values. The bits in uimm[4..3] are ignored. Round group numbers range from 0 to 7 and indicate which group of four round keys are being generated. Signed-off-by: Charalampos Mitrodimas --- model/riscv_insts_zvksed.sail | 81 +++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/model/riscv_insts_zvksed.sail b/model/riscv_insts_zvksed.sail index 7dcfc6fbf..2d0183894 100644 --- a/model/riscv_insts_zvksed.sail +++ b/model/riscv_insts_zvksed.sail @@ -8,6 +8,11 @@ * ---------------------------------------------------------------------- */ +val zvk_check_elements : (int, int, int, int) -> bool +function zvk_check_elements(VLEN, num_elem, LMUL, SEW) = { + ((unsigned(vl)%num_elem) != 0) | ((unsigned(vstart)%num_elem) != 0) | (LMUL*VLEN) < (num_elem*SEW) +} + val rol32 : forall 'm, 32 - 'm >= 0 & 'm >= 0. 
(bits(32), int('m)) -> bits(32) function rol32(X,N) = (X << N) | (X >> (32 - N)) @@ -51,3 +56,79 @@ function sm4_subword(x) = { val sm4_round : (bits(32), bits(32)) -> bits(32) function sm4_round(X, S) = ((X) ^ ((S) ^ rol32((S), 2) ^ rol32((S), 10) ^ rol32((S), 18) ^ rol32((S), 24))) + +/* VSM4K.VI */ + +union clause ast = RISCV_VSM4K_VI : (regidx, bits(5), regidx) + +mapping clause encdec = RISCV_VSM4K_VI(vs2, uimm, vd) if (haveRVV() & haveZvksed()) + <-> 0b1000011 @ vs2 @ uimm @ 0b010 @ vd @ 0b1110111 if (haveRVV() & haveZvksed()) + +mapping clause assembly = RISCV_VSM4K_VI(vs2, uimm, vd) + <-> "vsm4k.vi" ^ spc() ^ vreg_name(vd) + ^ sep() ^ vreg_name(vs2) + ^ sep() ^ reg_name(uimm) + +function clause execute (RISCV_VSM4K_VI(vs2, uimm, vd)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let LMUL = if LMUL_pow < 0 then 0 else LMUL_pow; + let VLEN = int_power(2, get_vlen_pow()); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if (zvk_check_elements(VLEN, num_elem, LMUL, SEW) == false) + then { + handle_illegal(); + RETIRE_FAIL + } else { + let 'n = num_elem; + let 'm = SEW; + assert('m == 32); + + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + rk : bits(128) = undefined; + + B : bits(32) = zeros(); + S : bits(32) = zeros(); + rk7_to_rk4 : bits(128) = zeros(); + rnd : bits(3) = uimm[2..0]; // Lower 3 bits + + eg_len = (unsigned(vl) / 'n); + eg_start = (unsigned(vstart) / 'n); + + foreach (i from eg_start to (eg_len - 1)) { + assert(0 <= ((i * 4) + 3) & ((i * 4) + 3) < 'n); + rk[31..0] = vs2_val[i*4+0]; + rk[63..32] = vs2_val[i*4+1]; + rk[95..64] = vs2_val[i*4+2]; + rk[127..96] = vs2_val[i*4+3]; + + B = rk[63..32] ^ rk[95..64] ^ rk[127..96] ^ zvksed_sm4_sbox(to_bits(32, 4 * unsigned(rnd))); + S = sm4_subword(B); + rk7_to_rk4[31..0] = round_key(rk[31..0], S); + + B = rk[95..64] ^ rk[127..96] ^ rk7_to_rk4[31..0] ^ zvksed_sm4_sbox(to_bits(32, 4 * unsigned(rnd) + 1)); + S = sm4_subword(B); + rk7_to_rk4[63..32] = round_key(rk[63..32], S); + + B = rk[127..96] ^ rk7_to_rk4[31..0] ^ rk7_to_rk4[63..32] ^ zvksed_sm4_sbox(to_bits(32, 4 * unsigned(rnd) + 2)); + S = sm4_subword(B); + rk7_to_rk4[95..64] = round_key(rk[95..64], S); + + B = rk7_to_rk4[31..0] ^ rk7_to_rk4[63..32] ^ rk7_to_rk4[95..64] ^ zvksed_sm4_sbox(to_bits(32, 4 * unsigned(rnd) + 3)); + S = sm4_subword(B); + rk7_to_rk4[127..96] = round_key(rk[127..96], S); + + result[i*4+0] = rk7_to_rk4[31..0]; + result[i*4+1] = rk7_to_rk4[63..32]; + result[i*4+2] = rk7_to_rk4[95..64]; + result[i*4+3] = rk7_to_rk4[127..96]; + }; + + write_single_vreg(num_elem, 'm, vd, result); + RETIRE_SUCCESS + } +} From d4584ab0c4895b68cc86f169bc9c43eb41a5aa0b Mon Sep 17 00:00:00 2001 From: Charalampos Mitrodimas Date: Tue, 28 Mar 2023 22:51:58 +0200 Subject: [PATCH 10/11] Zvksed: add "vsm4r.[vv,vs]" instructions Vector SM4 Rounds, four rounds of SM4 Encryption/Decryption are performed. The four words of current state are read in as a 4-element group from 'vd' and the round keys are read in from the corresponding 4-element group in vs2 (vector-vector form) or the scalar element group in vs2 (vector-scalar form). The next four words of state are generated by iteratively XORing the last three words of the state with the corresponding round key, performing a byte-wise substitution, and then performing XORs between rotated versions of this value and the corresponding current state. 
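For illustration only (not part of the patch itself), the per-element-group update described above can be sketched in Python. Here rol32 mirrors the Sail helper of the same name, and subword is a placeholder for the byte-wise SM4 S-box substitution (sm4_subword in the Sail code):

    def rol32(x, n):
        # 32-bit rotate left
        return ((x << n) | (x >> (32 - n))) & 0xFFFFFFFF

    def sm4_four_rounds(x, rk, subword):
        # x:  the four current 32-bit state words [x0, x1, x2, x3] read from vd
        # rk: the four 32-bit round keys read from vs2
        # returns the next four state words [x4, x5, x6, x7]
        s = list(x)
        nxt = []
        for i in range(4):
            b = s[1] ^ s[2] ^ s[3] ^ rk[i]   # XOR of the last three state words with the round key
            t = subword(b)                   # byte-wise substitution
            w = s[0] ^ t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^ rol32(t, 24)
            nxt.append(w)
            s = s[1:] + [w]                  # slide the state window forward by one word
        return nxt
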
Signed-off-by: Charalampos Mitrodimas --- model/riscv_insts_zvksed.sail | 97 +++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/model/riscv_insts_zvksed.sail b/model/riscv_insts_zvksed.sail index 2d0183894..1a8176fc2 100644 --- a/model/riscv_insts_zvksed.sail +++ b/model/riscv_insts_zvksed.sail @@ -132,3 +132,100 @@ function clause execute (RISCV_VSM4K_VI(vs2, uimm, vd)) = { RETIRE_SUCCESS } } + +/* VSM4R.[VV,VS] */ + +mapping zvksed_vv_or_vs : string <-> bits(7) = { + "vv" <-> 0b1010001, + "vs" <-> 0b1010011, +} + +mapping vsm4r_mnemonic : bits(7) <-> string = { + 0b1010001 <-> "vsm4r.vv", + 0b1010011 <-> "vsm4r.vs", +} + +union clause ast = RISCV_VSM4R_VV_VS : (regidx, regidx, string) + +mapping clause encdec = RISCV_VSM4R_VV_VS(vs2, vd, suffix) if (haveRVV() & haveZvksed()) + <-> zvksed_vv_or_vs(suffix) @ vs2 @ 0b10000 @ 0b010 @ vd @ 0b1110111 if (haveRVV() & haveZvksed()) + +mapping clause assembly = RISCV_VSM4R_VV_VS(vs2, vd, suffix) + <-> vsm4r_mnemonic(zvksed_vv_or_vs(suffix)) ^ spc() ^ vreg_name(vd) + ^ sep() ^ vreg_name(vs2) + +function clause execute (RISCV_VSM4R_VV_VS(vs2, vd, suffix)) = { + let SEW = get_sew(); + let LMUL_pow = get_lmul_pow(); + let LMUL = if LMUL_pow < 0 then 0 else LMUL_pow; + let VLEN = int_power(2, get_vlen_pow()); + let num_elem = get_num_elem(LMUL_pow, SEW); + + if (zvk_check_elements(VLEN, num_elem, LMUL, SEW) == false) + then { + handle_illegal(); + RETIRE_FAIL + } else { + let 'n = num_elem; + let 'm = SEW; + assert('m == 32); + + let vs2_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vs2); + let vd_val : vector('n, dec, bits('m)) = read_vreg(num_elem, SEW, LMUL_pow, vd); + result : vector('n, dec, bits('m)) = undefined; + + rk3_to_rk0 : bits(128) = undefined; + x3_to_x0 : bits(128) = undefined; + + x7_to_x4 : bits(128) = zeros(); + B : bits(32) = zeros(); + S : bits(32) = zeros(); + + eg_len = (unsigned(vl) / 'n); + eg_start = (unsigned(vstart) / 'n); + + foreach (i from eg_start to (eg_len - 1)) { + assert(0 <= ((i * 4) + 3) & ((i * 4) + 3) < 'n); + if suffix == "vv" then { + rk3_to_rk0[31..0] = vs2_val[i*4+0]; + rk3_to_rk0[63..32] = vs2_val[i*4+1]; + rk3_to_rk0[95..64] = vs2_val[i*4+2]; + rk3_to_rk0[127..96] = vs2_val[i*4+3]; + } else { + rk3_to_rk0[31..0] = vs2_val[0]; + rk3_to_rk0[63..32] = vs2_val[1]; + rk3_to_rk0[95..64] = vs2_val[2]; + rk3_to_rk0[127..96] = vs2_val[3]; + }; + + x3_to_x0[31..0] = vd_val[i*4+0]; + x3_to_x0[63..32] = vd_val[i*4+1]; + x3_to_x0[95..64] = vd_val[i*4+2]; + x3_to_x0[127..96] = vd_val[i*4+3]; + + B = x3_to_x0[63..32] ^ x3_to_x0[95..64] ^ x3_to_x0[127..96] ^ rk3_to_rk0[31..0]; + S = sm4_subword(B); + x7_to_x4[31..0] = sm4_round(x3_to_x0[31..0], S); + + B = x3_to_x0[95..64] ^ x3_to_x0[127..96] ^ x7_to_x4[31..0] ^ rk3_to_rk0[63..32]; + S = sm4_subword(B); + x7_to_x4[63..32] = sm4_round(x3_to_x0[63..32], S); + + B = x3_to_x0[127..96] ^ x7_to_x4[31..0] ^ x7_to_x4[63..32] ^ rk3_to_rk0[95..64]; + S = sm4_subword(B); + x7_to_x4[95..64] = sm4_round(x3_to_x0[95..64], S); + + B = x7_to_x4[31..0] ^ x7_to_x4[63..32] ^ x7_to_x4[95..64] ^ rk3_to_rk0[127..96]; + S = sm4_subword(B); + x7_to_x4[127..96] = sm4_round(x3_to_x0[127..96], S); + + result[i*4+0] = x7_to_x4[31..0]; + result[i*4+1] = x7_to_x4[63..32]; + result[i*4+2] = x7_to_x4[95..64]; + result[i*4+3] = x7_to_x4[127..96]; + }; + + write_single_vreg(num_elem, 'm, vd, result); + RETIRE_SUCCESS + } +} From f4d9b88120c1dbcc779cb70f58b7e512187581ad Mon Sep 17 00:00:00 2001 From: Charalampos Mitrodimas Date: Mon, 29 May 2023 
22:46:02 +0200 Subject: [PATCH 11/11] Fix extension name in comments Signed-off-by: Charalampos Mitrodimas --- model/riscv_insts_zvksed.sail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/riscv_insts_zvksed.sail b/model/riscv_insts_zvksed.sail index 1a8176fc2..32c1ae512 100644 --- a/model/riscv_insts_zvksed.sail +++ b/model/riscv_insts_zvksed.sail @@ -1,5 +1,5 @@ /* - * Vector Cryptography Extension - Vector GCM/GMAC + * Vector Cryptography Extension - ShangMi Suite: SM4 Block Cipher * ---------------------------------------------------------------------- */