@@ -3,8 +3,12 @@
  * Tiny Code Generator for QEMU
  *
  * Copyright (c) 2009, 2011 Stefan Weil
+ * Copyright (c) 2018 SiFive, Inc
+ * Copyright (c) 2008-2009 Arnaud Patard <[email protected]>
+ * Copyright (c) 2009 Aurelien Jarno <[email protected]>
+ * Copyright (c) 2008 Fabrice Bellard
  *
- * Based on tci/tcg-target.c.inc
+ * Based on tci/tcg-target.c.inc and riscv/tcg-target.c.inc
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -364,6 +368,10 @@ static void tcg_wasm_out_op_i64_eqz(TCGContext *s)
 {
     tcg_wasm_out8(s, 0x50);
 }
+static void tcg_wasm_out_op_i64_eq(TCGContext *s)
+{
+    tcg_wasm_out8(s, 0x51);
+}
 static void tcg_wasm_out_op_br(TCGContext *s, int i)
 {
     tcg_wasm_out8(s, 0x0c);
@@ -436,6 +444,10 @@ static void tcg_wasm_out_op_local_set(TCGContext *s, uint8_t i)
 {
     tcg_wasm_out_op_var(s, 0x21, i);
 }
+static void tcg_wasm_out_op_local_tee(TCGContext *s, uint8_t i)
+{
+    tcg_wasm_out_op_var(s, 0x22, i);
+}
 
 #define tcg_wasm_out_i64_calc(op) \
 static void tcg_wasm_out_i64_calc_##op( \
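Note: the two emitters added above map directly onto single WebAssembly opcodes: 0x51 is `i64.eq`, and 0x22 followed by a LEB128 local index is `local.tee`. Unlike `local.set`, `local.tee` leaves the stored value on the operand stack, which the TLB lookup below uses to keep the CPUTLBEntry pointer available for the comparator load while also saving it for the later addend load. A minimal sketch of the equivalence in terms of emitters this file already defines (illustration only, not part of the patch):

```c
/* local.tee behaves like local.set immediately followed by local.get. */
static void tcg_wasm_out_op_local_tee_equiv(TCGContext *s, uint8_t i)
{
    tcg_wasm_out_op_local_set(s, i);  /* pop the top of stack into local i */
    tcg_wasm_out_op_local_get(s, i);  /* push the value back on the stack */
}
```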
@@ -1993,12 +2005,161 @@ static void *qemu_ld_helper_ptr(uint32_t oi)
     }
 }
 
+static void tcg_wasm_out_i32_load_s(TCGContext *s, int off)
+{
+    if (off < 0) {
+        tcg_wasm_out_op_i32_const(s, off);
+        tcg_wasm_out_op_i32_add(s);
+        off = 0;
+    }
+    tcg_wasm_out_op_i32_load(s, 0, off);
+}
+
+static void tcg_wasm_out_i64_load_s(TCGContext *s, int off)
+{
+    if (off < 0) {
+        tcg_wasm_out_op_i32_const(s, off);
+        tcg_wasm_out_op_i32_add(s);
+        off = 0;
+    }
+    tcg_wasm_out_op_i64_load(s, 0, off);
+}
+
+#define MIN_TLB_MASK_TABLE_OFS INT_MIN
+
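Wasm load/store instructions encode their address offset as an unsigned immediate, so the negative offsets produced by QEMU's TLB layout (the fast-path tables sit below `env`) cannot be placed in the memarg. The two `_load_s` helpers above therefore fold a negative offset into the base address with an explicit add and load with offset 0, and `MIN_TLB_MASK_TABLE_OFS` is set to `INT_MIN` to tell the common TCG code that arbitrarily negative TLB offsets are acceptable. A hedged usage sketch, mirroring how `prepare_host_addr_wasm()` below loads the TLB mask (`mask_ofs` is typically negative):

```c
tcg_wasm_out_op_global_get_r_i32(s, TCG_AREG0); /* push env as the i32 base */
tcg_wasm_out_i32_load_s(s, mask_ofs);           /* handles mask_ofs < 0 */
```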
+static uint8_t prepare_host_addr_wasm(TCGContext *s, uint8_t *hit_var,
+                                      TCGReg addr_reg, MemOpIdx oi,
+                                      bool is_ld)
+{
+    MemOp opc = get_memop(oi);
+    TCGAtomAlign aa;
+    unsigned a_mask;
+    unsigned s_bits = opc & MO_SIZE;
+    unsigned s_mask = (1u << s_bits) - 1;
+    int mem_index = get_mmuidx(oi);
+    int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+    int add_off = offsetof(CPUTLBEntry, addend);
+    tcg_target_long compare_mask;
+
+    if (!tcg_use_softmmu) {
+        g_assert_not_reached();
+    }
+
+    *hit_var = TMP64_LOCAL_0_IDX;
+    tcg_wasm_out_op_i64_const(s, 0);
+    tcg_wasm_out_op_local_set(s, *hit_var);
+
+    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
+    a_mask = (1u << aa.align) - 1;
+
+    /* Get the CPUTLBEntry offset */
+    tcg_wasm_out_op_global_get_r(s, addr_reg);
+    tcg_wasm_out_op_i64_const(s, s->page_bits - CPU_TLB_ENTRY_BITS);
+    tcg_wasm_out_op_i64_shr_u(s);
+    tcg_wasm_out_op_i32_wrap_i64(s);
+    tcg_wasm_out_op_global_get_r_i32(s, TCG_AREG0);
+    tcg_wasm_out_i32_load_s(s, mask_ofs);
+    tcg_wasm_out_op_i32_and(s);
+
+    /* Get the pointer to the target CPUTLBEntry */
+    tcg_wasm_out_op_global_get_r_i32(s, TCG_AREG0);
+    tcg_wasm_out_i32_load_s(s, table_ofs);
+    tcg_wasm_out_op_i32_add(s);
+    tcg_wasm_out_op_local_tee(s, TMP32_LOCAL_0_IDX);
+
+    /* Load the tlb comparator */
+    tcg_wasm_out_i64_load_s(
+        s, is_ld ? offsetof(CPUTLBEntry, addr_read)
+                 : offsetof(CPUTLBEntry, addr_write));
+
+    /*
+     * For aligned accesses, we check the first byte and include the
+     * alignment bits within the address. For unaligned access, we
+     * check that we don't cross pages using the address of the last
+     * byte of the access.
+     */
+    tcg_wasm_out_op_global_get_r(s, addr_reg);
+    if (a_mask < s_mask) {
+        tcg_wasm_out_op_i64_const(s, s_mask - a_mask);
+        tcg_wasm_out_op_i64_add(s);
+    }
+    compare_mask = (uint64_t)s->page_mask | a_mask;
+    tcg_wasm_out_op_i64_const(s, compare_mask);
+    tcg_wasm_out_op_i64_and(s);
+
+    /* Compare masked address with the TLB entry. */
+    tcg_wasm_out_op_i64_eq(s);
+    tcg_wasm_out_op_if_noret(s);
+
+    /* TLB Hit - translate address using addend. */
+    tcg_wasm_out_op_local_get(s, TMP32_LOCAL_0_IDX);
+    tcg_wasm_out_i32_load_s(s, add_off);
+    tcg_wasm_out_op_global_get_r(s, addr_reg);
+    tcg_wasm_out_op_i32_wrap_i64(s);
+    tcg_wasm_out_op_i32_add(s);
+    tcg_wasm_out_op_local_set(s, TMP32_LOCAL_1_IDX);
+    tcg_wasm_out_op_i64_const(s, 1);
+    tcg_wasm_out_op_local_set(s, *hit_var);
+
+    tcg_wasm_out_op_end(s);
+
+    return TMP32_LOCAL_1_IDX;
+}
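The Wasm emitted by `prepare_host_addr_wasm()` is the standard QEMU softmmu fast path. A rough C model of what it computes (exposition only, assuming the usual QEMU TLB types; the real code keeps the hit flag and host address in Wasm locals instead of returning them):

```c
static void *tlb_lookup_model(CPUArchState *env, uint64_t addr, int mem_index,
                              unsigned s_mask, unsigned a_mask,
                              int page_bits, uint64_t page_mask, bool is_ld)
{
    CPUTLBDescFast *fast = &env_tlb(env)->f[mem_index];
    /* Index the TLB: shift the page number into entry-sized units. */
    CPUTLBEntry *ent = (CPUTLBEntry *)(uintptr_t)
        (fast->table + ((addr >> (page_bits - CPU_TLB_ENTRY_BITS))
                        & fast->mask));
    uint64_t cmp = is_ld ? ent->addr_read : ent->addr_write;
    /* For unaligned accesses, check against the last byte accessed. */
    uint64_t va = addr + (a_mask < s_mask ? s_mask - a_mask : 0);

    if ((va & (page_mask | a_mask)) == cmp) {
        /* Hit: host address = TLB addend + 32-bit guest address. */
        return (void *)(uintptr_t)(ent->addend + (uint32_t)addr);
    }
    return NULL; /* miss: fall through to the helper call */
}
```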
+
+static void tcg_wasm_out_qemu_ld_direct(
+    TCGContext *s, TCGReg r, uint8_t base, MemOp opc)
+{
+    switch (opc & (MO_SSIZE)) {
+    case MO_UB:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load8_u(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    case MO_SB:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load8_s(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    case MO_UW:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load16_u(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    case MO_SW:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load16_s(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    case MO_UL:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load32_u(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    case MO_SL:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load32_s(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    case MO_UQ:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_i64_load(s, 0, 0);
+        tcg_wasm_out_op_global_set_r(s, r);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
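For orientation (opcode numbers are from the WebAssembly spec, not from this patch): each MemOp size/sign pair above selects the typed load that widens to i64, so the result can be written to the 64-bit guest register with `tcg_wasm_out_op_global_set_r()`:

```c
/*
 * MO_UB -> i64.load8_u  (0x31)    MO_SB -> i64.load8_s  (0x30)
 * MO_UW -> i64.load16_u (0x33)    MO_SW -> i64.load16_s (0x32)
 * MO_UL -> i64.load32_u (0x35)    MO_SL -> i64.load32_s (0x34)
 * MO_UQ -> i64.load     (0x29)
 */
```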
 static void tcg_wasm_out_qemu_ld(TCGContext *s, TCGReg data_reg,
                                  TCGReg addr_reg, MemOpIdx oi)
 {
     int helper_idx;
     int func_idx;
     bool addr64 = s->addr_type == TCG_TYPE_I64;
+    MemOp mop = get_memop(oi);
+    uint8_t base_var, hit_var;
 
     helper_idx = (uint32_t)qemu_ld_helper_ptr(oi);
     func_idx = get_helper_idx(s, helper_idx);
@@ -2012,6 +2173,14 @@ static void tcg_wasm_out_qemu_ld(TCGContext *s, TCGReg data_reg,
         addr_reg = TCG_REG_TMP;
     }
 
+    base_var = prepare_host_addr_wasm(s, &hit_var, addr_reg, oi, true);
+    tcg_wasm_out_op_local_get(s, hit_var);
+    tcg_wasm_out_op_i64_const(s, 1);
+    tcg_wasm_out_op_i64_eq(s);
+    tcg_wasm_out_op_if_noret(s);
+    tcg_wasm_out_qemu_ld_direct(s, data_reg, base_var, mop); /* fast path */
+    tcg_wasm_out_op_end(s);
+
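The gate just added runs the direct access only when the lookup recorded a TLB hit; the helper call is placed in the following block so that rewinding can skip it, and is itself guarded by `i64.eqz` on the hit flag. Wat-like shape of the generated code (a sketch, assuming the block layout described by the surrounding comments):

```c
/*
 *   ;; current block: prepare_host_addr_wasm sets $hit and $base
 *   (if (i64.eq (local.get $hit) (i64.const 1))
 *       (then ... direct load via $base ...))    ;; fast path
 *   ;; next block: re-entry after rewinding starts here
 *   (if (i64.eqz (local.get $hit))
 *       (then ... call the qemu_ld helper ...))  ;; slow path
 *
 * The store path below follows the same shape.
 */
```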
     /*
      * update the block index so that the possible rewinding will
      * skip this block
@@ -2020,6 +2189,10 @@ static void tcg_wasm_out_qemu_ld(TCGContext *s, TCGReg data_reg,
     tcg_wasm_out_op_global_set(s, BLOCK_PTR_IDX);
     tcg_wasm_out_new_block(s);
 
+    tcg_wasm_out_op_local_get(s, hit_var);
+    tcg_wasm_out_op_i64_eqz(s);
+    tcg_wasm_out_op_if_noret(s);
+
     /* call helper */
     tcg_wasm_out_op_global_get_r(s, TCG_AREG0);
     tcg_wasm_out_op_i32_wrap_i64(s);
@@ -2030,6 +2203,8 @@ static void tcg_wasm_out_qemu_ld(TCGContext *s, TCGReg data_reg,
     tcg_wasm_out_op_call(s, func_idx);
     tcg_wasm_out_op_global_set_r(s, data_reg);
     tcg_wasm_out_handle_unwinding(s);
+
+    tcg_wasm_out_op_end(s);
 }
 
 static void *qemu_st_helper_ptr(uint32_t oi)
@@ -2049,13 +2224,43 @@ static void *qemu_st_helper_ptr(uint32_t oi)
     }
 }
 
+static void tcg_wasm_out_qemu_st_direct(
+    TCGContext *s, TCGReg lo, uint8_t base, MemOp opc)
+{
+    switch (opc & (MO_SSIZE)) {
+    case MO_8:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_global_get_r(s, lo);
+        tcg_wasm_out_op_i64_store8(s, 0, 0);
+        break;
+    case MO_16:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_global_get_r(s, lo);
+        tcg_wasm_out_op_i64_store16(s, 0, 0);
+        break;
+    case MO_32:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_global_get_r(s, lo);
+        tcg_wasm_out_op_i64_store32(s, 0, 0);
+        break;
+    case MO_64:
+        tcg_wasm_out_op_local_get(s, base);
+        tcg_wasm_out_op_global_get_r(s, lo);
+        tcg_wasm_out_op_i64_store(s, 0, 0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
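The store variants map one-to-one onto Wasm's truncating i64 stores (opcode numbers per the WebAssembly spec, for orientation only); each pops the i64 value read from the guest register and the i32 host address pushed just before it:

```c
/*
 * MO_8  -> i64.store8  (0x3c)    MO_16 -> i64.store16 (0x3d)
 * MO_32 -> i64.store32 (0x3e)    MO_64 -> i64.store   (0x37)
 */
```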
 static void tcg_wasm_out_qemu_st(TCGContext *s, TCGReg data_reg,
                                  TCGReg addr_reg, MemOpIdx oi)
 {
     int helper_idx;
     int func_idx;
     bool addr64 = s->addr_type == TCG_TYPE_I64;
     MemOp mop = get_memop(oi);
+    uint8_t base_var, hit_var;
 
     helper_idx = (uint32_t)qemu_st_helper_ptr(oi);
     func_idx = get_helper_idx(s, helper_idx);
@@ -2069,6 +2274,14 @@ static void tcg_wasm_out_qemu_st(TCGContext *s, TCGReg data_reg,
         addr_reg = TCG_REG_TMP;
     }
 
+    base_var = prepare_host_addr_wasm(s, &hit_var, addr_reg, oi, false);
+    tcg_wasm_out_op_local_get(s, hit_var);
+    tcg_wasm_out_op_i64_const(s, 1);
+    tcg_wasm_out_op_i64_eq(s);
+    tcg_wasm_out_op_if_noret(s);
+    tcg_wasm_out_qemu_st_direct(s, data_reg, base_var, mop); /* fast path */
+    tcg_wasm_out_op_end(s);
+
     /*
      * update the block index so that the possible rewinding will
      * skip this block
@@ -2077,6 +2290,10 @@ static void tcg_wasm_out_qemu_st(TCGContext *s, TCGReg data_reg,
     tcg_wasm_out_op_global_set(s, BLOCK_PTR_IDX);
     tcg_wasm_out_new_block(s);
 
+    tcg_wasm_out_op_local_get(s, hit_var);
+    tcg_wasm_out_op_i64_eqz(s);
+    tcg_wasm_out_op_if_noret(s);
+
     /* call helper */
     tcg_wasm_out_op_global_get_r(s, TCG_AREG0);
     tcg_wasm_out_op_i32_wrap_i64(s);
@@ -2095,6 +2312,8 @@ static void tcg_wasm_out_qemu_st(TCGContext *s, TCGReg data_reg,
 
     tcg_wasm_out_op_call(s, func_idx);
     tcg_wasm_out_handle_unwinding(s);
+
+    tcg_wasm_out_op_end(s);
 }
 
 static bool patch_reloc(tcg_insn_unit *code_ptr_i, int type,
@@ -3752,7 +3971,7 @@ static int tcg_out_tb_end(TCGContext *s)
 
 bool tcg_target_has_memory_bswap(MemOp memop)
 {
-    return true;
+    return false;
 }
 
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
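Note: with `tcg_target_has_memory_bswap()` now returning false, the common TCG layer no longer hands this backend byte-swapped memory operations; it emits a plain access plus a separate bswap op instead, which fits a target like Wasm that has no byte-swapping loads or stores.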
0 commit comments