10554
|
1 /* ----------------------------------------------------------------------- *
|
|
2 *
|
|
3 * Copyright 1996-2017 The NASM Authors - All Rights Reserved
|
|
4 * See the file AUTHORS included with the NASM distribution for
|
|
5 * the specific copyright holders.
|
|
6 *
|
|
7 * Redistribution and use in source and binary forms, with or without
|
|
8 * modification, are permitted provided that the following
|
|
9 * conditions are met:
|
|
10 *
|
|
11 * * Redistributions of source code must retain the above copyright
|
|
12 * notice, this list of conditions and the following disclaimer.
|
|
13 * * Redistributions in binary form must reproduce the above
|
|
14 * copyright notice, this list of conditions and the following
|
|
15 * disclaimer in the documentation and/or other materials provided
|
|
16 * with the distribution.
|
|
17 *
|
|
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
|
|
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
|
|
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
|
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
|
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
|
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
31 *
|
|
32 * ----------------------------------------------------------------------- */
|
|
33
|
|
34 /*
|
|
35 * assemble.c code generation for the Netwide Assembler
|
|
36 *
|
|
37 * Bytecode specification
|
|
38 * ----------------------
|
|
39 *
|
|
40 *
|
|
41 * Codes Mnemonic Explanation
|
|
42 *
|
|
43 * \0 terminates the code. (Unless it's a literal of course.)
|
|
44 * \1..\4 that many literal bytes follow in the code stream
|
|
45 * \5 add 4 to the primary operand number (b, low octdigit)
|
|
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
|
|
47 * \7 add 4 to both the primary and the secondary operand number
|
|
48 * \10..\13 a literal byte follows in the code stream, to be added
|
|
49 * to the register value of operand 0..3
|
|
50 * \14..\17 the position of index register operand in MIB (BND insns)
|
|
51 * \20..\23 ib a byte immediate operand, from operand 0..3
|
|
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
|
|
53 * \30..\33 iw a word immediate operand, from operand 0..3
|
|
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
|
|
55 * assembly mode or the operand-size override on the operand
|
|
56 * \40..\43 id a long immediate operand, from operand 0..3
|
|
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
|
|
58 * depending on the address size of the instruction.
|
|
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
|
|
60 * \54..\57 iq a qword immediate operand, from operand 0..3
|
|
61 * \60..\63 rel16 a word relative operand, from operand 0..3
|
|
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
|
|
63 * assembly mode or the operand-size override on the operand
|
|
64 * \70..\73 rel32 a long relative operand, from operand 0..3
|
|
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
|
|
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
|
|
67 * field the register value of operand b.
|
|
68 * \172\ab the register number from operand a in bits 7..4, with
|
|
69 * the 4-bit immediate from operand b in bits 3..0.
|
|
70 * \173\xab the register number from operand a in bits 7..4, with
|
|
71 * the value b in bits 3..0.
|
|
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
|
|
73 * an arbitrary value in bits 3..0 (assembled as zero.)
|
|
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
|
|
75 * field equal to digit b.
|
|
76 *
|
|
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
|
|
78 * V field taken from operand 0..3.
|
|
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
|
|
80 * V field set to 1111b.
|
|
81 *
|
|
82 * EVEX prefixes are followed by the sequence:
|
|
83 * \cm\wlp\tup where cm is:
|
|
84 * cc 00m mmm
|
|
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
|
|
86 * and wlp is:
|
|
87 * 00 wwl lpp
|
|
88 * [l0] ll = 0 (.128, .lz)
|
|
89 * [l1] ll = 1 (.256)
|
|
90 * [l2] ll = 2 (.512)
|
|
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
|
|
92 *
|
|
93 * [w0] ww = 0 for W = 0
|
|
94 * [w1] ww = 1 for W = 1
|
|
95 * [wig] ww = 2 for W don't care (always assembled as 0)
|
|
96 * [ww] ww = 3 for W used as REX.W
|
|
97 *
|
|
98 * [p0] pp = 0 for no prefix
|
|
99  * [66] pp = 1 for legacy prefix 66
|
|
100 * [f3] pp = 2
|
|
101 * [f2] pp = 3
|
|
102 *
|
|
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
|
|
104 * (compressed displacement encoding)
|
|
105 *
|
|
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
|
|
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
|
|
108 * V field taken from operand 0..3.
|
|
109 * \270 this instruction uses VEX/XOP rather than REX, with the
|
|
110 * V field set to 1111b.
|
|
111 *
|
|
112 * VEX/XOP prefixes are followed by the sequence:
|
|
113 * \tmm\wlp where mm is the M field; and wlp is:
|
|
114 * 00 wwl lpp
|
|
115 * [l0] ll = 0 for L = 0 (.128, .lz)
|
|
116 * [l1] ll = 1 for L = 1 (.256)
|
|
117 * [lig] ll = 2 for L don't care (always assembled as 0)
|
|
118 *
|
|
119 * [w0] ww = 0 for W = 0
|
|
120  * [w1]  ww = 1 for W = 1
|
|
121 * [wig] ww = 2 for W don't care (always assembled as 0)
|
|
122 * [ww] ww = 3 for W used as REX.W
|
|
123 *
|
|
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
|
|
125 *
|
|
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
|
|
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
|
|
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
|
|
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
|
|
130 * to the operand size (if o16/o32/o64 present) or the bit size
|
|
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
|
|
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
|
|
133 * \312 adf (disassembler only) invalid with non-default address size.
|
|
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
|
|
135 * \314 norexb (disassembler only) invalid with REX.B
|
|
136 * \315 norexx (disassembler only) invalid with REX.X
|
|
137 * \316 norexr (disassembler only) invalid with REX.R
|
|
138 * \317 norexw (disassembler only) invalid with REX.W
|
|
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
|
|
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
|
|
141 * \322 odf indicates that this instruction is only valid when the
|
|
142 * operand size is the default (instruction to disassembler,
|
|
143 * generates no code in the assembler)
|
|
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
|
|
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
|
|
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
|
|
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
|
|
148  * disassembler only; for SSE instructions.
|
|
149 * \330 a literal byte follows in the code stream, to be added
|
|
150 * to the condition code value of the instruction.
|
|
151 * \331 norep instruction not valid with REP prefix. Hint for
|
|
152 * disassembler only; for SSE instructions.
|
|
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
|
|
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
|
|
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
|
|
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
|
|
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
|
|
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
|
|
159 * \336-\337 are still listed as prefixes in the disassembler.
|
|
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
|
|
161 * Operand 0 had better be a segmentless constant.
|
|
162 * \341 wait this instruction needs a WAIT "prefix"
|
|
163 * \360 np no SSE prefix (== \364\331)
|
|
164 * \361 66 SSE prefix (== \366\331)
|
|
165 * \364 !osp operand-size prefix (0x66) not permitted
|
|
166 * \365 !asp address-size prefix (0x67) not permitted
|
|
167 * \366 operand-size prefix (0x66) used as opcode extension
|
|
168 * \367 address-size prefix (0x67) used as opcode extension
|
|
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
|
|
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
|
|
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
|
|
172 * used for conditional jump over longer jump
|
|
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
|
|
174 * \375 vsiby|vm32y|vm64y this instruction takes an YMM VSIB memory EA
|
|
175 * \376 vsibz|vm32z|vm64z this instruction takes an ZMM VSIB memory EA
|
|
176 */
|
|
177
|
|
178 #include "compiler.h"
|
|
179
|
|
180 #include <stdio.h>
|
|
181 #include <string.h>
|
|
182 #include <stdlib.h>
|
|
183
|
|
184 #include "nasm.h"
|
|
185 #include "nasmlib.h"
|
|
186 #include "error.h"
|
|
187 #include "assemble.h"
|
|
188 #include "insns.h"
|
|
189 #include "tables.h"
|
|
190 #include "disp8.h"
|
|
191 #include "listing.h"
|
|
192
|
|
/*
 * Result of attempting to match an instruction against a template.
 * Error values map to the diagnostics emitted in assemble()'s
 * no-match switch; success values sort last.
 */
enum match_result {
    /*
     * Matching errors. These should be sorted so that more specific
     * errors come later in the sequence.
     */
    MERR_INVALOP,           /* invalid combination of opcode and operands */
    MERR_OPSIZEMISSING,     /* operation size not specified */
    MERR_OPSIZEMISMATCH,    /* mismatch in operand sizes */
    MERR_BRNUMMISMATCH,     /* mismatch in number of broadcasting elements */
    MERR_BADCPU,            /* no instruction for this cpu level */
    MERR_BADMODE,           /* instruction not supported in this bit mode */
    MERR_BADHLE,            /* bad HLE prefix combination (see bad_hle_warn) */
    MERR_ENCMISMATCH,       /* specific encoding scheme not available */
    MERR_BADBND,            /* bnd prefix is not allowed */
    MERR_BADREPNE,          /* repne/repnz prefix is not allowed */
    /*
     * Matching success; the conditional ones first
     */
    MOK_JUMP,               /* Matching OK but needs jmp_match() */
    MOK_GOOD                /* Matching unconditionally OK */
};
|
|
214
|
|
/*
 * An assembled effective address: the ModRM/SIB/displacement cluster
 * for one memory (or register) operand.  Filled in via process_ea()
 * (see prototype below).
 */
typedef struct {
    enum ea_type type;            /* what kind of EA is this? */
    int sib_present;              /* is a SIB byte necessary? */
    int bytes;                    /* # of bytes of offset needed */
    int size;                     /* lazy - this is sib+bytes+1 */
    uint8_t modrm, sib, rex, rip; /* the bytes themselves */
    int8_t disp8;                 /* compressed displacement for EVEX */
} ea;
|
|
223
|
|
/*
 * Assemble a SIB byte: scale in bits 7..6, index in bits 5..3, base in
 * bits 2..0.  Unlike GEN_MODRM below, the arguments are NOT masked
 * here; callers must pass values already reduced to field width.
 */
#define GEN_SIB(scale, index, base)                 \
        (((scale) << 6) | ((index) << 3) | ((base)))
|
|
226
|
|
/*
 * Assemble a ModRM byte: mod in bits 7..6, reg in bits 5..3, r/m in
 * bits 2..0.  reg and rm are masked to 3 bits so full register numbers
 * (whose high bits go into REX/VEX/EVEX) can be passed directly.
 */
#define GEN_MODRM(mod, reg, rm)                     \
        (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
|
|
229
|
|
230 static int64_t calcsize(int32_t, int64_t, int, insn *,
|
|
231 const struct itemplate *);
|
|
232 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
|
|
233 static void gencode(struct out_data *data, insn *ins);
|
|
234 static enum match_result find_match(const struct itemplate **tempp,
|
|
235 insn *instruction,
|
|
236 int32_t segment, int64_t offset, int bits);
|
|
237 static enum match_result matches(const struct itemplate *, insn *, int bits);
|
|
238 static opflags_t regflag(const operand *);
|
|
239 static int32_t regval(const operand *);
|
|
240 static int rexflags(int, opflags_t, int);
|
|
241 static int op_rexflags(const operand *, int);
|
|
242 static int op_evexflags(const operand *, int, uint8_t);
|
|
243 static void add_asp(insn *, int);
|
|
244
|
|
245 static enum ea_type process_ea(operand *, ea *, int, int, opflags_t, insn *);
|
|
246
|
|
247 static inline bool absolute_op(const struct operand *o)
|
|
248 {
|
|
249 return o->segment == NO_SEG && o->wrt == NO_SEG &&
|
|
250 !(o->opflags & OPFLAG_RELATIVE);
|
|
251 }
|
|
252
|
|
253 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
|
|
254 {
|
|
255 return ins->prefixes[pos] == prefix;
|
|
256 }
|
|
257
|
|
258 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
|
|
259 {
|
|
260 if (ins->prefixes[pos])
|
|
261 nasm_error(ERR_NONFATAL, "invalid %s prefix",
|
|
262 prefix_name(ins->prefixes[pos]));
|
|
263 }
|
|
264
|
|
/*
 * Map an operand size in bytes to its NASM size keyword; unknown
 * sizes produce "???".
 */
static const char *size_name(int size)
{
    static const struct {
        int bytes;
        const char *name;
    } tbl[] = {
        {  1, "byte"  },
        {  2, "word"  },
        {  4, "dword" },
        {  8, "qword" },
        { 10, "tword" },
        { 16, "oword" },
        { 32, "yword" },
        { 64, "zword" },
    };
    size_t i;

    for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
        if (tbl[i].bytes == size)
            return tbl[i].name;
    }
    return "???";
}
|
|
288
|
|
/*
 * Emit the standard pass-2 numeric-overflow warning for data of the
 * given size in bytes (warning class ERR_WARN_NOV).
 */
static void warn_overflow(int size)
{
    nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
               "%s data exceeds bounds", size_name(size));
}
|
|
294
|
|
295 static void warn_overflow_const(int64_t data, int size)
|
|
296 {
|
|
297 if (overflow_general(data, size))
|
|
298 warn_overflow(size);
|
|
299 }
|
|
300
|
|
301 static void warn_overflow_opd(const struct operand *o, int size)
|
|
302 {
|
|
303 if (absolute_op(o)) {
|
|
304 if (overflow_general(o->offset, size))
|
|
305 warn_overflow(size);
|
|
306 }
|
|
307 }
|
|
308
|
|
309 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
|
|
310 {
|
|
311 bool err;
|
|
312
|
|
313 switch (sign) {
|
|
314 case OUT_WRAP:
|
|
315 err = overflow_general(data, size);
|
|
316 break;
|
|
317 case OUT_SIGNED:
|
|
318 err = overflow_signed(data, size);
|
|
319 break;
|
|
320 case OUT_UNSIGNED:
|
|
321 err = overflow_unsigned(data, size);
|
|
322 break;
|
|
323 default:
|
|
324 panic();
|
|
325 break;
|
|
326 }
|
|
327
|
|
328 if (err)
|
|
329 warn_overflow(size);
|
|
330 }
|
|
331
|
|
/*
 * This routine wrappers the real output format's output routine,
 * in order to pass a copy of the data off to the listing file
 * generator at the same time, flatten unnecessary relocations,
 * and verify backend compatibility.
 *
 * On return, data->offset and data->insoffs have been advanced by the
 * number of bytes emitted.
 */
static void out(struct out_data *data)
{
    static int32_t lineno = 0;     /* static!!! */
    static const char *lnfname = NULL;
    int asize;
    const int amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
    union {
        uint8_t b[8];
        uint64_t q;
    } xdata;                    /* scratch buffer for flattened addresses */
    uint64_t size = data->size;
    int64_t addrval;
    int32_t fixseg;             /* Segment for which to produce fixed data */

    if (!data->size)
        return;                 /* Nothing to do */

    /*
     * Convert addresses to RAWDATA if possible
     * XXX: not all backends want this for global symbols!!!!
     */
    switch (data->type) {
    case OUT_ADDRESS:
        addrval = data->toffset;
        fixseg = NO_SEG;        /* Absolute address is fixed data */
        goto address;

    case OUT_RELADDR:
        addrval = data->toffset - data->relbase;
        fixseg = data->segment; /* Our own segment is fixed data */
        goto address;

    address:
        asize = data->size;
        nasm_assert(asize <= 8);
        if (data->tsegment == fixseg && data->twrt == NO_SEG) {
            /* Target is fully resolved: emit the literal bytes instead
             * of asking the backend for a relocation. */
            uint8_t *q = xdata.b;

            warn_overflow_out(addrval, asize, data->sign);

            WRITEADDR(q, addrval, asize);
            data->data = xdata.b;
            data->type = OUT_RAWDATA;
            asize = 0;          /* No longer an address */
        }
        break;

    default:
        asize = 0;              /* Not an address */
        break;
    }

    /* Listing copy goes out first, before any relocation splitting */
    lfmt->output(data);

    /*
     * this call to src_get determines when we call the
     * debug-format-specific "linenum" function
     * it updates lineno and lnfname to the current values
     * returning 0 if "same as last time", -2 if lnfname
     * changed, and the amount by which lineno changed,
     * if it did. thus, these variables must be static
     */

    if (src_get(&lineno, &lnfname))
        dfmt->linenum(lnfname, lineno, data->segment);

    if (asize && asize > amax) {
        /* Relocation wider than the backend supports */
        if (data->type != OUT_ADDRESS || data->sign == OUT_SIGNED) {
            nasm_error(ERR_NONFATAL,
                       "%d-bit signed relocation unsupported by output format %s\n",
                       asize << 3, ofmt->shortname);
        } else {
            /* Zero-extend: emit an amax-byte relocation, then pad the
             * remaining asize-amax bytes with zeroes below. */
            nasm_error(ERR_WARNING | ERR_WARN_ZEXTRELOC,
                       "%d-bit unsigned relocation zero-extended from %d bits\n",
                       asize << 3, ofmt->maxbits);
            data->size = amax;
            ofmt->output(data);
            data->insoffs += amax;
            data->offset += amax;
            data->size = size = asize - amax;
        }
        data->data = zero_buffer;
        data->type = OUT_RAWDATA;
    }

    ofmt->output(data);
    data->offset += size;
    data->insoffs += size;
}
|
|
427
|
|
428 static inline void out_rawdata(struct out_data *data, const void *rawdata,
|
|
429 size_t size)
|
|
430 {
|
|
431 data->type = OUT_RAWDATA;
|
|
432 data->data = rawdata;
|
|
433 data->size = size;
|
|
434 out(data);
|
|
435 }
|
|
436
|
|
437 static void out_rawbyte(struct out_data *data, uint8_t byte)
|
|
438 {
|
|
439 data->type = OUT_RAWDATA;
|
|
440 data->data = &byte;
|
|
441 data->size = 1;
|
|
442 out(data);
|
|
443 }
|
|
444
|
|
445 static inline void out_reserve(struct out_data *data, uint64_t size)
|
|
446 {
|
|
447 data->type = OUT_RESERVE;
|
|
448 data->size = size;
|
|
449 out(data);
|
|
450 }
|
|
451
|
|
452 static inline void out_imm(struct out_data *data, const struct operand *opx,
|
|
453 int size, enum out_sign sign)
|
|
454 {
|
|
455 data->type =
|
|
456 (opx->opflags & OPFLAG_RELATIVE) ? OUT_RELADDR : OUT_ADDRESS;
|
|
457 data->sign = sign;
|
|
458 data->size = size;
|
|
459 data->toffset = opx->offset;
|
|
460 data->tsegment = opx->segment;
|
|
461 data->twrt = opx->wrt;
|
|
462 /*
|
|
463 * XXX: improve this if at some point in the future we can
|
|
464 * distinguish the subtrahend in expressions like [foo - bar]
|
|
465 * where bar is a symbol in the current segment. However, at the
|
|
466 * current point, if OPFLAG_RELATIVE is set that subtraction has
|
|
467 * already occurred.
|
|
468 */
|
|
469 data->relbase = 0;
|
|
470 out(data);
|
|
471 }
|
|
472
|
|
473 static void out_reladdr(struct out_data *data, const struct operand *opx,
|
|
474 int size)
|
|
475 {
|
|
476 if (opx->opflags & OPFLAG_RELATIVE)
|
|
477 nasm_error(ERR_NONFATAL, "invalid use of self-relative expression");
|
|
478
|
|
479 data->type = OUT_RELADDR;
|
|
480 data->sign = OUT_SIGNED;
|
|
481 data->size = size;
|
|
482 data->toffset = opx->offset;
|
|
483 data->tsegment = opx->segment;
|
|
484 data->twrt = opx->wrt;
|
|
485 data->relbase = data->offset + (data->inslen - data->insoffs);
|
|
486 out(data);
|
|
487 }
|
|
488
|
|
489 static inline void out_segment(struct out_data *data,
|
|
490 const struct operand *opx)
|
|
491 {
|
|
492 data->type = OUT_SEGMENT;
|
|
493 data->sign = OUT_UNSIGNED;
|
|
494 data->size = 2;
|
|
495 data->toffset = opx->offset;
|
|
496 data->tsegment = ofmt->segbase(opx->segment + 1);
|
|
497 data->twrt = opx->wrt;
|
|
498 out(data);
|
|
499 }
|
|
500
|
|
/*
 * Decide whether a conditional/unconditional jump can use the short
 * (byte-displacement) form.  Only templates starting with the \370
 * (Jcc) or \371 (JMP) bytecode qualify; returns true if the target is
 * within signed 8-bit range of the end of the instruction, or
 * optimistically in pass 1 while the target is still unknown.
 */
static bool jmp_match(int32_t segment, int64_t offset, int bits,
                      insn * ins, const struct itemplate *temp)
{
    int64_t isize;
    const uint8_t *code = temp->code;
    uint8_t c = code[0];
    bool is_byte;

    /* Not a \370/\371 template, or user demanded a STRICT size */
    if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
        return false;
    if (!optimizing)
        return false;
    /* negative `optimizing` excludes JMP shortening — NOTE(review):
     * confirm exact -O level semantics against nasm.c */
    if (optimizing < 0 && c == 0371)
        return false;

    isize = calcsize(segment, offset, bits, ins, temp);

    if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
        /* Be optimistic in pass 1 */
        return true;

    if (ins->oprs[0].segment != segment)
        return false;

    isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
    is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */

    if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
        /* jmp short (opcode eb) cannot be used with bnd prefix. */
        ins->prefixes[PPS_REP] = P_none;
        nasm_error(ERR_WARNING | ERR_WARN_BND | ERR_PASS2 ,
                "jmp short does not init bnd regs - bnd prefix dropped.");
    }

    return is_byte;
}
|
|
537
|
|
/*
 * Buffered-I/O block size used by INCBIN when the file cannot be
 * memory-mapped.
 * This is totally just a wild guess what is reasonable...
 */
#define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
|
|
540
|
|
/*
 * Assemble one instruction (pass 2): emit its bytes at `start` in
 * `segment`, honoring TIMES.  Handles three families: DB/DW/.../DY
 * pseudo-ops, INCBIN, and real instructions matched against the
 * template table.  Returns the number of bytes emitted.
 */
int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
{
    struct out_data data;
    const struct itemplate *temp;
    enum match_result m;
    int32_t itimes;
    int64_t wsize;              /* size for DB etc. */

    nasm_zero(&data);
    data.offset = start;
    data.segment = segment;
    data.itemp = NULL;
    data.sign = OUT_WRAP;
    data.bits = bits;

    /* wsize > 0: DB-family element size; -1: not a data directive */
    wsize = idata_bytes(instruction->opcode);
    if (wsize == -1)
        return 0;

    if (wsize) {
        /* DB/DW/DD/DQ/DT/DO/DY: emit each extended operand, padded to
         * the element size */
        extop *e;
        int32_t t = instruction->times;
        if (t < 0)
            nasm_panic(0, "instruction->times < 0 (%"PRId32") in assemble()", t);

        while (t--) {           /* repeat TIMES times */
            list_for_each(e, instruction->eops) {
                if (e->type == EOT_DB_NUMBER) {
                    if (wsize > 8) {
                        nasm_error(ERR_NONFATAL,
                                   "integer supplied to a DT, DO or DY"
                                   " instruction");
                    } else {
                        data.insoffs = 0;
                        data.type = e->relative ? OUT_RELADDR : OUT_ADDRESS;
                        data.inslen = data.size = wsize;
                        data.toffset = e->offset;
                        data.tsegment = e->segment;
                        data.twrt = e->wrt;
                        data.relbase = 0;
                        out(&data);
                    }
                } else if (e->type == EOT_DB_STRING ||
                           e->type == EOT_DB_STRING_FREE) {
                    /* Strings are zero-padded up to a multiple of the
                     * element size */
                    int align = e->stringlen % wsize;
                    if (align)
                        align = wsize - align;

                    data.insoffs = 0;
                    data.inslen = e->stringlen + align;

                    out_rawdata(&data, e->stringval, e->stringlen);
                    out_rawdata(&data, zero_buffer, align);
                }
            }
            /* After the first repetition, fold the rest under a TIMES
             * level in the listing */
            if (t > 0 && t == instruction->times - 1) {
                lfmt->set_offset(data.offset);
                lfmt->uplevel(LIST_TIMES);
            }
        }
        if (instruction->times > 1)
            lfmt->downlevel(LIST_TIMES);
    } else if (instruction->opcode == I_INCBIN) {
        /* INCBIN filename[, base[, maxlen]] */
        const char *fname = instruction->eops->stringval;
        FILE *fp;
        size_t t = instruction->times;
        off_t base = 0;
        off_t len;
        const void *map = NULL;
        char *buf = NULL;
        size_t blk = 0;         /* Buffered I/O block size */
        size_t m = 0;           /* Bytes last read */

        fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
        if (!fp) {
            nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
                       fname);
            goto done;
        }

        len = nasm_file_size(fp);

        if (len == (off_t)-1) {
            nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
                       fname);
            goto close_done;
        }

        /* Apply optional base offset and maximum length operands */
        if (instruction->eops->next) {
            base = instruction->eops->next->offset;
            if (base >= len) {
                len = 0;
            } else {
                len -= base;
                if (instruction->eops->next->next &&
                    len > (off_t)instruction->eops->next->next->offset)
                    len = (off_t)instruction->eops->next->next->offset;
            }
        }

        lfmt->set_offset(data.offset);
        lfmt->uplevel(LIST_INCBIN);

        if (!len)
            goto end_incbin;

        /* Try to map file data */
        map = nasm_map_file(fp, base, len);
        if (!map) {
            /* Fall back to a read buffer of at most INCBIN_MAX_BUF */
            blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
            buf = nasm_malloc(blk);
        }

        while (t--) {
            /*
             * Consider these irrelevant for INCBIN, since it is fully
             * possible that these might be (way) bigger than an int
             * can hold; there is, however, no reason to widen these
             * types just for INCBIN. data.inslen == 0 signals to the
             * backend that these fields are meaningless, if at all
             * needed.
             */
            data.insoffs = 0;
            data.inslen = 0;

            if (map) {
                out_rawdata(&data, map, len);
            } else if ((off_t)m == len) {
                /* Whole file fit in buf on a previous iteration; reuse */
                out_rawdata(&data, buf, len);
            } else {
                off_t l = len;

                if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
                    nasm_error(ERR_NONFATAL,
                               "`incbin': unable to seek on file `%s'",
                               fname);
                    goto end_incbin;
                }
                while (l > 0) {
                    m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
                    if (!m || feof(fp)) {
                        /*
                         * This shouldn't happen unless the file
                         * actually changes while we are reading
                         * it.
                         */
                        nasm_error(ERR_NONFATAL,
                                   "`incbin': unexpected EOF while"
                                   " reading file `%s'", fname);
                        goto end_incbin;
                    }
                    out_rawdata(&data, buf, m);
                    l -= m;
                }
            }
        }
    end_incbin:
        lfmt->downlevel(LIST_INCBIN);
        if (instruction->times > 1) {
            lfmt->set_offset(data.offset);
            lfmt->uplevel(LIST_TIMES);
            lfmt->downlevel(LIST_TIMES);
        }
        if (ferror(fp)) {
            nasm_error(ERR_NONFATAL,
                       "`incbin': error while"
                       " reading file `%s'", fname);
        }
    close_done:
        if (buf)
            nasm_free(buf);
        if (map)
            nasm_unmap_file(map, len);
        fclose(fp);
    done:
        ;
    } else {
        /* "Real" instruction */

        /* Check to see if we need an address-size prefix */
        add_asp(instruction, bits);

        m = find_match(&temp, instruction, data.segment, data.offset, bits);

        if (m == MOK_GOOD) {
            /* Matches! */
            int64_t insn_size = calcsize(data.segment, data.offset,
                                         bits, instruction, temp);
            itimes = instruction->times;
            if (insn_size < 0) /* shouldn't be, on pass two */
                nasm_panic(0, "errors made it through from pass one");

            data.itemp = temp;
            data.bits = bits;

            while (itimes--) {
                data.insoffs = 0;
                data.inslen = insn_size;

                gencode(&data, instruction);
                /* gencode must emit exactly the size calcsize predicted */
                nasm_assert(data.insoffs == insn_size);

                if (itimes > 0 && itimes == instruction->times - 1) {
                    lfmt->set_offset(data.offset);
                    lfmt->uplevel(LIST_TIMES);
                }
            }
            if (instruction->times > 1)
                lfmt->downlevel(LIST_TIMES);
        } else {
            /* No match: report the most specific error we found */
            switch (m) {
            case MERR_OPSIZEMISSING:
                nasm_error(ERR_NONFATAL, "operation size not specified");
                break;
            case MERR_OPSIZEMISMATCH:
                nasm_error(ERR_NONFATAL, "mismatch in operand sizes");
                break;
            case MERR_BRNUMMISMATCH:
                nasm_error(ERR_NONFATAL,
                           "mismatch in the number of broadcasting elements");
                break;
            case MERR_BADCPU:
                nasm_error(ERR_NONFATAL, "no instruction for this cpu level");
                break;
            case MERR_BADMODE:
                nasm_error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
                           bits);
                break;
            case MERR_ENCMISMATCH:
                nasm_error(ERR_NONFATAL, "specific encoding scheme not available");
                break;
            case MERR_BADBND:
                nasm_error(ERR_NONFATAL, "bnd prefix is not allowed");
                break;
            case MERR_BADREPNE:
                nasm_error(ERR_NONFATAL, "%s prefix is not allowed",
                           (has_prefix(instruction, PPS_REP, P_REPNE) ?
                            "repne" : "repnz"));
                break;
            default:
                nasm_error(ERR_NONFATAL,
                           "invalid combination of opcode and operands");
                break;
            }
        }
    }
    return data.offset - start;
}
|
|
790
|
|
/*
 * Compute the size in bytes an instruction would occupy without
 * emitting anything (pass 1).  Returns 0 for I_none, the padded data
 * size for DB-family directives, the (clamped) file length for
 * INCBIN, the calcsize() result for a matched instruction, or -1 if
 * no template matches.
 */
int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
{
    const struct itemplate *temp;
    enum match_result m;

    if (instruction->opcode == I_none)
        return 0;

    if (instruction->opcode == I_DB || instruction->opcode == I_DW ||
        instruction->opcode == I_DD || instruction->opcode == I_DQ ||
        instruction->opcode == I_DT || instruction->opcode == I_DO ||
        instruction->opcode == I_DY) {
        extop *e;
        int32_t isize, osize, wsize;

        isize = 0;
        wsize = idata_bytes(instruction->opcode); /* element size in bytes */

        list_for_each(e, instruction->eops) {
            int32_t align;

            osize = 0;
            if (e->type == EOT_DB_NUMBER) {
                osize = 1;
                warn_overflow_const(e->offset, wsize);
            } else if (e->type == EOT_DB_STRING ||
                       e->type == EOT_DB_STRING_FREE)
                osize = e->stringlen;

            /* Round each operand up to a multiple of the element size */
            align = (-osize) % wsize;
            if (align < 0)
                align += wsize;
            isize += osize + align;
        }
        return isize;
    }

    if (instruction->opcode == I_INCBIN) {
        const char *fname = instruction->eops->stringval;
        off_t len;

        len = nasm_file_size_by_path(fname);
        if (len == (off_t)-1) {
            nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
                       fname);
            return 0;
        }

        /* Apply optional base offset and maximum length operands,
         * mirroring the emission logic in assemble() */
        if (instruction->eops->next) {
            if (len <= (off_t)instruction->eops->next->offset) {
                len = 0;
            } else {
                len -= instruction->eops->next->offset;
                if (instruction->eops->next->next &&
                    len > (off_t)instruction->eops->next->next->offset) {
                    len = (off_t)instruction->eops->next->next->offset;
                }
            }
        }

        return len;
    }

    /* Check to see if we need an address-size prefix */
    add_asp(instruction, bits);

    m = find_match(&temp, instruction, segment, offset, bits);
    if (m == MOK_GOOD) {
        /* we've matched an instruction. */
        return calcsize(segment, offset, bits, instruction, temp);
    } else {
        return -1;              /* didn't match any instruction */
    }
}
|
|
865
|
|
/*
 * Warn about improper use of the HLE prefixes XACQUIRE/XRELEASE on
 * this instruction.  `hleok` is a 2-bit capability mask derived from
 * the \271..\273 template bytecodes -- NOTE(review): confirm exact
 * bit assignment against calcsize()'s hleok handling (not visible in
 * this chunk).
 */
static void bad_hle_warn(const insn * ins, uint8_t hleok)
{
    enum prefixes rep_pfx = ins->prefixes[PPS_REP];
    enum whatwarn { w_none, w_lock, w_inval } ww;
    /* Indexed [XACQUIRE=0 / XRELEASE=1][hleok] */
    static const enum whatwarn warn[2][4] =
    {
        { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
        { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
    };
    unsigned int n;

    /* Relies on P_XRELEASE == P_XACQUIRE + 1; unsigned wrap filters
     * everything else out */
    n = (unsigned int)rep_pfx - P_XACQUIRE;
    if (n > 1)
        return;                 /* Not XACQUIRE/XRELEASE */

    ww = warn[n][hleok];
    if (!is_class(MEMORY, ins->oprs[0].type))
        ww = w_inval;           /* HLE requires operand 0 to be memory */

    switch (ww) {
    case w_none:
        break;

    case w_lock:
        if (ins->prefixes[PPS_LOCK] != P_LOCK) {
            nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
                       "%s with this instruction requires lock",
                       prefix_name(rep_pfx));
        }
        break;

    case w_inval:
        nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
                   "%s invalid with this instruction",
                   prefix_name(rep_pfx));
        break;
    }
}
|
|
904
|
|
/* Common construct: expand to 3 (resp. 4) consecutive case labels
 * starting at x, for decoding bytecode groups like \20..\23. */
#define case3(x) case (x): case (x)+1: case (x)+2
#define case4(x) case3(x): case (x)+3
|
|
908
|
|
/*
 * Walk the bytecode of the matched template and compute the encoded
 * length of the instruction.  As a side effect this fills in the
 * REX/VEX/EVEX state inside *ins (rex, vexreg, vex_cm, vex_wlp,
 * evex_p[], evex_tuple) and may set implied prefixes, so gencode()
 * can later emit the same decisions.  Returns the length in bytes,
 * or -1 if the instruction cannot be encoded in the current mode.
 */
static int64_t calcsize(int32_t segment, int64_t offset, int bits,
                        insn * ins, const struct itemplate *temp)
{
    const uint8_t *codes = temp->code;
    int64_t length = 0;
    uint8_t c;
    int rex_mask = ~0;
    int op1, op2;
    struct operand *opx;
    uint8_t opex = 0;
    enum ea_type eat;
    uint8_t hleok = 0;
    bool lockcheck = true;
    enum reg_enum mib_index = R_none;   /* For a separate index MIB reg form */

    ins->rex = 0;               /* Ensure REX is reset */
    eat = EA_SCALAR;            /* Expect a scalar EA */
    memset(ins->evex_p, 0, 3);  /* Ensure EVEX is reset */

    if (ins->prefixes[PPS_OSIZE] == P_O64)
        ins->rex |= REX_W;

    (void)segment;              /* Don't warn that this parameter is unused */
    (void)offset;               /* Don't warn that this parameter is unused */

    while (*codes) {
        c = *codes++;
        /* Operand indices live in the low bits of the bytecode byte,
         * optionally extended by a preceding \05x "opex" byte. */
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0;               /* For the next iteration */

        switch (c) {
        case4(01):
            /* c literal bytes follow in the bytecode stream */
            codes += c, length += c;
            break;

        case3(05):
            /* Operand-index extension applied to the next bytecode */
            opex = c;
            break;

        case4(010):
            /* Opcode byte with register encoded in its low 3 bits;
             * the register may require REX bits */
            ins->rex |=
                op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
            codes++, length++;
            break;

        case4(014):
            /* this is an index reg of MIB operand */
            mib_index = opx->basereg;
            break;

        case4(020):
        case4(024):
            /* one-byte immediate (signed/unsigned forms) */
            length++;
            break;

        case4(030):
            /* two-byte immediate */
            length += 2;
            break;

        case4(034):
            /* 16- or 32-bit immediate, by operand size else mode size */
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(040):
            /* four-byte immediate */
            length += 4;
            break;

        case4(044):
            /* address-sized quantity */
            length += ins->addr_size >> 3;
            break;

        case4(050):
            /* byte relative displacement (short jump) */
            length++;
            break;

        case4(054):
            length += 8; /* MOV reg64/imm */
            break;

        case4(060):
            /* 16-bit relative displacement */
            length += 2;
            break;

        case4(064):
            /* 16- or 32-bit relative, by operand size else mode size */
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(070):
            /* 32-bit relative displacement */
            length += 4;
            break;

        case4(074):
            /* two-byte segment selector (FAR references) */
            length += 2;
            break;

        case 0172:
        case 0173:
            /* is4-style byte; bytecode carries an extra selector byte */
            codes++;
            length++;
            break;

        case4(0174):
            /* is4 register byte */
            length++;
            break;

        case4(0240):
            /* EVEX-encoded instruction with an NDS register operand */
            ins->rex |= REX_EV;
            ins->vexreg = regval(opx);
            ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case 0250:
            /* EVEX-encoded instruction without an NDS register */
            ins->rex |= REX_EV;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case4(0254):
            /* 32-bit immediate, sign-extended at run time */
            length += 4;
            break;

        case4(0260):
            /* VEX-encoded instruction with an NDS register operand */
            ins->rex |= REX_V;
            ins->vexreg = regval(opx);
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case 0270:
            /* VEX-encoded instruction without an NDS register */
            ins->rex |= REX_V;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case3(0271):
            /* record which HLE prefixes are acceptable; checked later
             * by bad_hle_warn() */
            hleok = c & 3;
            break;

        case4(0274):
            /* byte immediate, sign-extended to operand size */
            length++;
            break;

        case4(0300):
            break;

        case 0310:
            /* requires 16-bit addressing; impossible in 64-bit mode */
            if (bits == 64)
                return -1;
            length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
            break;

        case 0311:
            /* requires 32-bit addressing */
            length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
            break;

        case 0312:
            break;

        case 0313:
            /* requires pure 64-bit addressing: no override allowed */
            if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
                has_prefix(ins, PPS_ASIZE, P_A32))
                return -1;
            break;

        case4(0314):
            break;

        case 0320:
        {
            /* force 16-bit operand size */
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O16)
                break;
            if (pfx != P_none)
                nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O16;
            break;
        }

        case 0321:
        {
            /* force 32-bit operand size */
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O32)
                break;
            if (pfx != P_none)
                nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O32;
            break;
        }

        case 0322:
            break;

        case 0323:
            /* suppress REX.W when finally masking ins->rex below */
            rex_mask &= ~REX_W;
            break;

        case 0324:
            /* force REX.W (64-bit operand size) */
            ins->rex |= REX_W;
            break;

        case 0325:
            /* forbid the high byte registers (AH/CH/DH/BH) */
            ins->rex |= REX_NH;
            break;

        case 0326:
            break;

        case 0330:
            /* condition-dependent opcode byte follows in the bytecode */
            codes++, length++;
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            /* one prefix byte (F2/F3, see gencode) */
            length++;
            break;

        case 0334:
            /* may need LOCK emitted as a REX.R surrogate (see 0334 in
             * gencode and the LOCK-as-REX.R handling below) */
            ins->rex |= REX_L;
            break;

        case 0335:
            break;

        case 0336:
            /* instruction implies a REP prefix if none given */
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REP;
            break;

        case 0337:
            /* instruction implies a REPNE prefix if none given */
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REPNE;
            break;

        case 0340:
            /* RESx: reserve oprs[0].offset units of space */
            if (!absolute_op(&ins->oprs[0]))
                nasm_error(ERR_NONFATAL, "attempt to reserve non-constant"
                           " quantity of BSS space");
            else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
                nasm_error(ERR_WARNING | ERR_PASS1,
                           "forward reference in RESx can have unpredictable results");
            else
                length += ins->oprs[0].offset;
            break;

        case 0341:
            /* instruction implies a WAIT prefix if none given */
            if (!ins->prefixes[PPS_WAIT])
                ins->prefixes[PPS_WAIT] = P_WAIT;
            break;

        case 0360:
            break;

        case 0361:
            /* one prefix byte (66, see gencode) */
            length++;
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            /* one prefix byte (66/67, see gencode) */
            length++;
            break;

        case 0370:
        case 0371:
            break;

        case 0373:
            length++;
            break;

        case 0374:
            /* EA must be an XMM vector-SIB memory reference */
            eat = EA_XMMVSIB;
            break;

        case 0375:
            /* EA must be a YMM vector-SIB memory reference */
            eat = EA_YMMVSIB;
            break;

        case 0376:
            /* EA must be a ZMM vector-SIB memory reference */
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* ModRM (and possibly SIB/displacement) effective address */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            struct operand *opy = &ins->oprs[op2];
            struct operand *op_er_sae;

            ea_data.rex = 0;           /* Ensure ea.REX is initially 0 */

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                /* rfield is a constant from the bytecode */
                rflags = 0;
                rfield = c & 7;
            }

            /* EVEX.b1 : evex_brerop contains the operand position */
            op_er_sae = (ins->evex_brerop >= 0 ?
                         &ins->oprs[ins->evex_brerop] : NULL);

            if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
                /* set EVEX.b */
                ins->evex_p[2] |= EVEX_P2B;
                if (op_er_sae->decoflags & ER) {
                    /* set EVEX.RC (rounding control) */
                    ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
                                      & EVEX_P2RC;
                }
            } else {
                /* set EVEX.L'L (vector length) */
                ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
                ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
                if (opy->decoflags & BRDCAST_MASK) {
                    /* set EVEX.b */
                    ins->evex_p[2] |= EVEX_P2B;
                }
            }

            if (itemp_has(temp, IF_MIB)) {
                opy->eaflags |= EAF_MIB;
                /*
                 * if a separate form of MIB (ICC style) is used,
                 * the index reg info is merged into mem operand
                 */
                if (mib_index != R_none) {
                    opy->indexreg = mib_index;
                    opy->scale = 1;
                    opy->hintbase = mib_index;
                    opy->hinttype = EAH_NOTBASE;
                }
            }

            /* the computed EA category must match what the template
             * demanded (scalar vs. the VSIB forms set above) */
            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins) != eat) {
                nasm_error(ERR_NONFATAL, "invalid effective address");
                return -1;
            } else {
                ins->rex |= ea_data.rex;
                length += ea_data.size;
            }
        }
        break;

        default:
            nasm_panic(0, "internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }

    ins->rex &= rex_mask;

    if (ins->rex & REX_NH) {
        if (ins->rex & REX_H) {
            nasm_error(ERR_NONFATAL, "instruction cannot use high registers");
            return -1;
        }
        ins->rex &= ~REX_P;     /* Don't force REX prefix due to high reg */
    }

    /* an explicit {evex}/{vex3}/{vex2} request must match the encoding */
    switch (ins->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!(ins->rex & REX_EV))
            return -1;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!(ins->rex & REX_V))
            return -1;
        break;
    default:
        break;
    }

    if (ins->rex & (REX_V | REX_EV)) {
        int bad32 = REX_R|REX_W|REX_X|REX_B;

        if (ins->rex & REX_H) {
            nasm_error(ERR_NONFATAL, "cannot use high register in AVX instruction");
            return -1;
        }
        /* resolve the W field from the template's vex_wlp */
        switch (ins->vex_wlp & 060) {
        case 000:
        case 040:
            ins->rex &= ~REX_W;
            break;
        case 020:
            ins->rex |= REX_W;
            bad32 &= ~REX_W;
            break;
        case 060:
            /* Follow REX_W */
            break;
        }

        if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
            nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
            return -1;
        } else if (!(ins->rex & REX_EV) &&
                   ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
            nasm_error(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
            return -1;
        }
        if (ins->rex & REX_EV)
            length += 4;        /* EVEX prefix: 4 bytes (see gencode 0240) */
        else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                 ins->prefixes[PPS_VEX] == P_VEX3)
            length += 3;        /* 3-byte VEX form */
        else
            length += 2;        /* 2-byte VEX form */
    } else if (ins->rex & REX_MASK) {
        if (ins->rex & REX_H) {
            nasm_error(ERR_NONFATAL, "cannot use high register in rex instruction");
            return -1;
        } else if (bits == 64) {
            length++;
        } else if ((ins->rex & REX_L) &&
                   !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
                   iflag_ffs(&cpu) >= IF_X86_64) {
            /* LOCK-as-REX.R */
            assert_no_prefix(ins, PPS_LOCK);
            lockcheck = false;  /* Already errored, no need for warning */
            length++;
        } else {
            nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
            return -1;
        }
    }

    if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
        (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
        nasm_error(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
                   "instruction is not lockable");
    }

    bad_hle_warn(ins, hleok);

    /*
     * when BND prefix is set by DEFAULT directive,
     * BND prefix is added to every appropriate instruction line
     * unless it is overridden by NOBND prefix.
     */
    if (globalbnd &&
        (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
        ins->prefixes[PPS_REP] = P_BND;

    /*
     * Add length of legacy prefixes
     */
    length += emit_prefix(NULL, bits, ins);

    return length;
}
|
|
1400
|
|
1401 static inline void emit_rex(struct out_data *data, insn *ins)
|
|
1402 {
|
|
1403 if (data->bits == 64) {
|
|
1404 if ((ins->rex & REX_MASK) &&
|
|
1405 !(ins->rex & (REX_V | REX_EV)) &&
|
|
1406 !ins->rex_done) {
|
|
1407 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
|
|
1408 out_rawbyte(data, rex);
|
|
1409 ins->rex_done = true;
|
|
1410 }
|
|
1411 }
|
|
1412 }
|
|
1413
|
|
/*
 * Emit (or merely count) the legacy one-byte prefixes requested in
 * ins->prefixes[].  When data is NULL no bytes are written and only
 * the count is returned; calcsize() uses this mode to account for
 * prefix length.  Returns the number of prefix bytes.
 */
static int emit_prefix(struct out_data *data, const int bits, insn *ins)
{
    int bytes = 0;
    int j;

    for (j = 0; j < MAXPREFIX; j++) {
        uint8_t c = 0;          /* 0 = no byte for this prefix slot */
        switch (ins->prefixes[j]) {
        case P_WAIT:
            c = 0x9B;
            break;
        case P_LOCK:
            c = 0xF0;
            break;
        case P_REPNE:
        case P_REPNZ:
        case P_XACQUIRE:
        case P_BND:
            c = 0xF2;
            break;
        case P_REPE:
        case P_REPZ:
        case P_REP:
        case P_XRELEASE:
            c = 0xF3;
            break;
        case R_CS:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "cs segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x2E;
            break;
        case R_DS:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "ds segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x3E;
            break;
        case R_ES:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "es segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x26;
            break;
        case R_FS:
            c = 0x64;
            break;
        case R_GS:
            c = 0x65;
            break;
        case R_SS:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "ss segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x36;
            break;
        case R_SEGR6:
        case R_SEGR7:
            nasm_error(ERR_NONFATAL,
                       "segr6 and segr7 cannot be used as prefixes");
            break;
        case P_A16:
            if (bits == 64) {
                nasm_error(ERR_NONFATAL,
                           "16-bit addressing is not supported "
                           "in 64-bit mode");
            } else if (bits != 16)
                c = 0x67;
            break;
        case P_A32:
            if (bits != 32)
                c = 0x67;
            break;
        case P_A64:
            if (bits != 64) {
                nasm_error(ERR_NONFATAL,
                           "64-bit addressing is only supported "
                           "in 64-bit mode");
            }
            break;
        case P_ASP:
            c = 0x67;
            break;
        case P_O16:
            if (bits != 16)
                c = 0x66;
            break;
        case P_O32:
            if (bits == 16)
                c = 0x66;
            break;
        case P_O64:
            /* REX.W */
            break;
        case P_OSP:
            c = 0x66;
            break;
        case P_EVEX:
        case P_VEX3:
        case P_VEX2:
        case P_NOBND:
        case P_none:
            /* handled elsewhere or deliberately no byte */
            break;
        default:
            nasm_panic(0, "invalid instruction prefix");
        }
        if (c) {
            if (data)
                out_rawbyte(data, c);
            bytes++;
        }
    }
    return bytes;
}
|
|
1532
|
|
/*
 * Emit the machine code for an instruction by re-walking the same
 * template bytecode that calcsize() measured.  The two interpreters
 * must stay in lockstep: any case added here needs a matching length
 * calculation in calcsize().
 */
static void gencode(struct out_data *data, insn *ins)
{
    uint8_t c;
    uint8_t bytes[4];           /* scratch for VEX/EVEX/ModRM emission */
    int64_t size;
    int op1, op2;
    struct operand *opx;
    const uint8_t *codes = data->itemp->code;
    uint8_t opex = 0;
    enum ea_type eat = EA_SCALAR;
    int r;
    const int bits = data->bits;

    ins->rex_done = false;

    emit_prefix(data, bits, ins);

    while (*codes) {
        c = *codes++;
        /* Operand indices: low bits of c, extended by a \05x opex byte */
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0;               /* For the next iteration */


        switch (c) {
        case 01:
        case 02:
        case 03:
        case 04:
            /* emit c literal bytes (preceded by REX if needed) */
            emit_rex(data, ins);
            out_rawdata(data, codes, c);
            codes += c;
            break;

        case 05:
        case 06:
        case 07:
            opex = c;
            break;

        case4(010):
            /* opcode byte with register number in its low 3 bits */
            emit_rex(data, ins);
            out_rawbyte(data, *codes++ + (regval(opx) & 7));
            break;

        case4(014):
            break;

        case4(020):
            if (opx->offset < -256 || opx->offset > 255)
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "byte value exceeds bounds");
            out_imm(data, opx, 1, OUT_WRAP);
            break;

        case4(024):
            if (opx->offset < 0 || opx->offset > 255)
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "unsigned byte value exceeds bounds");
            out_imm(data, opx, 1, OUT_UNSIGNED);
            break;

        case4(030):
            warn_overflow_opd(opx, 2);
            out_imm(data, opx, 2, OUT_WRAP);
            break;

        case4(034):
            /* 16- or 32-bit immediate by operand size else mode size */
            if (opx->type & (BITS16 | BITS32))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;
            warn_overflow_opd(opx, size);
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(040):
            warn_overflow_opd(opx, 4);
            out_imm(data, opx, 4, OUT_WRAP);
            break;

        case4(044):
            /* address-sized immediate */
            size = ins->addr_size >> 3;
            warn_overflow_opd(opx, size);
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(050):
            /* byte relative: range-check short jumps within a segment */
            if (opx->segment == data->segment) {
                int64_t delta = opx->offset - data->offset
                    - (data->inslen - data->insoffs);
                if (delta > 127 || delta < -128)
                    nasm_error(ERR_NONFATAL, "short jump is out of range");
            }
            out_reladdr(data, opx, 1);
            break;

        case4(054):
            out_imm(data, opx, 8, OUT_WRAP);
            break;

        case4(060):
            out_reladdr(data, opx, 2);
            break;

        case4(064):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;

            out_reladdr(data, opx, size);
            break;

        case4(070):
            out_reladdr(data, opx, 4);
            break;

        case4(074):
            if (opx->segment == NO_SEG)
                nasm_error(ERR_NONFATAL, "value referenced by FAR is not"
                           " relocatable");
            out_segment(data, opx);
            break;

        case 0172:
        {
            /* is4 byte whose low bits come from an immediate operand */
            int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
            const struct operand *opy;

            c = *codes++;
            opx = &ins->oprs[c >> 3];
            opy = &ins->oprs[c & 7];
            if (!absolute_op(opy)) {
                nasm_error(ERR_NONFATAL,
                           "non-absolute expression not permitted as argument %d",
                           c & 7);
            } else if (opy->offset & ~mask) {
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "is4 argument exceeds bounds");
            }
            c = opy->offset & mask;
            goto emit_is4;
        }

        case 0173:
            /* is4 byte with an immediate constant in the bytecode */
            c = *codes++;
            opx = &ins->oprs[c >> 4];
            c &= 15;
            goto emit_is4;

        case4(0174):
            c = 0;
        emit_is4:
            r = nasm_regvals[opx->basereg];
            out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
            break;

        case4(0254):
            if (absolute_op(opx) &&
                (int32_t)opx->offset != (int64_t)opx->offset) {
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "signed dword immediate exceeds bounds");
            }
            out_imm(data, opx, 4, OUT_SIGNED);
            break;

        case4(0240):
        case 0250:
            /* emit the 4-byte EVEX prefix (62 p0 p1 p2); the three
             * parameter bytes were consumed by calcsize, skip them */
            codes += 3;
            ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
                                           EVEX_P2Z | EVEX_P2AAA, 2);
            ins->evex_p[2] ^= EVEX_P2VP;        /* 1's complement */
            bytes[0] = 0x62;
            /* EVEX.X can be set by either REX or EVEX for different reasons */
            bytes[1] = ((((ins->rex & 7) << 5) |
                         (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
                       (ins->vex_cm & EVEX_P0MM);
            bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
                       ((~ins->vexreg & 15) << 3) |
                       (1 << 2) | (ins->vex_wlp & 3);
            bytes[3] = ins->evex_p[2];
            out_rawdata(data, bytes, 4);
            break;

        case4(0260):
        case 0270:
            /* emit the VEX prefix: 3-byte C4/8F form when needed,
             * otherwise the compact 2-byte C5 form */
            codes += 2;
            if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                ins->prefixes[PPS_VEX] == P_VEX3) {
                bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
                bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
                bytes[2] = ((ins->rex & REX_W) << (7-3)) |
                           ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 3);
            } else {
                bytes[0] = 0xc5;
                bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
                           ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 2);
            }
            break;

        case 0271:
        case 0272:
        case 0273:
            break;

        case4(0274):
        {
            /* byte immediate sign-extended to operand size s */
            uint64_t uv, um;
            int s;

            if (absolute_op(opx)) {
                if (ins->rex & REX_W)
                    s = 64;
                else if (ins->prefixes[PPS_OSIZE] == P_O16)
                    s = 16;
                else if (ins->prefixes[PPS_OSIZE] == P_O32)
                    s = 32;
                else
                    s = bits;

                um = (uint64_t)2 << (s-1);
                uv = opx->offset;

                if (uv > 127 && uv < (uint64_t)-128 &&
                    (uv < um-128 || uv > um-1)) {
                    /* If this wasn't explicitly byte-sized, warn as though we
                     * had fallen through to the imm16/32/64 case.
                     */
                    nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                               "%s value exceeds bounds",
                               (opx->type & BITS8) ? "signed byte" :
                               s == 16 ? "word" :
                               s == 32 ? "dword" :
                               "signed dword");
                }

                /* Output as a raw byte to avoid byte overflow check */
                out_rawbyte(data, (uint8_t)uv);
            } else {
                out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
            }
            break;
        }

        case4(0300):
            break;

        case 0310:
            /* 16-bit addressing: 67 override needed from 32-bit mode */
            if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
                out_rawbyte(data, 0x67);
            break;

        case 0311:
            /* 32-bit addressing: 67 override needed outside 32-bit mode */
            if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
                out_rawbyte(data, 0x67);
            break;

        case 0312:
            break;

        case 0313:
            ins->rex = 0;
            break;

        case4(0314):
            break;

        case 0320:
        case 0321:
            /* operand-size overrides already handled via PPS_OSIZE */
            break;

        case 0322:
        case 0323:
            break;

        case 0324:
            ins->rex |= REX_W;
            break;

        case 0325:
            break;

        case 0326:
            break;

        case 0330:
            /* opcode byte adjusted by the condition code */
            out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            /* F2 or F3 prefix byte */
            out_rawbyte(data, c - 0332 + 0xF2);
            break;

        case 0334:
            /* LOCK emitted as a REX.R surrogate (see calcsize) */
            if (ins->rex & REX_R)
                out_rawbyte(data, 0xF0);
            ins->rex &= ~(REX_L|REX_R);
            break;

        case 0335:
            break;

        case 0336:
        case 0337:
            break;

        case 0340:
            if (ins->oprs[0].segment != NO_SEG)
                nasm_panic(0, "non-constant BSS size in pass two");

            out_reserve(data, ins->oprs[0].offset);
            break;

        case 0341:
            break;

        case 0360:
            break;

        case 0361:
            out_rawbyte(data, 0x66);
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            /* 66 or 67 prefix byte */
            out_rawbyte(data, c - 0366 + 0x66);
            break;

        case3(0370):
            break;

        case 0373:
            out_rawbyte(data, bits == 16 ? 3 : 5);
            break;

        case 0374:
            eat = EA_XMMVSIB;
            break;

        case 0375:
            eat = EA_YMMVSIB;
            break;

        case 0376:
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* ModRM/SIB/displacement emission */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            uint8_t *p;
            struct operand *opy = &ins->oprs[op2];

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                /* rfield is constant */
                rflags = 0;
                rfield = c & 7;
            }

            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins) != eat)
                nasm_error(ERR_NONFATAL, "invalid effective address");

            p = bytes;
            *p++ = ea_data.modrm;
            if (ea_data.sib_present)
                *p++ = ea_data.sib;
            out_rawdata(data, bytes, p - bytes);

            /*
             * Make sure the address gets the right offset in case
             * the line breaks in the .lst file (BR 1197827)
             */

            if (ea_data.bytes) {
                /* use compressed displacement, if available */
                if (ea_data.disp8) {
                    out_rawbyte(data, ea_data.disp8);
                } else if (ea_data.rip) {
                    out_reladdr(data, opy, ea_data.bytes);
                } else {
                    int asize = ins->addr_size >> 3;

                    if (overflow_general(opy->offset, asize) ||
                        signed_bits(opy->offset, ins->addr_size) !=
                        signed_bits(opy->offset, ea_data.bytes << 3))
                        warn_overflow(ea_data.bytes);

                    out_imm(data, opy, ea_data.bytes,
                            (asize > ea_data.bytes)
                            ? OUT_SIGNED : OUT_WRAP);
                }
            }
        }
        break;

        default:
            nasm_panic(0, "internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }
}
|
1965
|
|
/*
 * Return the opflags (register class/size bits) for the operand's
 * base register.  Panics if the operand does not hold a register.
 */
static opflags_t regflag(const operand * o)
{
    if (!is_register(o->basereg))
        nasm_panic(0, "invalid operand passed to regflag()");
    return nasm_reg_flags[o->basereg];
}
|
|
1972
|
|
/*
 * Return the numeric encoding value of the operand's base register.
 * Panics if the operand does not hold a register.
 */
static int32_t regval(const operand * o)
{
    if (!is_register(o->basereg))
        nasm_panic(0, "invalid operand passed to regval()");
    return nasm_regvals[o->basereg];
}
|
|
1979
|
|
1980 static int op_rexflags(const operand * o, int mask)
|
|
1981 {
|
|
1982 opflags_t flags;
|
|
1983 int val;
|
|
1984
|
|
1985 if (!is_register(o->basereg))
|
|
1986 nasm_panic(0, "invalid operand passed to op_rexflags()");
|
|
1987
|
|
1988 flags = nasm_reg_flags[o->basereg];
|
|
1989 val = nasm_regvals[o->basereg];
|
|
1990
|
|
1991 return rexflags(val, flags, mask);
|
|
1992 }
|
|
1993
|
|
1994 static int rexflags(int val, opflags_t flags, int mask)
|
|
1995 {
|
|
1996 int rex = 0;
|
|
1997
|
|
1998 if (val >= 0 && (val & 8))
|
|
1999 rex |= REX_B|REX_X|REX_R;
|
|
2000 if (flags & BITS64)
|
|
2001 rex |= REX_W;
|
|
2002 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
|
|
2003 rex |= REX_H;
|
|
2004 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
|
|
2005 rex |= REX_P;
|
|
2006
|
|
2007 return rex & mask;
|
|
2008 }
|
|
2009
|
|
2010 static int evexflags(int val, decoflags_t deco,
|
|
2011 int mask, uint8_t byte)
|
|
2012 {
|
|
2013 int evex = 0;
|
|
2014
|
|
2015 switch (byte) {
|
|
2016 case 0:
|
|
2017 if (val >= 0 && (val & 16))
|
|
2018 evex |= (EVEX_P0RP | EVEX_P0X);
|
|
2019 break;
|
|
2020 case 2:
|
|
2021 if (val >= 0 && (val & 16))
|
|
2022 evex |= EVEX_P2VP;
|
|
2023 if (deco & Z)
|
|
2024 evex |= EVEX_P2Z;
|
|
2025 if (deco & OPMASK_MASK)
|
|
2026 evex |= deco & EVEX_P2AAA;
|
|
2027 break;
|
|
2028 }
|
|
2029 return evex & mask;
|
|
2030 }
|
|
2031
|
|
/*
 * Convenience wrapper around evexflags(): derive the EVEX flag bits
 * for an operand's base register and its decorator flags.
 */
static int op_evexflags(const operand * o, int mask, uint8_t byte)
{
    int val;

    val = nasm_regvals[o->basereg];

    return evexflags(val, o->decoflags, mask, byte);
}
|
|
2040
|
|
/*
 * Search the template table for an instruction template matching
 * `instruction`.  On return *tempp points at the selected (or last
 * examined) template and the return value is the best match result
 * (MOK_GOOD on success).  If the first pass fails only because
 * operand sizes were left unspecified, a second "fuzzy" pass is run
 * with sizes inferred from the candidate templates.
 */
static enum match_result find_match(const struct itemplate **tempp,
                                    insn *instruction,
                                    int32_t segment, int64_t offset, int bits)
{
    const struct itemplate *temp;
    enum match_result m, merr;
    opflags_t xsizeflags[MAX_OPERANDS];
    bool opsizemissing = false;
    int8_t broadcast = instruction->evex_brerop;
    int i;

    /* broadcasting uses a different data element size */
    for (i = 0; i < instruction->operands; i++)
        if (i == broadcast)
            xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
        else
            xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;

    merr = MERR_INVALOP;

    /* First pass: look for an exact match */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            /* a jump "matches" only if the displacement is encodable */
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
            /*
             * Missing operand size and a candidate for fuzzy matching...
             */
            for (i = 0; i < temp->operands; i++)
                if (i == broadcast)
                    xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
                else
                    xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
            opsizemissing = true;
        }
        /* keep the best (highest) result seen so far */
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

    /* No match, but see if we can get a fuzzy operand size match... */
    if (!opsizemissing)
        goto done;

    for (i = 0; i < instruction->operands; i++) {
        /*
         * We ignore extrinsic operand sizes on registers, so we should
         * never try to fuzzy-match on them.  This also resolves the case
         * when we have e.g. "xmmrm128" in two different positions.
         */
        if (is_class(REGISTER, instruction->oprs[i].type))
            continue;

        /* This tests if xsizeflags[i] has more than one bit set */
        if ((xsizeflags[i] & (xsizeflags[i]-1)))
            goto done;                /* No luck */

        if (i == broadcast) {
            instruction->oprs[i].decoflags |= xsizeflags[i];
            instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
                                          BITS32 : BITS64);
        } else {
            instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
        }
    }

    /* Try matching again... */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        }
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

done:
    *tempp = temp;
    return merr;
}
|
|
2132
|
|
2133 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
|
|
2134 {
|
|
2135 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
|
|
2136 uint8_t brcast_num;
|
|
2137
|
|
2138 if (brsize > BITS64)
|
|
2139 nasm_error(ERR_FATAL,
|
|
2140 "size of broadcasting element is greater than 64 bits");
|
|
2141
|
|
2142 /*
|
|
2143 * The shift term is to take care of the extra BITS80 inserted
|
|
2144 * between BITS64 and BITS128.
|
|
2145 */
|
|
2146 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
|
|
2147 >> (opsize > (BITS64 >> SIZE_SHIFT));
|
|
2148
|
|
2149 return brcast_num;
|
|
2150 }
|
|
2151
|
|
/*
 * Test whether "instruction" matches the template "itemp" in "bits"
 * mode.  Returns MOK_GOOD on a full match, MOK_JUMP if the template is
 * a jump needing special handling, or a MERR_* code describing why the
 * template was rejected (the caller keeps the highest-ranked result).
 */
static enum match_result matches(const struct itemplate *itemp,
                                 insn *instruction, int bits)
{
    opflags_t size[MAX_OPERANDS], asize;
    bool opsizemissing = false;
    int i, oprs;

    /*
     * Check the opcode
     */
    if (itemp->opcode != instruction->opcode)
        return MERR_INVALOP;

    /*
     * Count the operands
     */
    if (itemp->operands != instruction->operands)
        return MERR_INVALOP;

    /*
     * Is it legal?
     */
    if (!(optimizing > 0) && itemp_has(itemp, IF_OPT))
        return MERR_INVALOP;

    /*
     * {evex} available?
     * An explicit {evex}/{vex2}/{vex3} prefix restricts the acceptable
     * encodings; reject templates that cannot produce that encoding.
     */
    switch (instruction->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!itemp_has(itemp, IF_EVEX))
            return MERR_ENCMISMATCH;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!itemp_has(itemp, IF_VEX))
            return MERR_ENCMISMATCH;
        break;
    default:
        break;
    }

    /*
     * Check that no spurious colons or TOs are present
     */
    for (i = 0; i < itemp->operands; i++)
        if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
            return MERR_INVALOP;

    /*
     * Process size flags
     * The template's S- flag implies a default operand size; IF_SIZE
     * means "the current mode's natural size".
     */
    switch (itemp_smask(itemp)) {
    case IF_GENBIT(IF_SB):
        asize = BITS8;
        break;
    case IF_GENBIT(IF_SW):
        asize = BITS16;
        break;
    case IF_GENBIT(IF_SD):
        asize = BITS32;
        break;
    case IF_GENBIT(IF_SQ):
        asize = BITS64;
        break;
    case IF_GENBIT(IF_SO):
        asize = BITS128;
        break;
    case IF_GENBIT(IF_SY):
        asize = BITS256;
        break;
    case IF_GENBIT(IF_SZ):
        asize = BITS512;
        break;
    case IF_GENBIT(IF_SIZE):
        switch (bits) {
        case 16:
            asize = BITS16;
            break;
        case 32:
            asize = BITS32;
            break;
        case 64:
            asize = BITS64;
            break;
        default:
            asize = 0;
            break;
        }
        break;
    default:
        asize = 0;
        break;
    }

    if (itemp_armask(itemp)) {
        /* S- flags only apply to a specific operand */
        i = itemp_arg(itemp);
        memset(size, 0, sizeof size);
        size[i] = asize;
    } else {
        /* S- flags apply to all operands */
        for (i = 0; i < MAX_OPERANDS; i++)
            size[i] = asize;
    }

    /*
     * Check that the operand flags all match up,
     * it's a bit tricky so lets be verbose:
     *
     * 1) Find out the size of operand. If instruction
     *    doesn't have one specified -- we're trying to
     *    guess it either from template (IF_S* flag) or
     *    from code bits.
     *
     * 2) If template operand do not match the instruction OR
     *    template has an operand size specified AND this size differ
     *    from which instruction has (perhaps we got it from code bits)
     *    we are:
     *      a)  Check that only size of instruction and operand is differ
     *          other characteristics do match
     *      b)  Perhaps it's a register specified in instruction so
     *          for such a case we just mark that operand as "size
     *          missing" and this will turn on fuzzy operand size
     *          logic facility (handled by a caller)
     */
    for (i = 0; i < itemp->operands; i++) {
        opflags_t type = instruction->oprs[i].type;
        decoflags_t deco = instruction->oprs[i].decoflags;
        bool is_broadcast = deco & BRDCAST_MASK;
        uint8_t brcast_num = 0;
        opflags_t template_opsize, insn_opsize;

        /* No explicit size on the operand: use the template default */
        if (!(type & SIZE_MASK))
            type |= size[i];

        insn_opsize = type & SIZE_MASK;
        if (!is_broadcast) {
            template_opsize = itemp->opd[i] & SIZE_MASK;
        } else {
            decoflags_t deco_brsize = itemp->deco[i] & BRSIZE_MASK;
            /*
             * when broadcasting, the element size depends on
             * the instruction type. decorator flag should match.
             */

            if (deco_brsize) {
                template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
                /* calculate the proper number : {1to<brcast_num>} */
                brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
            } else {
                template_opsize = 0;
            }
        }

        /*
         * Reject if the template requires flags the operand lacks, or
         * the operand carries decorators the template does not allow
         * (the broadcast repeat count is checked separately below).
         */
        if ((itemp->opd[i] & ~type & ~SIZE_MASK) ||
            (deco & ~itemp->deco[i] & ~BRNUM_MASK)) {
            return MERR_INVALOP;
        } else if (template_opsize) {
            if (template_opsize != insn_opsize) {
                if (insn_opsize) {
                    return MERR_INVALOP;
                } else if (!is_class(REGISTER, type)) {
                    /*
                     * Note: we don't honor extrinsic operand sizes for registers,
                     * so "missing operand size" for a register should be
                     * considered a wildcard match rather than an error.
                     */
                    opsizemissing = true;
                }
            } else if (is_broadcast &&
                       (brcast_num !=
                        (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
                /*
                 * broadcasting opsize matches but the number of repeated memory
                 * element does not match.
                 * if 64b double precision float is broadcasted to ymm (256b),
                 * broadcasting decorator must be {1to4}.
                 */
                return MERR_BRNUMMISMATCH;
            }
        }
    }

    if (opsizemissing)
        return MERR_OPSIZEMISSING;

    /*
     * Check operand sizes
     * IF_SM/IF_SM2: "size match" — the first template operand with an
     * explicit size dictates the size of the matched operands.  Note
     * the inner loop deliberately reuses i; the break immediately
     * afterwards exits the outer loop.
     */
    if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
        oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
        for (i = 0; i < oprs; i++) {
            asize = itemp->opd[i] & SIZE_MASK;
            if (asize) {
                for (i = 0; i < oprs; i++)
                    size[i] = asize;
                break;
            }
        }
    } else {
        oprs = itemp->operands;
    }

    for (i = 0; i < itemp->operands; i++) {
        if (!(itemp->opd[i] & SIZE_MASK) &&
            (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
            return MERR_OPSIZEMISMATCH;
    }

    /*
     * Check template is okay at the set cpu level
     */
    if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
        return MERR_BADCPU;

    /*
     * Verify the appropriate long mode flag.
     */
    if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
        return MERR_BADMODE;

    /*
     * If we have a HLE prefix, look for the NOHLE flag
     */
    if (itemp_has(itemp, IF_NOHLE) &&
        (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
         has_prefix(instruction, PPS_REP, P_XRELEASE)))
        return MERR_BADHLE;

    /*
     * Check if special handling needed for Jumps
     * (bytecode 0370/0371 marks relative-jump templates)
     */
    if ((itemp->code[0] & ~1) == 0370)
        return MOK_JUMP;

    /*
     * Check if BND prefix is allowed.
     * Other 0xF2 (REPNE/REPNZ) prefix is prohibited.
     */
    if (!itemp_has(itemp, IF_BND) &&
        (has_prefix(instruction, PPS_REP, P_BND) ||
         has_prefix(instruction, PPS_REP, P_NOBND)))
        return MERR_BADBND;
    else if (itemp_has(itemp, IF_BND) &&
             (has_prefix(instruction, PPS_REP, P_REPNE) ||
              has_prefix(instruction, PPS_REP, P_REPNZ)))
        return MERR_BADREPNE;

    return MOK_GOOD;
}
|
|
2403
|
|
2404 /*
|
|
2405 * Check if ModR/M.mod should/can be 01.
|
|
2406 * - EAF_BYTEOFFS is set
|
|
2407 * - offset can fit in a byte when EVEX is not used
|
|
2408 * - offset can be compressed when EVEX is used
|
|
2409 */
|
|
2410 #define IS_MOD_01() (input->eaflags & EAF_BYTEOFFS || \
|
|
2411 (o >= -128 && o <= 127 && \
|
|
2412 seg == NO_SEG && !forw_ref && \
|
|
2413 !(input->eaflags & EAF_WORDOFFS) && \
|
|
2414 !(ins->rex & REX_EV)) || \
|
|
2415 (ins->rex & REX_EV && \
|
|
2416 is_disp8n(input, ins, &output->disp8)))
|
|
2417
|
|
2418 static enum ea_type process_ea(operand *input, ea *output, int bits,
|
|
2419 int rfield, opflags_t rflags, insn *ins)
|
|
2420 {
|
|
2421 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
|
|
2422 int addrbits = ins->addr_size;
|
|
2423 int eaflags = input->eaflags;
|
|
2424
|
|
2425 output->type = EA_SCALAR;
|
|
2426 output->rip = false;
|
|
2427 output->disp8 = 0;
|
|
2428
|
|
2429 /* REX flags for the rfield operand */
|
|
2430 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
|
|
2431 /* EVEX.R' flag for the REG operand */
|
|
2432 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
|
|
2433
|
|
2434 if (is_class(REGISTER, input->type)) {
|
|
2435 /*
|
|
2436 * It's a direct register.
|
|
2437 */
|
|
2438 if (!is_register(input->basereg))
|
|
2439 goto err;
|
|
2440
|
|
2441 if (!is_reg_class(REG_EA, input->basereg))
|
|
2442 goto err;
|
|
2443
|
|
2444 /* broadcasting is not available with a direct register operand. */
|
|
2445 if (input->decoflags & BRDCAST_MASK) {
|
|
2446 nasm_error(ERR_NONFATAL, "Broadcasting not allowed from a register");
|
|
2447 goto err;
|
|
2448 }
|
|
2449
|
|
2450 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
|
|
2451 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
|
|
2452 output->sib_present = false; /* no SIB necessary */
|
|
2453 output->bytes = 0; /* no offset necessary either */
|
|
2454 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
|
|
2455 } else {
|
|
2456 /*
|
|
2457 * It's a memory reference.
|
|
2458 */
|
|
2459
|
|
2460 /* Embedded rounding or SAE is not available with a mem ref operand. */
|
|
2461 if (input->decoflags & (ER | SAE)) {
|
|
2462 nasm_error(ERR_NONFATAL,
|
|
2463 "Embedded rounding is available only with reg-reg op.");
|
|
2464 return -1;
|
|
2465 }
|
|
2466
|
|
2467 if (input->basereg == -1 &&
|
|
2468 (input->indexreg == -1 || input->scale == 0)) {
|
|
2469 /*
|
|
2470 * It's a pure offset.
|
|
2471 */
|
|
2472 if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
|
|
2473 if (input->segment == NO_SEG || (input->opflags & OPFLAG_RELATIVE)) {
|
|
2474 nasm_error(ERR_WARNING | ERR_PASS2, "absolute address can not be RIP-relative");
|
|
2475 input->type &= ~IP_REL;
|
|
2476 input->type |= MEMORY;
|
|
2477 }
|
|
2478 }
|
|
2479
|
|
2480 if (bits == 64 &&
|
|
2481 !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
|
|
2482 nasm_error(ERR_NONFATAL, "RIP-relative addressing is prohibited for mib.");
|
|
2483 return -1;
|
|
2484 }
|
|
2485
|
|
2486 if (eaflags & EAF_BYTEOFFS ||
|
|
2487 (eaflags & EAF_WORDOFFS &&
|
|
2488 input->disp_size != (addrbits != 16 ? 32 : 16))) {
|
|
2489 nasm_error(ERR_WARNING | ERR_PASS1, "displacement size ignored on absolute address");
|
|
2490 }
|
|
2491
|
|
2492 if (bits == 64 && (~input->type & IP_REL)) {
|
|
2493 output->sib_present = true;
|
|
2494 output->sib = GEN_SIB(0, 4, 5);
|
|
2495 output->bytes = 4;
|
|
2496 output->modrm = GEN_MODRM(0, rfield, 4);
|
|
2497 output->rip = false;
|
|
2498 } else {
|
|
2499 output->sib_present = false;
|
|
2500 output->bytes = (addrbits != 16 ? 4 : 2);
|
|
2501 output->modrm = GEN_MODRM(0, rfield, (addrbits != 16 ? 5 : 6));
|
|
2502 output->rip = bits == 64;
|
|
2503 }
|
|
2504 } else {
|
|
2505 /*
|
|
2506 * It's an indirection.
|
|
2507 */
|
|
2508 int i = input->indexreg, b = input->basereg, s = input->scale;
|
|
2509 int32_t seg = input->segment;
|
|
2510 int hb = input->hintbase, ht = input->hinttype;
|
|
2511 int t, it, bt; /* register numbers */
|
|
2512 opflags_t x, ix, bx; /* register flags */
|
|
2513
|
|
2514 if (s == 0)
|
|
2515 i = -1; /* make this easy, at least */
|
|
2516
|
|
2517 if (is_register(i)) {
|
|
2518 it = nasm_regvals[i];
|
|
2519 ix = nasm_reg_flags[i];
|
|
2520 } else {
|
|
2521 it = -1;
|
|
2522 ix = 0;
|
|
2523 }
|
|
2524
|
|
2525 if (is_register(b)) {
|
|
2526 bt = nasm_regvals[b];
|
|
2527 bx = nasm_reg_flags[b];
|
|
2528 } else {
|
|
2529 bt = -1;
|
|
2530 bx = 0;
|
|
2531 }
|
|
2532
|
|
2533 /* if either one are a vector register... */
|
|
2534 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
|
|
2535 opflags_t sok = BITS32 | BITS64;
|
|
2536 int32_t o = input->offset;
|
|
2537 int mod, scale, index, base;
|
|
2538
|
|
2539 /*
|
|
2540 * For a vector SIB, one has to be a vector and the other,
|
|
2541 * if present, a GPR. The vector must be the index operand.
|
|
2542 */
|
|
2543 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
|
|
2544 if (s == 0)
|
|
2545 s = 1;
|
|
2546 else if (s != 1)
|
|
2547 goto err;
|
|
2548
|
|
2549 t = bt, bt = it, it = t;
|
|
2550 x = bx, bx = ix, ix = x;
|
|
2551 }
|
|
2552
|
|
2553 if (bt != -1) {
|
|
2554 if (REG_GPR & ~bx)
|
|
2555 goto err;
|
|
2556 if (!(REG64 & ~bx) || !(REG32 & ~bx))
|
|
2557 sok &= bx;
|
|
2558 else
|
|
2559 goto err;
|
|
2560 }
|
|
2561
|
|
2562 /*
|
|
2563 * While we're here, ensure the user didn't specify
|
|
2564 * WORD or QWORD
|
|
2565 */
|
|
2566 if (input->disp_size == 16 || input->disp_size == 64)
|
|
2567 goto err;
|
|
2568
|
|
2569 if (addrbits == 16 ||
|
|
2570 (addrbits == 32 && !(sok & BITS32)) ||
|
|
2571 (addrbits == 64 && !(sok & BITS64)))
|
|
2572 goto err;
|
|
2573
|
|
2574 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
|
|
2575 : ((ix & YMMREG & ~REG_EA)
|
|
2576 ? EA_YMMVSIB : EA_XMMVSIB));
|
|
2577
|
|
2578 output->rex |= rexflags(it, ix, REX_X);
|
|
2579 output->rex |= rexflags(bt, bx, REX_B);
|
|
2580 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
|
|
2581
|
|
2582 index = it & 7; /* it is known to be != -1 */
|
|
2583
|
|
2584 switch (s) {
|
|
2585 case 1:
|
|
2586 scale = 0;
|
|
2587 break;
|
|
2588 case 2:
|
|
2589 scale = 1;
|
|
2590 break;
|
|
2591 case 4:
|
|
2592 scale = 2;
|
|
2593 break;
|
|
2594 case 8:
|
|
2595 scale = 3;
|
|
2596 break;
|
|
2597 default: /* then what the smeg is it? */
|
|
2598 goto err; /* panic */
|
|
2599 }
|
|
2600
|
|
2601 if (bt == -1) {
|
|
2602 base = 5;
|
|
2603 mod = 0;
|
|
2604 } else {
|
|
2605 base = (bt & 7);
|
|
2606 if (base != REG_NUM_EBP && o == 0 &&
|
|
2607 seg == NO_SEG && !forw_ref &&
|
|
2608 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
|
|
2609 mod = 0;
|
|
2610 else if (IS_MOD_01())
|
|
2611 mod = 1;
|
|
2612 else
|
|
2613 mod = 2;
|
|
2614 }
|
|
2615
|
|
2616 output->sib_present = true;
|
|
2617 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
|
|
2618 output->modrm = GEN_MODRM(mod, rfield, 4);
|
|
2619 output->sib = GEN_SIB(scale, index, base);
|
|
2620 } else if ((ix|bx) & (BITS32|BITS64)) {
|
|
2621 /*
|
|
2622 * it must be a 32/64-bit memory reference. Firstly we have
|
|
2623 * to check that all registers involved are type E/Rxx.
|
|
2624 */
|
|
2625 opflags_t sok = BITS32 | BITS64;
|
|
2626 int32_t o = input->offset;
|
|
2627
|
|
2628 if (it != -1) {
|
|
2629 if (!(REG64 & ~ix) || !(REG32 & ~ix))
|
|
2630 sok &= ix;
|
|
2631 else
|
|
2632 goto err;
|
|
2633 }
|
|
2634
|
|
2635 if (bt != -1) {
|
|
2636 if (REG_GPR & ~bx)
|
|
2637 goto err; /* Invalid register */
|
|
2638 if (~sok & bx & SIZE_MASK)
|
|
2639 goto err; /* Invalid size */
|
|
2640 sok &= bx;
|
|
2641 }
|
|
2642
|
|
2643 /*
|
|
2644 * While we're here, ensure the user didn't specify
|
|
2645 * WORD or QWORD
|
|
2646 */
|
|
2647 if (input->disp_size == 16 || input->disp_size == 64)
|
|
2648 goto err;
|
|
2649
|
|
2650 if (addrbits == 16 ||
|
|
2651 (addrbits == 32 && !(sok & BITS32)) ||
|
|
2652 (addrbits == 64 && !(sok & BITS64)))
|
|
2653 goto err;
|
|
2654
|
|
2655 /* now reorganize base/index */
|
|
2656 if (s == 1 && bt != it && bt != -1 && it != -1 &&
|
|
2657 ((hb == b && ht == EAH_NOTBASE) ||
|
|
2658 (hb == i && ht == EAH_MAKEBASE))) {
|
|
2659 /* swap if hints say so */
|
|
2660 t = bt, bt = it, it = t;
|
|
2661 x = bx, bx = ix, ix = x;
|
|
2662 }
|
|
2663
|
|
2664 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
|
|
2665 /* make single reg base, unless hint */
|
|
2666 bt = it, bx = ix, it = -1, ix = 0;
|
|
2667 }
|
|
2668 if (eaflags & EAF_MIB) {
|
|
2669 /* only for mib operands */
|
|
2670 if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
|
|
2671 /*
|
|
2672 * make a single reg index [reg*1].
|
|
2673 * gas uses this form for an explicit index register.
|
|
2674 */
|
|
2675 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
|
|
2676 }
|
|
2677 if ((ht == EAH_SUMMED) && bt == -1) {
|
|
2678 /* separate once summed index into [base, index] */
|
|
2679 bt = it, bx = ix, s--;
|
|
2680 }
|
|
2681 } else {
|
|
2682 if (((s == 2 && it != REG_NUM_ESP &&
|
|
2683 (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
|
|
2684 s == 3 || s == 5 || s == 9) && bt == -1) {
|
|
2685 /* convert 3*EAX to EAX+2*EAX */
|
|
2686 bt = it, bx = ix, s--;
|
|
2687 }
|
|
2688 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
|
|
2689 (eaflags & EAF_TIMESTWO) &&
|
|
2690 (hb == b && ht == EAH_NOTBASE)) {
|
|
2691 /*
|
|
2692 * convert [NOSPLIT EAX*1]
|
|
2693 * to sib format with 0x0 displacement - [EAX*1+0].
|
|
2694 */
|
|
2695 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
|
|
2696 }
|
|
2697 }
|
|
2698 if (s == 1 && it == REG_NUM_ESP) {
|
|
2699 /* swap ESP into base if scale is 1 */
|
|
2700 t = it, it = bt, bt = t;
|
|
2701 x = ix, ix = bx, bx = x;
|
|
2702 }
|
|
2703 if (it == REG_NUM_ESP ||
|
|
2704 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
|
|
2705 goto err; /* wrong, for various reasons */
|
|
2706
|
|
2707 output->rex |= rexflags(it, ix, REX_X);
|
|
2708 output->rex |= rexflags(bt, bx, REX_B);
|
|
2709
|
|
2710 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
|
|
2711 /* no SIB needed */
|
|
2712 int mod, rm;
|
|
2713
|
|
2714 if (bt == -1) {
|
|
2715 rm = 5;
|
|
2716 mod = 0;
|
|
2717 } else {
|
|
2718 rm = (bt & 7);
|
|
2719 if (rm != REG_NUM_EBP && o == 0 &&
|
|
2720 seg == NO_SEG && !forw_ref &&
|
|
2721 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
|
|
2722 mod = 0;
|
|
2723 else if (IS_MOD_01())
|
|
2724 mod = 1;
|
|
2725 else
|
|
2726 mod = 2;
|
|
2727 }
|
|
2728
|
|
2729 output->sib_present = false;
|
|
2730 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
|
|
2731 output->modrm = GEN_MODRM(mod, rfield, rm);
|
|
2732 } else {
|
|
2733 /* we need a SIB */
|
|
2734 int mod, scale, index, base;
|
|
2735
|
|
2736 if (it == -1)
|
|
2737 index = 4, s = 1;
|
|
2738 else
|
|
2739 index = (it & 7);
|
|
2740
|
|
2741 switch (s) {
|
|
2742 case 1:
|
|
2743 scale = 0;
|
|
2744 break;
|
|
2745 case 2:
|
|
2746 scale = 1;
|
|
2747 break;
|
|
2748 case 4:
|
|
2749 scale = 2;
|
|
2750 break;
|
|
2751 case 8:
|
|
2752 scale = 3;
|
|
2753 break;
|
|
2754 default: /* then what the smeg is it? */
|
|
2755 goto err; /* panic */
|
|
2756 }
|
|
2757
|
|
2758 if (bt == -1) {
|
|
2759 base = 5;
|
|
2760 mod = 0;
|
|
2761 } else {
|
|
2762 base = (bt & 7);
|
|
2763 if (base != REG_NUM_EBP && o == 0 &&
|
|
2764 seg == NO_SEG && !forw_ref &&
|
|
2765 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
|
|
2766 mod = 0;
|
|
2767 else if (IS_MOD_01())
|
|
2768 mod = 1;
|
|
2769 else
|
|
2770 mod = 2;
|
|
2771 }
|
|
2772
|
|
2773 output->sib_present = true;
|
|
2774 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
|
|
2775 output->modrm = GEN_MODRM(mod, rfield, 4);
|
|
2776 output->sib = GEN_SIB(scale, index, base);
|
|
2777 }
|
|
2778 } else { /* it's 16-bit */
|
|
2779 int mod, rm;
|
|
2780 int16_t o = input->offset;
|
|
2781
|
|
2782 /* check for 64-bit long mode */
|
|
2783 if (addrbits == 64)
|
|
2784 goto err;
|
|
2785
|
|
2786 /* check all registers are BX, BP, SI or DI */
|
|
2787 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
|
|
2788 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
|
|
2789 goto err;
|
|
2790
|
|
2791 /* ensure the user didn't specify DWORD/QWORD */
|
|
2792 if (input->disp_size == 32 || input->disp_size == 64)
|
|
2793 goto err;
|
|
2794
|
|
2795 if (s != 1 && i != -1)
|
|
2796 goto err; /* no can do, in 16-bit EA */
|
|
2797 if (b == -1 && i != -1) {
|
|
2798 int tmp = b;
|
|
2799 b = i;
|
|
2800 i = tmp;
|
|
2801 } /* swap */
|
|
2802 if ((b == R_SI || b == R_DI) && i != -1) {
|
|
2803 int tmp = b;
|
|
2804 b = i;
|
|
2805 i = tmp;
|
|
2806 }
|
|
2807 /* have BX/BP as base, SI/DI index */
|
|
2808 if (b == i)
|
|
2809 goto err; /* shouldn't ever happen, in theory */
|
|
2810 if (i != -1 && b != -1 &&
|
|
2811 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
|
|
2812 goto err; /* invalid combinations */
|
|
2813 if (b == -1) /* pure offset: handled above */
|
|
2814 goto err; /* so if it gets to here, panic! */
|
|
2815
|
|
2816 rm = -1;
|
|
2817 if (i != -1)
|
|
2818 switch (i * 256 + b) {
|
|
2819 case R_SI * 256 + R_BX:
|
|
2820 rm = 0;
|
|
2821 break;
|
|
2822 case R_DI * 256 + R_BX:
|
|
2823 rm = 1;
|
|
2824 break;
|
|
2825 case R_SI * 256 + R_BP:
|
|
2826 rm = 2;
|
|
2827 break;
|
|
2828 case R_DI * 256 + R_BP:
|
|
2829 rm = 3;
|
|
2830 break;
|
|
2831 } else
|
|
2832 switch (b) {
|
|
2833 case R_SI:
|
|
2834 rm = 4;
|
|
2835 break;
|
|
2836 case R_DI:
|
|
2837 rm = 5;
|
|
2838 break;
|
|
2839 case R_BP:
|
|
2840 rm = 6;
|
|
2841 break;
|
|
2842 case R_BX:
|
|
2843 rm = 7;
|
|
2844 break;
|
|
2845 }
|
|
2846 if (rm == -1) /* can't happen, in theory */
|
|
2847 goto err; /* so panic if it does */
|
|
2848
|
|
2849 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
|
|
2850 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
|
|
2851 mod = 0;
|
|
2852 else if (IS_MOD_01())
|
|
2853 mod = 1;
|
|
2854 else
|
|
2855 mod = 2;
|
|
2856
|
|
2857 output->sib_present = false; /* no SIB - it's 16-bit */
|
|
2858 output->bytes = mod; /* bytes of offset needed */
|
|
2859 output->modrm = GEN_MODRM(mod, rfield, rm);
|
|
2860 }
|
|
2861 }
|
|
2862 }
|
|
2863
|
|
2864 output->size = 1 + output->sib_present + output->bytes;
|
|
2865 return output->type;
|
|
2866
|
|
2867 err:
|
|
2868 return output->type = EA_INVALID;
|
|
2869 }
|
|
2870
|
|
2871 static void add_asp(insn *ins, int addrbits)
|
|
2872 {
|
|
2873 int j, valid;
|
|
2874 int defdisp;
|
|
2875
|
|
2876 valid = (addrbits == 64) ? 64|32 : 32|16;
|
|
2877
|
|
2878 switch (ins->prefixes[PPS_ASIZE]) {
|
|
2879 case P_A16:
|
|
2880 valid &= 16;
|
|
2881 break;
|
|
2882 case P_A32:
|
|
2883 valid &= 32;
|
|
2884 break;
|
|
2885 case P_A64:
|
|
2886 valid &= 64;
|
|
2887 break;
|
|
2888 case P_ASP:
|
|
2889 valid &= (addrbits == 32) ? 16 : 32;
|
|
2890 break;
|
|
2891 default:
|
|
2892 break;
|
|
2893 }
|
|
2894
|
|
2895 for (j = 0; j < ins->operands; j++) {
|
|
2896 if (is_class(MEMORY, ins->oprs[j].type)) {
|
|
2897 opflags_t i, b;
|
|
2898
|
|
2899 /* Verify as Register */
|
|
2900 if (!is_register(ins->oprs[j].indexreg))
|
|
2901 i = 0;
|
|
2902 else
|
|
2903 i = nasm_reg_flags[ins->oprs[j].indexreg];
|
|
2904
|
|
2905 /* Verify as Register */
|
|
2906 if (!is_register(ins->oprs[j].basereg))
|
|
2907 b = 0;
|
|
2908 else
|
|
2909 b = nasm_reg_flags[ins->oprs[j].basereg];
|
|
2910
|
|
2911 if (ins->oprs[j].scale == 0)
|
|
2912 i = 0;
|
|
2913
|
|
2914 if (!i && !b) {
|
|
2915 int ds = ins->oprs[j].disp_size;
|
|
2916 if ((addrbits != 64 && ds > 8) ||
|
|
2917 (addrbits == 64 && ds == 16))
|
|
2918 valid &= ds;
|
|
2919 } else {
|
|
2920 if (!(REG16 & ~b))
|
|
2921 valid &= 16;
|
|
2922 if (!(REG32 & ~b))
|
|
2923 valid &= 32;
|
|
2924 if (!(REG64 & ~b))
|
|
2925 valid &= 64;
|
|
2926
|
|
2927 if (!(REG16 & ~i))
|
|
2928 valid &= 16;
|
|
2929 if (!(REG32 & ~i))
|
|
2930 valid &= 32;
|
|
2931 if (!(REG64 & ~i))
|
|
2932 valid &= 64;
|
|
2933 }
|
|
2934 }
|
|
2935 }
|
|
2936
|
|
2937 if (valid & addrbits) {
|
|
2938 ins->addr_size = addrbits;
|
|
2939 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
|
|
2940 /* Add an address size prefix */
|
|
2941 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;;
|
|
2942 ins->addr_size = (addrbits == 32) ? 16 : 32;
|
|
2943 } else {
|
|
2944 /* Impossible... */
|
|
2945 nasm_error(ERR_NONFATAL, "impossible combination of address sizes");
|
|
2946 ins->addr_size = addrbits; /* Error recovery */
|
|
2947 }
|
|
2948
|
|
2949 defdisp = ins->addr_size == 16 ? 16 : 32;
|
|
2950
|
|
2951 for (j = 0; j < ins->operands; j++) {
|
|
2952 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
|
|
2953 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
|
|
2954 /*
|
|
2955 * mem_offs sizes must match the address size; if not,
|
|
2956 * strip the MEM_OFFS bit and match only EA instructions
|
|
2957 */
|
|
2958 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);
|
|
2959 }
|
|
2960 }
|
|
2961 }
|