 */

#include "cvm_config.h"
#include "md_amd64.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CVM_X86_64

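/*
 * Emit code to shift "reg1" by the count in "reg2".  On x86-64 a
 * variable shift count must be in CL, so the helper shuffles values
 * through RCX as needed.
 */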
md_inst_ptr _md_amd64_shift(md_inst_ptr inst, int opc, int reg1, int reg2)
{
	if(reg2 == AMD64_RCX)
	{
		/* The shift count is already in RCX */
		amd64_shift_reg_size(inst, opc, reg1, 4);
	}
	else if(reg1 == AMD64_RCX)
	{
		/* The value to be shifted is in RCX, so swap the order */
		amd64_xchg_reg_reg(inst, reg1, reg2, 4);
		amd64_shift_reg_size(inst, opc, reg2, 4);
		amd64_mov_reg_reg(inst, reg1, reg2, 4);
	}
	else
	{
		/* Save RCX, perform the shift, and then restore RCX */
		amd64_push_reg(inst, AMD64_RCX);
		amd64_mov_reg_reg(inst, AMD64_RCX, reg2, 4);
		amd64_shift_reg_size(inst, opc, reg1, 4);
		amd64_pop_reg(inst, AMD64_RCX);
	}
	return inst;
}

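/*
 * Emit code to store the low byte of "srcreg" at "basereg + offset".
 * The register check below is inherited from the 32-bit x86 version,
 * where only EAX, EBX, ECX and EDX have 8-bit forms; other source
 * registers are copied through RAX or RDX first.
 */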
md_inst_ptr _md_amd64_mov_membase_reg_byte
	(md_inst_ptr inst, int basereg, int offset, int srcreg)
{
	if(srcreg == AMD64_RAX || srcreg == AMD64_RBX ||
	   srcreg == AMD64_RCX || srcreg == AMD64_RDX)
	{
		amd64_mov_membase_reg(inst, basereg, offset, srcreg, 1);
	}
	else if(basereg != AMD64_RAX)
	{
		amd64_push_reg(inst, AMD64_RAX);
		amd64_mov_reg_reg(inst, AMD64_RAX, srcreg, 4);
		amd64_mov_membase_reg(inst, basereg, offset, AMD64_RAX, 1);
		amd64_pop_reg(inst, AMD64_RAX);
	}
	else
	{
		amd64_push_reg(inst, AMD64_RDX);
		amd64_mov_reg_reg(inst, AMD64_RDX, srcreg, 4);
		amd64_mov_membase_reg(inst, basereg, offset, AMD64_RDX, 1);
		amd64_pop_reg(inst, AMD64_RDX);
	}
	return inst;
}

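/*
 * Byte store through a base-plus-index address.  Not yet ported
 * from the x86 version, so it is compiled out below.
 */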
#if 0 /* TODO */
md_inst_ptr _md_x86_mov_memindex_reg_byte(md_inst_ptr inst, int basereg,
                                          unsigned offset, int indexreg,
                                          int srcreg)
	/* ... */
	}
	return inst;
}
#endif

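/*
 * Emit code to set "reg" to 0 or 1 according to condition "cond",
 * evaluated against the register's current value.  For a register
 * with no 8-bit form the fallback sequence is roughly:
 *
 *     j<cond>  1f
 *     xor      reg, reg
 *     jmp      2f
 * 1:  mov      reg, 1
 * 2:
 */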
md_inst_ptr _md_amd64_setcc(md_inst_ptr inst, int reg, int cond)
{
	if(cond == X86_CC_EQ || cond == X86_CC_NE)
	{
		amd64_alu_reg_reg_size(inst, X86_OR, reg, reg, 4);
	}
	else
	{
		amd64_alu_reg_imm_size(inst, X86_CMP, reg, 0, 4);
	}
	if(reg == AMD64_RAX || reg == AMD64_RBX || reg == AMD64_RCX || reg == AMD64_RDX)
	{
		/* Use a SETcc instruction if we have a basic register */
		amd64_set_reg_size(inst, cond, reg, 1, 4);
		amd64_widen_reg_size(inst, reg, reg, 0, 0, 4);
	}
	else
	{
		/* The register is not usable as an 8-bit destination */
		unsigned char *patch1, *patch2;
		patch1 = inst;
		amd64_branch8(inst, cond, 0, 1);
		amd64_clear_reg_size(inst, reg, 4);
		patch2 = inst;
		amd64_jump8(inst, 0);
		amd64_patch(patch1, inst);
		amd64_mov_reg_imm_size(inst, reg, 1, 4);
		amd64_patch(patch2, inst);
	}
	return inst;
}

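/*
 * Emit code to compare "reg1" with "reg2" and replace "reg1" with
 * -1, 0 or 1 for less than, equal or greater.  The emitted sequence
 * is roughly:
 *
 *     cmp  reg1, reg2
 *     jge  1f
 *     mov  reg1, -1
 *     jmp  3f
 * 1:  je   2f
 *     mov  reg1, 1
 *     jmp  3f
 * 2:  xor  reg1, reg1
 * 3:
 */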
md_inst_ptr _md_amd64_compare(md_inst_ptr inst, int reg1, int reg2, int isSigned, int size)
{
	unsigned char *patch1, *patch2, *patch3;
	amd64_alu_reg_reg_size(inst, X86_CMP, reg1, reg2, size);
	patch1 = inst;
	amd64_branch8(inst, X86_CC_GE, 0, isSigned);
	amd64_mov_reg_imm_size(inst, reg1, -1, size);
	patch2 = inst;
	amd64_jump8(inst, 0);
	amd64_patch(patch1, inst);
	patch1 = inst;
	amd64_branch8(inst, X86_CC_EQ, 0, 0);
	amd64_mov_reg_imm_size(inst, reg1, 1, size);
	patch3 = inst;
	amd64_jump8(inst, 0);
	amd64_patch(patch1, inst);
	amd64_clear_reg_size(inst, reg1, size);
	amd64_patch(patch2, inst);
	amd64_patch(patch3, inst);
	return inst;
}

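/*
 * Emit code to widen the low byte of "reg" to a full word, with
 * sign-extension if "isSigned" is non-zero.
 */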
md_inst_ptr _md_amd64_widen_byte(md_inst_ptr inst, int reg, int isSigned)
{
	if(reg == AMD64_RAX || reg == AMD64_RBX || reg == AMD64_RCX || reg == AMD64_RDX)
	{
		amd64_widen_reg(inst, reg, reg, isSigned, 0);
	}
	else
	{
		amd64_push_reg(inst, AMD64_RAX);
		amd64_mov_reg_reg(inst, AMD64_RAX, reg, 4);
		amd64_widen_reg(inst, reg, AMD64_RAX, isSigned, 0);
		amd64_pop_reg(inst, AMD64_RAX);
	}
	return inst;
}

#if 0

md_inst_ptr _md_x86_cmp_float(md_inst_ptr inst, int dreg, int lessop)
{
	/* ... */
	}
	return inst;
}
#endif

#endif /* CVM_X86_64 */

#ifdef __cplusplus
};