@@ +182 @@
     return 0;
 }
 
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+    unsigned int limit;
+    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+    if (e2 & DESC_G_MASK)
+        limit = (limit << 12) | 0xfff;
+    return limit;
+}
+
+static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
+{
+    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
+}
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
+{
+    sc->base = get_seg_base(e1, e2);
+    sc->limit = get_seg_limit(e1, e2);
+    sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode. */
+static inline void load_seg_vm(int seg, int selector)
+{
+    selector &= 0xffff;
+    cpu_x86_load_seg_cache(env, seg, selector,
+                           (uint8_t *)(selector << 4), 0xffff, 0);
+}
+
 /* protected mode interrupt */
 static void do_interrupt_protected(int intno, int is_int, int error_code,
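
The two helpers added above decode the raw descriptor words as fetched from the GDT/LDT: e1 is the low dword (limit 15..0 and base 15..0) and e2 the high dword (base 23..16, access byte, limit 19..16, flags, base 31..24); when the granularity bit (DESC_G_MASK, bit 23 of e2) is set, the 20-bit limit is counted in 4 KiB units. A minimal standalone restatement with a worked example, not part of the patch (the descriptor words below are the usual flat 4 GiB code segment):

#include <stdint.h>
#include <stdio.h>

static uint32_t decode_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t decode_limit(uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23))          /* G bit: limit is in 4 KiB units */
        limit = (limit << 12) | 0xfff;
    return limit;
}

int main(void)
{
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;   /* flat 4 GiB code descriptor */
    printf("base=0x%08x limit=0x%08x\n",
           (unsigned)decode_base(e1, e2), (unsigned)decode_limit(e1, e2));
    /* prints: base=0x00000000 limit=0xffffffff */
    return 0;
}
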
@@ +316 @@
     if (new_stack) {
         old_esp = ESP;
         old_ss = env->segs[R_SS].selector;
-        load_seg(R_SS, ss, env->eip);
+        ss = (ss & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_SS, ss,
+                               get_seg_base(ss_e1, ss_e2),
+                               get_seg_limit(ss_e1, ss_e2),
+                               ss_e2);
     } else {
         old_esp = 0;
         old_ss = 0;
@@ +331 @@
     else
         old_eip = env->eip;
     old_cs = env->segs[R_CS].selector;
-    load_seg(R_CS, selector, env->eip);
+    selector = (selector & ~3) | dpl;
+    cpu_x86_load_seg_cache(env, R_CS, selector,
+                           get_seg_base(e1, e2),
+                           get_seg_limit(e1, e2),
+                           e2);
+    cpu_x86_set_cpl(env, dpl);
     env->eip = offset;
     ESP = esp - push_size;
     ssp = env->segs[R_SS].base + esp;
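
In both of these hunks the selector written into the segment cache has its RPL field (the low two bits) forced to the privilege level the interrupt switches to, and the CPL change that load_seg() used to apply as a side effect is now the explicit cpu_x86_set_cpl() call. A small standalone illustration of the selector arithmetic, with made-up values:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint16_t sel = 0x002b;       /* GDT index 5, RPL = 3 */
    unsigned dpl = 0;            /* target privilege of the handler */
    sel = (sel & ~3) | dpl;      /* same descriptor, RPL forced to 0 */
    assert(sel == 0x0028);
    return 0;
}
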
@@ +630 @@
     }
 }
 
-static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
-{
-    sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
-    sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
-    if (e2 & DESC_G_MASK)
-        sc->limit = (sc->limit << 12) | 0xfff;
-    sc->flags = e2;
-}
-
 void helper_lldt_T0(void)
 {
     int selector;
@@ +657 @@
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        load_seg_cache(&env->ldt, e1, e2);
+        load_seg_cache_raw_dt(&env->ldt, e1, e2);
     }
     env->ldt.selector = selector;
 }
@@ +692 @@
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        load_seg_cache(&env->tr, e1, e2);
+        load_seg_cache_raw_dt(&env->tr, e1, e2);
         e2 |= 0x00000200; /* set the busy bit */
         stl(ptr + 4, e2);
     }
     env->tr.selector = selector;
 }
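
Right after caching the new task register, the descriptor's type field (bits 8..11 of e2) is updated in memory: OR-ing in 0x00000200 flips an available TSS type into its busy variant, which marks the TSS as busy in the GDT (architecturally, a busy TSS cannot be loaded again with LTR). A standalone restatement with an example descriptor word (hypothetical value, not from the patch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint32_t e2 = 0x00008900;            /* P=1, DPL=0, type 9: available 32-bit TSS */
    assert(((e2 >> 8) & 0xf) == 0x9);
    e2 |= 0x00000200;                    /* set bit 9 of the type field */
    assert(((e2 >> 8) & 0xf) == 0xb);    /* type 11: busy 32-bit TSS */
    return 0;
}
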
@@ +701 @@
 
-/* only works if protected mode and not VM86 */
+/* only works if protected mode and not VM86. Calling load_seg with
+   seg_reg == R_CS is discouraged */
 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
 {
-    SegmentCache *sc;
     uint32_t e1, e2;
 
-    sc = &env->segs[seg_reg];
     if ((selector & 0xfffc) == 0) {
         /* null selector case */
         if (seg_reg == R_SS) {
             EIP = cur_eip;
             raise_exception_err(EXCP0D_GPF, 0);
         } else {
-            /* XXX: each access should trigger an exception */
-            sc->base = NULL;
-            sc->limit = 0;
-            sc->flags = 0;
+            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
         }
     } else {
         if (load_segment(&e1, &e2, selector) != 0) {
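
A selector is treated as null here when its descriptor index and TI bit are zero, regardless of the requested privilege in the low two bits, hence the 0xfffc mask. A standalone illustration (values are hypothetical):

#include <assert.h>

int main(void)
{
    assert((0x0000 & 0xfffc) == 0);   /* null selector */
    assert((0x0003 & 0xfffc) == 0);   /* still null: only RPL bits differ */
    assert((0x0008 & 0xfffc) != 0);   /* GDT index 1: a real descriptor */
    return 0;
}
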
@@ +743 @@
             else
                 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
         }
-        load_seg_cache(sc, e1, e2);
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
 #if 0
         fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
 #endif
     }
-    if (seg_reg == R_CS) {
-        cpu_x86_set_cpl(env, selector & 3);
-    }
-    sc->selector = selector;
 }
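
With this hunk load_seg() no longer pokes the SegmentCache fields or the CPL itself: cpu_x86_load_seg_cache() receives the selector plus the decoded base, limit and flags in one call, and the CPL updates happen explicitly in the interrupt, call and return paths changed elsewhere in this patch (hence the new "R_CS is discouraged" comment). For reference, turning a selector into the e1/e2 descriptor words is an indexed pair of 32-bit reads from the descriptor table; a simplified standalone sketch (on real hardware the lookup also selects GDT vs. LDT from the TI bit and checks the table limit):

#include <stdint.h>
#include <stdio.h>

/* toy GDT: entry 1 is a flat 4 GiB writable data segment */
static const uint32_t gdt[][2] = {
    { 0x00000000, 0x00000000 },
    { 0x0000ffff, 0x00cf9300 },
};

int main(void)
{
    int selector = 0x000b;                 /* index 1, TI = 0, RPL = 3 */
    int index = (selector >> 3) & 0x1fff;  /* descriptor index */
    uint32_t e1 = gdt[index][0];
    uint32_t e2 = gdt[index][1];
    printf("e1=0x%08x e2=0x%08x\n", (unsigned)e1, (unsigned)e2);
    return 0;
}
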
@@ +756 @@
 
 /* protected mode jump */
 void helper_ljmp_protected_T0_T1(void)
 {
     int new_cs, new_eip;
-    SegmentCache sc1;
-    uint32_t e1, e2, cpl, dpl, rpl;
+    uint32_t e1, e2, cpl, dpl, rpl, limit;
 
     new_cs = T0;
     new_eip = T1;
@@ +785 @@
         }
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
-        load_seg_cache(&sc1, e1, e2);
-        if (new_eip > sc1.limit)
+        limit = get_seg_limit(e1, e2);
+        if (new_eip > limit)
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-        env->segs[R_CS].base = sc1.base;
-        env->segs[R_CS].limit = sc1.limit;
-        env->segs[R_CS].flags = sc1.flags;
-        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
+        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                       get_seg_base(e1, e2), limit, e2);
         EIP = new_eip;
     } else {
         cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
@@ +836 @@
 void helper_lcall_protected_T0_T1(int shift, int next_eip)
 {
     int new_cs, new_eip;
-    SegmentCache sc1;
     uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
     uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
-    uint32_t old_ss, old_esp, val, i;
+    uint32_t old_ss, old_esp, val, i, limit;
     uint8_t *ssp, *old_ssp;
 
     new_cs = T0;
@@ +884 @@
         }
         sp -= (4 << shift);
 
-        load_seg_cache(&sc1, e1, e2);
-        if (new_eip > sc1.limit)
+        limit = get_seg_limit(e1, e2);
+        if (new_eip > limit)
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         /* from this point, not restartable */
         if (!(env->segs[R_SS].flags & DESC_B_MASK))
             ESP = (ESP & 0xffff0000) | (sp & 0xffff);
         else
             ESP = sp;
-        env->segs[R_CS].base = sc1.base;
-        env->segs[R_CS].limit = sc1.limit;
-        env->segs[R_CS].flags = sc1.flags;
-        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
+        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                       get_seg_base(e1, e2), limit, e2);
         EIP = new_eip;
     } else {
         /* check gate type */
@@ +964 @@
             old_ssp = env->segs[R_SS].base + old_esp;
 
             /* XXX: from this point not restartable */
-            load_seg(R_SS, ss, env->eip);
+            ss = (ss & ~3) | dpl;
+            cpu_x86_load_seg_cache(env, R_SS, ss,
+                                   get_seg_base(ss_e1, ss_e2),
+                                   get_seg_limit(ss_e1, ss_e2),
+                                   ss_e2);
 
             if (!(env->segs[R_SS].flags & DESC_B_MASK))
                 sp &= 0xffff;
@@ +1015 @@
         }
 
         sp -= push_size;
-        load_seg(R_CS, selector, env->eip);
+        selector = (selector & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_CS, selector,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+        cpu_x86_set_cpl(env, dpl);
 
         /* from this point, not restartable if same priviledge */
         if (!(env->segs[R_SS].flags & DESC_B_MASK))
             ESP = (ESP & 0xffff0000) | (sp & 0xffff);
@@ +1031 @@
     }
 }
 
-/* init the segment cache in vm86 mode */
-static inline void load_seg_vm(int seg, int selector)
-{
-    SegmentCache *sc = &env->segs[seg];
-    selector &= 0xffff;
-    sc->base = (uint8_t *)(selector << 4);
-    sc->selector = selector;
-    sc->flags = 0;
-    sc->limit = 0xffff;
-}
-
 /* real mode iret */
 void helper_iret_real(int shift)
 {
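
The old copy of load_seg_vm() removed here is superseded by the one added near the top of the patch (around new line 206), which routes through cpu_x86_load_seg_cache() with the same vm86/real-mode semantics: the cached base is simply the selector shifted left by four and the limit stays at 64 KiB. A standalone restatement of that address arithmetic (example values, not from the patch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint16_t selector = 0xb800;               /* example real-mode segment */
    uint16_t offset   = 0x0123;
    uint32_t base  = (uint32_t)selector << 4; /* what load_seg_vm caches as the base */
    uint32_t limit = 0xffff;                  /* 64 KiB, never scaled by G */
    assert(offset <= limit);
    assert(base + offset == 0xb8123);
    return 0;
}
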
@@ +1067 @@
 {
     uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
     uint32_t new_es, new_ds, new_fs, new_gs;
-    uint32_t e1, e2;
+    uint32_t e1, e2, ss_e1, ss_e2;
     int cpl, dpl, rpl, eflags_mask;
     uint8_t *ssp;
 
@@ +1114 @@
 
     if (rpl == cpl) {
         /* return to same priledge level */
-        load_seg(R_CS, new_cs, env->eip);
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                       get_seg_base(e1, e2),
+                       get_seg_limit(e1, e2),
+                       e2);
         new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
     } else {
         /* return to different priviledge level */
@@ +1134 @@
 
         if ((new_ss & 3) != rpl)
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (load_segment(&e1, &e2, new_ss) != 0)
+        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (!(e2 & DESC_S_MASK) ||
-            (e2 & DESC_CS_MASK) ||
-            !(e2 & DESC_W_MASK))
+        if (!(ss_e2 & DESC_S_MASK) ||
+            (ss_e2 & DESC_CS_MASK) ||
+            !(ss_e2 & DESC_W_MASK))
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
         if (dpl != rpl)
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (!(e2 & DESC_P_MASK))
+        if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
 
-        load_seg(R_CS, new_cs, env->eip);
-        load_seg(R_SS, new_ss, env->eip);
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                       get_seg_base(e1, e2),
+                       get_seg_limit(e1, e2),
+                       e2);
+        cpu_x86_load_seg_cache(env, R_SS, new_ss,
+                       get_seg_base(ss_e1, ss_e2),
+                       get_seg_limit(ss_e1, ss_e2),
+                       ss_e2);
+        cpu_x86_set_cpl(env, rpl);
     }
     if (env->segs[R_SS].flags & DESC_B_MASK)
         ESP = new_esp;
@@ +1163 @@
             (new_esp & 0xffff);
     env->eip = new_eip;
     if (is_iret) {
+        /* NOTE: 'cpl' can be different from the current CPL */
         if (cpl == 0)
             eflags_mask = FL_UPDATE_CPL0_MASK;
         else