Before/after comparison of the affected helpers, shown as a unified diff ("-" = old code, "+" = new code); hunk line numbers refer to the new version of the file.

@@ lines 185-191 @@

 /* protected mode interrupt */
 static void do_interrupt_protected(int intno, int is_int, int error_code,
                                    unsigned int next_eip)
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
@@ lines 378-396 @@
     ptr = dt->base + intno * 4;
     offset = lduw(ptr);
     selector = lduw(ptr + 2);
-    esp = env->regs[R_ESP] & 0xffff;
-    ssp = env->segs[R_SS].base + esp;
+    esp = env->regs[R_ESP];
+    ssp = env->segs[R_SS].base;
     if (is_int)
         old_eip = next_eip;
     else
         old_eip = env->eip;
     old_cs = env->segs[R_CS].selector;
-    ssp -= 2;
-    stw(ssp, compute_eflags());
-    ssp -= 2;
-    stw(ssp, old_cs);
-    ssp -= 2;
-    stw(ssp, old_eip);
-    esp -= 6;
+    esp -= 2;
+    stw(ssp + (esp & 0xffff), compute_eflags());
+    esp -= 2;
+    stw(ssp + (esp & 0xffff), old_cs);
+    esp -= 2;
+    stw(ssp + (esp & 0xffff), old_eip);

     /* update processor state */
     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
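Note on the hunk above: the rewritten push sequence decrements esp before each store and applies the 0xffff mask at the moment of the store, so a push with SP at 0 wraps to the top of the 64 KB stack segment instead of the host pointer walking below the segment base, and esp already holds the final value when R_ESP is written back. A minimal standalone sketch of that idiom, assuming nothing from QEMU (stack_seg, sp and push16 are made-up names for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t stack_seg[0x10000];   /* pretend 64 KB stack segment */
    static uint32_t sp = 0x0000;         /* SP == 0, so the first push must wrap */

    /* decrement first, then store at base + (sp & 0xffff), as the new code does */
    static void push16(uint16_t val)
    {
        sp -= 2;
        memcpy(stack_seg + (sp & 0xffff), &val, sizeof(val));
    }

    int main(void)
    {
        push16(0x1234);                          /* lands at offset 0xfffe */
        printf("SP after push: 0x%04x\n", sp & 0xffff);
        return 0;
    }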
@@ lines 732-1006 @@
 }

 /* protected mode jump */
-void jmp_seg(int selector, unsigned int new_eip)
+void helper_ljmp_protected_T0_T1(void)
 {
+    int new_cs, new_eip;
     SegmentCache sc1;
     uint32_t e1, e2, cpl, dpl, rpl;

-    if ((selector & 0xfffc) == 0) {
+    new_cs = T0;
+    new_eip = T1;
+    if ((new_cs & 0xfffc) == 0)
         raise_exception_err(EXCP0D_GPF, 0);
+    if (load_segment(&e1, &e2, new_cs) != 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    cpl = env->segs[R_CS].selector & 3;
+    if (e2 & DESC_S_MASK) {
+        if (!(e2 & DESC_CS_MASK))
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (e2 & DESC_CS_MASK) {
+            /* conforming code segment */
+            if (dpl > cpl)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        } else {
+            /* non conforming code segment */
+            rpl = new_cs & 3;
+            if (rpl > cpl)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+            if (dpl != cpl)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        }
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+        load_seg_cache(&sc1, e1, e2);
+        if (new_eip > sc1.limit)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        env->segs[R_CS].base = sc1.base;
+        env->segs[R_CS].limit = sc1.limit;
+        env->segs[R_CS].flags = sc1.flags;
+        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
+        EIP = new_eip;
+    } else {
+        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
+                  new_cs, new_eip);
     }
+}

-    if (load_segment(&e1, &e2, selector) != 0)
-        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+/* real mode call */
+void helper_lcall_real_T0_T1(int shift, int next_eip)
+{
+    int new_cs, new_eip;
+    uint32_t esp, esp_mask;
+    uint8_t *ssp;
+
+    new_cs = T0;
+    new_eip = T1;
+    esp = env->regs[R_ESP];
+    esp_mask = 0xffffffff;
+    if (!(env->segs[R_SS].flags & DESC_B_MASK))
+        esp_mask = 0xffff;
+    ssp = env->segs[R_SS].base;
+    if (shift) {
+        esp -= 4;
+        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        esp -= 4;
+        stl(ssp + (esp & esp_mask), next_eip);
+    } else {
+        esp -= 2;
+        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        esp -= 2;
+        stw(ssp + (esp & esp_mask), next_eip);
+    }
+
+    if (!(env->segs[R_SS].flags & DESC_B_MASK))
+        env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+    else
+        env->regs[R_ESP] = esp;
+    env->eip = new_eip;
+    env->segs[R_CS].selector = new_cs;
+    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
+}
+
+/* protected mode call */
+void helper_lcall_protected_T0_T1(int shift, int next_eip)
+{
+    int new_cs, new_eip;
+    SegmentCache sc1;
+    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
+    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
+    uint32_t old_ss, old_esp, val, i;
+    uint8_t *ssp, *old_ssp;
+
+    new_cs = T0;
+    new_eip = T1;
+    if ((new_cs & 0xfffc) == 0)
+        raise_exception_err(EXCP0D_GPF, 0);
+    if (load_segment(&e1, &e2, new_cs) != 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     cpl = env->segs[R_CS].selector & 3;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
-            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
         if (e2 & DESC_CS_MASK) {
             /* conforming code segment */
             if (dpl > cpl)
-                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         } else {
             /* non conforming code segment */
-            rpl = selector & 3;
+            rpl = new_cs & 3;
             if (rpl > cpl)
-                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
             if (dpl != cpl)
-                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         }
         if (!(e2 & DESC_P_MASK))
-            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

+        sp = env->regs[R_ESP];
+        if (!(env->segs[R_SS].flags & DESC_B_MASK))
+            sp &= 0xffff;
+        ssp = env->segs[R_SS].base + sp;
+        if (shift) {
+            ssp -= 4;
+            stl(ssp, env->segs[R_CS].selector);
+            ssp -= 4;
+            stl(ssp, next_eip);
+        } else {
+            ssp -= 2;
+            stw(ssp, env->segs[R_CS].selector);
+            ssp -= 2;
+            stw(ssp, next_eip);
+        }
+        sp -= (4 << shift);
+
         load_seg_cache(&sc1, e1, e2);
         if (new_eip > sc1.limit)
-            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        /* from this point, not restartable */
+        if (!(env->segs[R_SS].flags & DESC_B_MASK))
+            env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
+        else
+            env->regs[R_ESP] = sp;
         env->segs[R_CS].base = sc1.base;
         env->segs[R_CS].limit = sc1.limit;
         env->segs[R_CS].flags = sc1.flags;
-        env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
+        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
         EIP = new_eip;
     } else {
-        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
-                  selector, new_eip);
+        /* check gate type */
+        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+        switch(type) {
+        case 1: /* available 286 TSS */
+        case 9: /* available 386 TSS */
+        case 5: /* task gate */
+            cpu_abort(env, "task gate not supported");
+            break;
+        case 4: /* 286 call gate */
+        case 12: /* 386 call gate */
+            break;
+        default:
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+            break;
+        }
+        shift = type >> 3;
+
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        rpl = new_cs & 3;
+        if (dpl < cpl || dpl < rpl)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        /* check valid bit */
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+        selector = e1 >> 16;
+        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+        if ((selector & 0xfffc) == 0)
+            raise_exception_err(EXCP0D_GPF, 0);
+
+        if (load_segment(&e1, &e2, selector) != 0)
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (dpl > cpl)
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+
+        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+            /* to inner priviledge */
+            get_ss_esp_from_tss(&ss, &sp, dpl);
+            if ((ss & 0xfffc) == 0)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if ((ss & 3) != dpl)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+            if (ss_dpl != dpl)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (!(ss_e2 & DESC_S_MASK) ||
+                (ss_e2 & DESC_CS_MASK) ||
+                !(ss_e2 & DESC_W_MASK))
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (!(ss_e2 & DESC_P_MASK))
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+
+            param_count = e2 & 0x1f;
+            push_size = ((param_count * 2) + 8) << shift;
+
+            old_esp = env->regs[R_ESP];
+            old_ss = env->segs[R_SS].selector;
+            if (!(env->segs[R_SS].flags & DESC_B_MASK))
+                old_esp &= 0xffff;
+            old_ssp = env->segs[R_SS].base + old_esp;
+
+            /* XXX: from this point not restartable */
+            load_seg(R_SS, ss, env->eip);
+
+            if (!(env->segs[R_SS].flags & DESC_B_MASK))
+                sp &= 0xffff;
+            ssp = env->segs[R_SS].base + sp;
+            if (shift) {
+                ssp -= 4;
+                stl(ssp, old_ss);
+                ssp -= 4;
+                stl(ssp, old_esp);
+                ssp -= 4 * param_count;
+                for(i = 0; i < param_count; i++) {
+                    val = ldl(old_ssp + i * 4);
+                    stl(ssp + i * 4, val);
+                }
+            } else {
+                ssp -= 2;
+                stw(ssp, old_ss);
+                ssp -= 2;
+                stw(ssp, old_esp);
+                ssp -= 2 * param_count;
+                for(i = 0; i < param_count; i++) {
+                    val = lduw(old_ssp + i * 2);
+                    stw(ssp + i * 2, val);
+                }
+            }
+        } else {
+            /* to same priviledge */
+            if (!(env->segs[R_SS].flags & DESC_B_MASK))
+                sp &= 0xffff;
+            ssp = env->segs[R_SS].base + sp;
+            push_size = (4 << shift);
+        }
+
+        if (shift) {
+            ssp -= 4;
+            stl(ssp, env->segs[R_CS].selector);
+            ssp -= 4;
+            stl(ssp, next_eip);
+        } else {
+            ssp -= 2;
+            stw(ssp, env->segs[R_CS].selector);
+            ssp -= 2;
+            stw(ssp, next_eip);
+        }
+
+        sp -= push_size;
+        load_seg(R_CS, selector, env->eip);
+        /* from this point, not restartable if same priviledge */
+        if (!(env->segs[R_SS].flags & DESC_B_MASK))
+            env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
+        else
+            env->regs[R_ESP] = sp;
+        EIP = offset;
     }
 }

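Note on helper_lcall_protected_T0_T1 above: when the target is a call gate, the gate descriptor's two 32-bit words e1/e2 carry the target selector, the 16:16-split offset and the parameter count, which the new code unpacks with selector = e1 >> 16, offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff) and param_count = e2 & 0x1f. A small self-contained sketch of that unpacking, assuming only what the hunk shows (decode_call_gate and the example descriptor words are made up; DESC_TYPE_SHIFT is assumed to be 8):

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_TYPE_SHIFT 8        /* assumption; only used for the type field */

    struct call_gate {
        uint32_t selector;           /* target code segment selector (e1 bits 31..16) */
        uint32_t offset;             /* target EIP, high half in e2, low half in e1 */
        uint32_t type;               /* 4 = 286 call gate, 12 = 386 call gate */
        uint32_t param_count;        /* (d)words copied to the inner stack */
    };

    /* hypothetical standalone decoder mirroring the extraction in the hunk above */
    static struct call_gate decode_call_gate(uint32_t e1, uint32_t e2)
    {
        struct call_gate g;
        g.selector    = e1 >> 16;
        g.offset      = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        g.type        = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        g.param_count = e2 & 0x1f;
        return g;
    }

    int main(void)
    {
        /* made-up descriptor words, just to exercise the decoder */
        struct call_gate g = decode_call_gate(0x00081234, 0x0000ec02);
        printf("sel=0x%04x off=0x%08x type=%u params=%u\n",
               g.selector, g.offset, g.type, g.param_count);
        return 0;
    }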
@@ lines 1047-1053 @@
 }

 /* protected mode iret */
-void helper_iret_protected(int shift)
+static inline void helper_ret_protected(int shift, int is_iret, int addend)
 {
     uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
     uint32_t new_es, new_ds, new_fs, new_gs;

@@ lines 1061-1076 @@
     ssp = env->segs[R_SS].base + sp;
     if (shift == 1) {
         /* 32 bits */
-        new_eflags = ldl(ssp + 8);
+        if (is_iret)
+            new_eflags = ldl(ssp + 8);
         new_cs = ldl(ssp + 4) & 0xffff;
         new_eip = ldl(ssp);
-        if (new_eflags & VM_MASK)
+        if (is_iret && (new_eflags & VM_MASK))
             goto return_to_vm86;
     } else {
         /* 16 bits */
-        new_eflags = lduw(ssp + 4);
+        if (is_iret)
+            new_eflags = lduw(ssp + 4);
         new_cs = lduw(ssp + 2);
         new_eip = lduw(ssp);
     }

@@ lines 1099-1116 @@
     if (rpl == cpl) {
         /* return to same priledge level */
         load_seg(R_CS, new_cs, env->eip);
-        new_esp = sp + (6 << shift);
+        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
     } else {
-        /* return to differentr priviledge level */
+        /* return to different priviledge level */
+        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
         if (shift == 1) {
             /* 32 bits */
-            new_esp = ldl(ssp + 12);
-            new_ss = ldl(ssp + 16) & 0xffff;
+            new_esp = ldl(ssp);
+            new_ss = ldl(ssp + 4) & 0xffff;
         } else {
             /* 16 bits */
-            new_esp = lduw(ssp + 6);
-            new_ss = lduw(ssp + 8);
+            new_esp = lduw(ssp);
+            new_ss = lduw(ssp + 2);
         }

         if ((new_ss & 3) != rpl)

@@ lines 1136-1150 @@
         env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
             (new_esp & 0xffff);
     env->eip = new_eip;
-    if (cpl == 0)
-        eflags_mask = FL_UPDATE_CPL0_MASK;
-    else
-        eflags_mask = FL_UPDATE_MASK32;
-    if (shift == 0)
-        eflags_mask &= 0xffff;
-    load_eflags(new_eflags, eflags_mask);
+    if (is_iret) {
+        if (cpl == 0)
+            eflags_mask = FL_UPDATE_CPL0_MASK;
+        else
+            eflags_mask = FL_UPDATE_MASK32;
+        if (shift == 0)
+            eflags_mask &= 0xffff;
+        load_eflags(new_eflags, eflags_mask);
+    }
     return;

 return_to_vm86:

@@ lines 1168-1183 @@
     env->regs[R_ESP] = new_esp;
 }

+void helper_iret_protected(int shift)
+{
+    helper_ret_protected(shift, 1, 0);
+}
+
+void helper_lret_protected(int shift, int addend)
+{
+    helper_ret_protected(shift, 0, addend);
+}
+
 void helper_movl_crN_T0(int reg)
 {
     env->cr[reg] = T0;
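Note on helper_ret_protected above: the expression sp + (4 << shift) + ((2 * is_iret) << shift) + addend simply counts the bytes popped. For a 32-bit iret (shift = 1, is_iret = 1, addend = 0) that is 8 + 4 = 12 bytes, i.e. EIP, CS and EFLAGS; for a 32-bit lret (is_iret = 0) it is 8 bytes for EIP and CS plus whatever addend the caller supplies (the immediate of lret imm16); with shift = 0 the same cases shrink to 6 and 4 bytes. The two wrappers at lines 1171-1179 just select these cases: helper_iret_protected passes is_iret = 1 and addend = 0, while helper_lret_protected passes is_iret = 0 with the instruction's addend.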