/[qemu]/qemu/exec.c

Diff of /qemu/exec.c

--- revision 1.68 by bellard, Sun Nov 20 10:35:40 2005 UTC
+++ revision 1.69 by bellard, Mon Nov 21 23:25:50 2005 UTC
# Line 74 (v1.68) / Line 74 (v1.69): int phys_ram_fd;
 uint8_t *phys_ram_base;
 uint8_t *phys_ram_dirty;
 
+CPUState *first_cpu;
+/* current CPU in the current thread. It is only valid inside
+   cpu_exec() */
+CPUState *cpu_single_env;
+
 typedef struct PageDesc {
     /* list of TBs intersecting this ram page */
     TranslationBlock *first_tb;
# Line 233 (v1.68) / Line 238 (v1.69): static inline PhysPageDesc *phys_page_find
 }
 
 #if !defined(CONFIG_USER_ONLY)
-static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
-                             target_ulong vaddr);
+static void tlb_protect_code(ram_addr_t ram_addr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
 #endif
 
-void cpu_exec_init(void)
+void cpu_exec_init(CPUState *env)
 {
+    CPUState **penv;
+    int cpu_index;
+
     if (!code_gen_ptr) {
         code_gen_ptr = code_gen_buffer;
         page_init();
         io_mem_init();
     }
+    env->next_cpu = NULL;
+    penv = &first_cpu;
+    cpu_index = 0;
+    while (*penv != NULL) {
+        penv = (CPUState **)&(*penv)->next_cpu;
+        cpu_index++;
+    }
+    env->cpu_index = cpu_index;
+    *penv = env;
 }
 
 static inline void invalidate_page_bitmap(PageDesc *p)
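Note: the hunk above turns CPU state into a global singly-linked list. cpu_exec_init() now takes the CPUState, appends it to the chain rooted at first_cpu, and hands out sequential cpu_index values. A minimal standalone sketch of the same registration pattern follows; the struct is reduced to the two fields the list needs, and the helper names (cpu_list_add) are mine, not the patch's:

    #include <stdio.h>

    /* reduced stand-in for CPUState: only the fields the list uses */
    typedef struct CPUState {
        int cpu_index;
        struct CPUState *next_cpu;
    } CPUState;

    static CPUState *first_cpu;

    /* append env to the list and give it the next free index,
       mirroring the loop added to cpu_exec_init() */
    static void cpu_list_add(CPUState *env)
    {
        CPUState **penv = &first_cpu;
        int cpu_index = 0;

        env->next_cpu = NULL;
        while (*penv != NULL) {
            penv = &(*penv)->next_cpu;
            cpu_index++;
        }
        env->cpu_index = cpu_index;
        *penv = env;
    }

    int main(void)
    {
        CPUState a, b;
        cpu_list_add(&a);
        cpu_list_add(&b);
        for (CPUState *env = first_cpu; env != NULL; env = env->next_cpu)
            printf("cpu %d\n", env->cpu_index);
        return 0;
    }

Walking via a pointer-to-pointer (penv) lets the same assignment handle both the empty-list case and an append, with no head special case.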
# Line 277 (v1.68) / Line 293 (v1.69): static void page_flush_tb(void)
 
 /* flush all the translation blocks */
 /* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *env)
+void tb_flush(CPUState *env1)
 {
+    CPUState *env;
 #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
            code_gen_ptr - code_gen_buffer,
# Line 286 (v1.68) / Line 303 (v1.69): void tb_flush
            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
 #endif
     nb_tbs = 0;
-    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+
+    for(env = first_cpu; env != NULL; env = env->next_cpu) {
+        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+    }
 
     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
     page_flush_tb();
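Note: translated code is shared by all CPUs, so once the code buffer is reset, every CPU's tb_jmp_cache holds dangling pointers; the old code cleared only the calling CPU's cache. A reduced sketch of the new walk (the trimmed CPUState, the TB_JMP_CACHE_SIZE value, and the helper name are illustrative):

    #include <string.h>

    #define TB_JMP_CACHE_SIZE 256            /* illustrative size */

    typedef struct CPUState {
        void *tb_jmp_cache[TB_JMP_CACHE_SIZE];
        struct CPUState *next_cpu;
    } CPUState;

    static CPUState *first_cpu;

    /* after the code buffer is reset, every cached TB pointer in every
       CPU is dangling, so all jump caches must be wiped */
    static void flush_all_jmp_caches(void)
    {
        CPUState *env;
        for(env = first_cpu; env != NULL; env = env->next_cpu)
            memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
    }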
# Line 424 (v1.68) / Line 444 (v1.69): static inline void tb_reset_jump
 
 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
 {
+    CPUState *env;
     PageDesc *p;
     unsigned int h, n1;
     target_ulong phys_pc;
# Line 451 (v1.68) / Line 472 (v1.69): static inline void tb_phys_invalidate
 
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
-    cpu_single_env->tb_jmp_cache[h] = NULL;
+    for(env = first_cpu; env != NULL; env = env->next_cpu) {
+        if (env->tb_jmp_cache[h] == tb)
+            env->tb_jmp_cache[h] = NULL;
+    }
 
     /* suppress this TB from the two jump lists */
     tb_jmp_remove(tb, 0);
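Note: the old code blindly nulled slot h in cpu_single_env's jump cache; the new loop checks every CPU's slot and clears it only when it still holds the TB being invalidated, because the cache is direct-mapped and another TB hashing to the same slot may have refilled it. A reduced single-cache sketch of that check (size, hash, and helper names are illustrative):

    #include <stddef.h>

    #define TB_JMP_CACHE_SIZE 256            /* illustrative size */

    typedef struct TranslationBlock {
        unsigned long pc;                    /* reduced to the one field used */
    } TranslationBlock;

    /* one direct-mapped jump cache, standing in for a single CPU's */
    static TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    static unsigned int jmp_cache_hash(unsigned long pc)
    {
        return pc & (TB_JMP_CACHE_SIZE - 1); /* toy hash function */
    }

    static void invalidate_tb(TranslationBlock *tb)
    {
        unsigned int h = jmp_cache_hash(tb->pc);
        /* clear the slot only if it still holds this TB; another TB
           hashing to the same slot may have replaced it meanwhile */
        if (tb_jmp_cache[h] == tb)
            tb_jmp_cache[h] = NULL;
    }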
# Line 818 (v1.68) / Line 842 (v1.69): static inline void tb_alloc_page
        protected. So we handle the case where only the first TB is
        allocated in a physical page */
     if (!last_first_tb) {
-        target_ulong virt_addr;
-
-        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
-        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
+        tlb_protect_code(page_addr);
     }
 #endif
 
# Line 1246 (v1.68) / Line 1267 (v1.69): void tlb_flush_page
 #endif
 }
 
-static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
-{
-    if (addr == (tlb_entry->address &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
-        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
-    }
-}
-
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
-static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
-                             target_ulong vaddr)
+static void tlb_protect_code(ram_addr_t ram_addr)
 {
-    int i;
-
-    vaddr &= TARGET_PAGE_MASK;
-    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
-    tlb_protect_code1(&env->tlb_write[1][i], vaddr);
-
-#ifdef USE_KQEMU
-    if (env->kqemu_enabled) {
-        kqemu_set_notdirty(env, ram_addr);
-    }
-#endif
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;
-
-#if !defined(CONFIG_SOFTMMU)
-    /* NOTE: as we generated the code for this page, it is already at
-       least readable */
-    if (vaddr < MMAP_AREA_END)
-        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
-#endif
+    cpu_physical_memory_reset_dirty(ram_addr,
+                                    ram_addr + TARGET_PAGE_SIZE,
+                                    CODE_DIRTY_FLAG);
 }
 
 /* update the TLB so that writes in physical page 'phys_addr' are no longer
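Note: tlb_protect_code() used to patch one CPU's TLB entries by hand, with kqemu and mprotect special cases; it now just clears CODE_DIRTY_FLAG for the page, so the first write to it traps through the notdirty handlers on whichever CPU performs it. A toy sketch of that dirty-bitmap bookkeeping (page size, flag value, bitmap size, and helper names are illustrative, not the real ones):

    #include <stdint.h>

    #define TARGET_PAGE_BITS 12              /* illustrative page size: 4 KiB */
    #define TARGET_PAGE_SIZE (1UL << TARGET_PAGE_BITS)
    #define CODE_DIRTY_FLAG  0x02            /* illustrative flag value */

    /* one flag byte per guest page (toy bitmap size) */
    static uint8_t phys_ram_dirty[1 << 8];

    /* clear 'flags' for every page in [start, end), in the spirit of
       cpu_physical_memory_reset_dirty() */
    static void reset_dirty(unsigned long start, unsigned long end, int flags)
    {
        unsigned long a;
        for(a = start; a < end; a += TARGET_PAGE_SIZE)
            phys_ram_dirty[a >> TARGET_PAGE_BITS] &= ~flags;
    }

    /* the new tlb_protect_code() is then a one-page special case: once
       CODE_DIRTY_FLAG is clear, the first write to the page traps through
       the notdirty handlers and the translated code can be invalidated */
    static void protect_code(unsigned long ram_addr)
    {
        reset_dirty(ram_addr, ram_addr + TARGET_PAGE_SIZE, CODE_DIRTY_FLAG);
    }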
# Line 1317 (v1.68) / Line 1311 (v1.69): void cpu_physical_memory_reset_dirty
     if (length == 0)
         return;
     len = length >> TARGET_PAGE_BITS;
-    env = cpu_single_env;
 #ifdef USE_KQEMU
+    /* XXX: should not depend on cpu context */
+    env = first_cpu;
     if (env->kqemu_enabled) {
         ram_addr_t addr;
         addr = start;
1331      /* we modify the TLB cache so that the dirty bit will be set again      /* we modify the TLB cache so that the dirty bit will be set again
1332         when accessing the range */         when accessing the range */
1333      start1 = start + (unsigned long)phys_ram_base;      start1 = start + (unsigned long)phys_ram_base;
1334      for(i = 0; i < CPU_TLB_SIZE; i++)      for(env = first_cpu; env != NULL; env = env->next_cpu) {
1335          tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);          for(i = 0; i < CPU_TLB_SIZE; i++)
1336      for(i = 0; i < CPU_TLB_SIZE; i++)              tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1337          tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);          for(i = 0; i < CPU_TLB_SIZE; i++)
1338                tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1339        }
1340    
1341  #if !defined(CONFIG_SOFTMMU)  #if !defined(CONFIG_SOFTMMU)
1342      /* XXX: this is expensive */      /* XXX: this is expensive */
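Note: this is the third hunk that replaces a cpu_single_env access with the same walk over first_cpu. A hypothetical helper, not in the patch, that would express the recurring pattern against the file's own CPUState and first_cpu:

    /* hypothetical convenience wrapper (not part of this change):
       apply fn to every CPU registered on the first_cpu list */
    static void foreach_cpu(void (*fn)(CPUState *env, void *opaque),
                            void *opaque)
    {
        CPUState *env;
        for(env = first_cpu; env != NULL; env = env->next_cpu)
            fn(env, opaque);
    }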
# Line 1407 (v1.68) / Line 1404 (v1.69): static inline void tlb_set_dirty1
 
 /* update the TLB corresponding to virtual page vaddr and phys addr
    addr so that it is no longer dirty */
-static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUState *env,
+                                 unsigned long addr, target_ulong vaddr)
 {
-    CPUState *env = cpu_single_env;
     int i;
 
     addr &= TARGET_PAGE_MASK;
# Line 1723 (v1.68) / Line 1720 (v1.69): void page_unprotect_range
     }
 }
 
-static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUState *env,
+                                 unsigned long addr, target_ulong vaddr)
 {
 }
 #endif /* defined(CONFIG_USER_ONLY) */
# Line 1787 (v1.68) / Line 1785 (v1.69): static void notdirty_mem_writeb
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
 }
 
 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
# Line 1808 (v1.68) / Line 1806 (v1.69): static void notdirty_mem_writew
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
 }
 
 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
# Line 1829 (v1.68) / Line 1827 (v1.69): static void notdirty_mem_writel
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
 }
 
 static CPUReadMemoryFunc *error_mem_read[3] = {
# Line 1953 (v1.68) / Line 1951 (v1.69): void cpu_physical_memory_rw
         if (is_write) {
             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+                /* XXX: could force cpu_single_env to NULL to avoid
+                   potential bugs */
                 if (l >= 4 && ((addr & 3) == 0)) {
                     /* 32 bit write access */
                     val = ldl_p(buf);
