
Diff of /qemu/exec.c


Between revision 1.71 (bellard, Sat Nov 26 20:12:28 2005 UTC) and revision 1.72 (bellard, Mon Nov 28 21:19:04 2005 UTC)
# Line 1209 (v1.71) / Line 1209 (v1.72): void tlb_flush(CPUState *env, int flush_
     env->current_tb = NULL;

     for(i = 0; i < CPU_TLB_SIZE; i++) {
-        env->tlb_read[0][i].address = -1;
-        env->tlb_write[0][i].address = -1;
-        env->tlb_read[1][i].address = -1;
-        env->tlb_write[1][i].address = -1;
+        env->tlb_table[0][i].addr_read = -1;
+        env->tlb_table[0][i].addr_write = -1;
+        env->tlb_table[0][i].addr_code = -1;
+        env->tlb_table[1][i].addr_read = -1;
+        env->tlb_table[1][i].addr_write = -1;
+        env->tlb_table[1][i].addr_code = -1;
     }

     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
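
The flush now clears three comparators per entry instead of one per direction.
A minimal sketch of the entry layout this implies; the field names come from
the diff, while the exact types and ordering are assumptions (the real
definition lives in the CPU headers):

    #include <stdint.h>

    typedef uint32_t target_ulong;    /* assumption: 32-bit guest */

    typedef struct CPUTLBEntry {
        target_ulong addr_read;   /* comparator for data loads; -1 = no match */
        target_ulong addr_write;  /* comparator for data stores; low bits can
                                     carry IO_MEM_* flags forcing the slow path */
        target_ulong addr_code;   /* comparator for instruction fetches */
        unsigned long addend;     /* host address = guest address + addend */
    } CPUTLBEntry;

A single addend shared by all three access kinds replaces the separate
read/write entries, which is what lets tlb_set_page_exec below store it once.
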
# Line 1230 (v1.71) / Line 1232 (v1.72): void tlb_flush(CPUState *env, int flush_

 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->address &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
-        tlb_entry->address = -1;
+    if (addr == (tlb_entry->addr_read &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+        addr == (tlb_entry->addr_write &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+        addr == (tlb_entry->addr_code &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        tlb_entry->addr_read = -1;
+        tlb_entry->addr_write = -1;
+        tlb_entry->addr_code = -1;
+    }
 }

 void tlb_flush_page(CPUState *env, target_ulong addr)
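
An entry is flushed when any of its three comparators, masked down to the page
number plus the invalid bit, equals the page-aligned address. A small
self-contained illustration with assumed constants (4 KiB pages; the
TLB_INVALID_MASK bit position is hypothetical):

    #include <stdio.h>

    #define TARGET_PAGE_MASK  (~0xfffUL)   /* assumption: 4 KiB pages */
    #define TLB_INVALID_MASK  (1UL << 3)   /* hypothetical flag bit */

    int main(void)
    {
        /* write comparator tagged with a dirty-tracking flag in its low bits */
        unsigned long addr_write = 0x4000a000UL | 0x10UL;
        unsigned long addr = 0x4000a000UL;  /* page-aligned lookup address */

        /* the mask drops every flag bit except "invalid", so the entry still
           matches and is flushed despite the extra tag bits */
        if (addr == (addr_write & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
            printf("entry for page 0x%lx flushed\n", addr >> 12);
        return 0;
    }
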
# Line 1249 (v1.71) / Line 1258 (v1.72): void tlb_flush_page(CPUState *env, targe

     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_flush_entry(&env->tlb_read[0][i], addr);
-    tlb_flush_entry(&env->tlb_write[0][i], addr);
-    tlb_flush_entry(&env->tlb_read[1][i], addr);
-    tlb_flush_entry(&env->tlb_write[1][i], addr);
+    tlb_flush_entry(&env->tlb_table[0][i], addr);
+    tlb_flush_entry(&env->tlb_table[1][i], addr);

     for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
         tb = env->tb_jmp_cache[i];
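
The direct-mapped index is simply the page number modulo the table size, so
both per-mode tables share one index per address. A quick worked example,
assuming TARGET_PAGE_BITS = 12 and CPU_TLB_SIZE = 256 (both values are
assumptions here):

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12    /* assumption: 4 KiB pages */
    #define CPU_TLB_SIZE     256   /* assumption: 256-entry TLB */

    int main(void)
    {
        unsigned long addr = 0x4012f000UL;  /* page-aligned guest address */
        int i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        printf("index = 0x%x\n", i);  /* page number 0x4012f -> index 0x2f */
        return 0;
    }
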
# Line 1295 (v1.71) / Line 1302 (v1.72): static inline void tlb_reset_dirty_range
                                          unsigned long start, unsigned long length)
 {
     unsigned long addr;
-    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if ((addr - start) < length) {
-            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
         }
     }
 }
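
Only the write comparator is retagged: loads from a clean RAM page keep the
fast path, while the next store must fail the TLB compare and take the slow
path that updates the dirty bitmap and invalidates translated code. A rough
model of the store fast path this relies on (the real code is generated from
the softmmu templates; the names and structure here are simplified):

    #include <stdint.h>

    #define TARGET_PAGE_MASK (~0xfffUL)   /* assumption: 4 KiB pages */

    struct tlb_entry_sketch { unsigned long addr_write, addend; };

    static void store32_sketch(struct tlb_entry_sketch *te,
                               unsigned long addr, uint32_t val)
    {
        /* with IO_MEM_NOTDIRTY or'ed into addr_write, this test fails */
        if ((addr & TARGET_PAGE_MASK) == te->addr_write) {
            /* clean RAM page: direct host store through the cached addend */
            *(uint32_t *)(addr + te->addend) = val;
        } else {
            /* NOTDIRTY, ROM, I/O or miss: the slow path marks the page
               dirty, invalidates translated code on it, then refills */
        }
    }
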
# Line 1340 (v1.71) / Line 1347 (v1.72): void cpu_physical_memory_reset_dirty(ram

     start1 = start + (unsigned long)phys_ram_base;
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
+            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
         for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
     }

 #if !defined(CONFIG_SOFTMMU)

# Line 1378 (v1.71) / Line 1385 (v1.72): static inline void tlb_update_dirty(CPUT
 {
     ram_addr_t ram_addr;

-    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
+    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
             tlb_entry->addend - (unsigned long)phys_ram_base;
         if (!cpu_physical_memory_is_dirty(ram_addr)) {
-            tlb_entry->address |= IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
         }
     }
 }

# Line 1392 (v1.71) / Line 1399 (v1.72): void cpu_tlb_update_dirty(CPUState *env)
 {
     int i;
     for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_write[0][i]);
+        tlb_update_dirty(&env->tlb_table[0][i]);
     for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_write[1][i]);
+        tlb_update_dirty(&env->tlb_table[1][i]);
 }

 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                   unsigned long start)
 {
     unsigned long addr;
-    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
-        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
+        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if (addr == start) {
-            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
+            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
         }
     }
 }

# Line 1418 (v1.71) / Line 1425 (v1.72): static inline void tlb_set_dirty(CPUStat

     addr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_set_dirty1(&env->tlb_write[0][i], addr);
-    tlb_set_dirty1(&env->tlb_write[1][i], addr);
+    tlb_set_dirty1(&env->tlb_table[0][i], addr);
+    tlb_set_dirty1(&env->tlb_table[1][i], addr);
 }

 /* add a new TLB entry. At most one entry for a given virtual address
    is permitted. Return 0 if OK or 2 if the page could not be mapped
    (can only happen in non SOFTMMU mode for I/O pages or pages
    conflicting with the host address space). */
-int tlb_set_page(CPUState *env, target_ulong vaddr,
-                 target_phys_addr_t paddr, int prot,
-                 int is_user, int is_softmmu)
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
+                      int is_user, int is_softmmu)
 {
     PhysPageDesc *p;
     unsigned long pd;

# Line 1436 (v1.71) / Line 1443 (v1.72): int tlb_set_page(CPUState *env, target_u
     target_ulong address;
     target_phys_addr_t addend;
     int ret;
+    CPUTLBEntry *te;

     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {

# Line 1445 (v1.71) / Line 1453 (v1.72): int tlb_set_page(CPUState *env, target_u
     }
 #if defined(DEBUG_TLB)
     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
-           vaddr, paddr, prot, is_user, is_softmmu, pd);
+           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
 #endif

     ret = 0;

# Line 1465 (v1.71) / Line 1473 (v1.72): int tlb_set_page(CPUState *env, target_u

         index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         addend -= vaddr;
+        te = &env->tlb_table[is_user][index];
+        te->addend = addend;
         if (prot & PAGE_READ) {
-            env->tlb_read[is_user][index].address = address;
-            env->tlb_read[is_user][index].addend = addend;
+            te->addr_read = address;
+        } else {
+            te->addr_read = -1;
+        }
+        if (prot & PAGE_EXEC) {
+            te->addr_code = address;
         } else {
-            env->tlb_read[is_user][index].address = -1;
-            env->tlb_read[is_user][index].addend = -1;
+            te->addr_code = -1;
         }
         if (prot & PAGE_WRITE) {
             if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                 /* ROM: access is ignored (same as unassigned) */
-                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
-                env->tlb_write[is_user][index].addend = addend;
+                te->addr_write = vaddr | IO_MEM_ROM;
             } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                        !cpu_physical_memory_is_dirty(pd)) {
-                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
-                env->tlb_write[is_user][index].addend = addend;
+                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
             } else {
-                env->tlb_write[is_user][index].address = address;
-                env->tlb_write[is_user][index].addend = addend;
+                te->addr_write = address;
             }
         } else {
-            env->tlb_write[is_user][index].address = -1;
-            env->tlb_write[is_user][index].addend = -1;
+            te->addr_write = -1;
         }
     }
 #if !defined(CONFIG_SOFTMMU)
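
All three comparators are now filled from a single prot mask, so one entry
describes what every kind of access may do, and the shared addend is stored
once. A hypothetical fill for a readable, executable, read-only page (the
PAGE_* names appear in the diff; the call site itself is invented for
illustration):

    /* hypothetical target-MMU call site */
    ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK,
                            PAGE_READ | PAGE_EXEC,  /* no PAGE_WRITE: stores trap */
                            is_user, is_softmmu);

The separate addr_code comparator lets instruction fetches be validated
independently of data reads, for instance on pages that may be read but not
executed.
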
# Line 1586 (v1.71) / Line 1595 (v1.72): void tlb_flush_page(CPUState *env, targe
 {
 }

-int tlb_set_page(CPUState *env, target_ulong vaddr,
-                 target_phys_addr_t paddr, int prot,
-                 int is_user, int is_softmmu)
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
+                      int is_user, int is_softmmu)
 {
     return 0;
 }

# Line 2052 (v1.71) / Line 2061 (v1.72): uint32_t ldl_phys(target_phys_addr_t add
     return val;
 }

+/* warning: addr must be aligned */
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+    int io_index;
+    uint8_t *ptr;
+    uint64_t val;
+    unsigned long pd;
+    PhysPageDesc *p;
+
+    p = phys_page_find(addr >> TARGET_PAGE_BITS);
+    if (!p) {
+        pd = IO_MEM_UNASSIGNED;
+    } else {
+        pd = p->phys_offset;
+    }
+
+    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
+        /* I/O case */
+        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+#ifdef TARGET_WORDS_BIGENDIAN
+        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
+        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
+#else
+        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
+#endif
+    } else {
+        /* RAM case */
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+            (addr & ~TARGET_PAGE_MASK);
+        val = ldq_p(ptr);
+    }
+    return val;
+}
+
 /* XXX: optimize */
 uint32_t ldub_phys(target_phys_addr_t addr)
 {
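
In the I/O branch, io_mem_read[io_index][2] selects the 4-byte handler (the
second index is the log2 of the access size), so the 64-bit value is built
from two 32-bit reads ordered by the guest byte order. A minimal,
self-contained sketch of that assembly step with a stubbed device callback
(the names here are illustrative, not the exec.c ones):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for io_mem_read[io_index][2]: a fake 2-word register file */
    static uint32_t io_read32(uint64_t addr)
    {
        static const uint32_t regs[2] = { 0x11223344u, 0x55667788u };
        return regs[(addr >> 2) & 1];
    }

    int main(void)
    {
        uint64_t val;
        /* little-endian guest: low word first, high word shifted up */
        val = io_read32(0);
        val |= (uint64_t)io_read32(4) << 32;
        printf("%016llx\n", (unsigned long long)val); /* 5566778811223344 */
        return 0;
    }
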
# Line 2068 (v1.71) / Line 2112 (v1.72): uint32_t lduw_phys(target_phys_addr_t ad
     return tswap16(val);
 }

-/* XXX: optimize */
-uint64_t ldq_phys(target_phys_addr_t addr)
-{
-    uint64_t val;
-    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
-    return tswap64(val);
-}
-
 /* warning: addr must be aligned. The ram page is not masked as dirty
    and the code inside is not invalidated. It is useful if the dirty
    bits are used to track modified PTEs */
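
The removed generic ldq_phys read eight target-order bytes into host memory
and fixed them up with tswap64; the new version earlier in the diff avoids
the double copy and handles the I/O case explicitly. A sketch of the byte
reversal tswap64 performs when host and guest byte orders differ (the real
macro compiles to a no-op when they match; this helper is illustrative):

    #include <stdint.h>

    static uint64_t bswap64_sketch(uint64_t v)
    {
        v = (v << 32) | (v >> 32);                  /* swap 32-bit halves */
        v = ((v & 0x0000ffff0000ffffULL) << 16) |
            ((v >> 16) & 0x0000ffff0000ffffULL);    /* swap 16-bit pairs */
        v = ((v & 0x00ff00ff00ff00ffULL) << 8) |
            ((v >> 8) & 0x00ff00ff00ff00ffULL);     /* swap adjacent bytes */
        return v;
    }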

Legend:
  Lines prefixed with "-" were removed from v.1.71.
  Lines prefixed with "+" were added in v.1.72.
