@@ ... @@
  * Block driver for the VMDK format
  *
  * Copyright (c) 2004 Fabrice Bellard
+ * Copyright (c) 2005 Filip Navara
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ ... @@
 #include "vl.h"
 #include "block_int.h"

-/* XXX: this code is untested */
-/* XXX: add write support */
-
 #define VMDK3_MAGIC (('C' << 24) | ('O' << 16) | ('W' << 8) | 'D')
 #define VMDK4_MAGIC (('K' << 24) | ('D' << 16) | ('M' << 8) | 'V')

@@ ... @@
     int64_t grain_offset;
     char filler[1];
     char check_bytes[4];
-} VMDK4Header;
+} __attribute__((packed)) VMDK4Header;

 #define L2_CACHE_SIZE 16

 typedef struct BDRVVmdkState {
     int fd;
     int64_t l1_table_offset;
+    int64_t l1_backup_table_offset;
     uint32_t *l1_table;
+    uint32_t *l1_backup_table;
     unsigned int l1_size;
     uint32_t l1_entry_sectors;
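Aside on the __attribute__((packed)) change above: a tiny standalone illustration (not from the driver, with made-up field names) of why the on-disk header struct has to be packed, so that its in-memory layout matches the byte layout read straight from the image file.

#include <stdio.h>
#include <stdint.h>

/* Toy structs, not the real VMDK4Header: without packing the compiler may
 * insert padding, and the struct no longer matches the on-disk bytes. */
struct plain_hdr  { uint32_t magic; int64_t capacity; char filler[1]; };
struct packed_hdr { uint32_t magic; int64_t capacity; char filler[1]; } __attribute__((packed));

int main(void)
{
    printf("plain:  %zu bytes\n", sizeof(struct plain_hdr));  /* typically 24 on 64-bit hosts */
    printf("packed: %zu bytes\n", sizeof(struct packed_hdr)); /* 13 */
    return 0;
}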
@@ ... @@
     uint32_t magic;
     int l1_size;

-    fd = open(filename, O_RDONLY | O_BINARY | O_LARGEFILE);
-    if (fd < 0)
-        return -1;
+    fd = open(filename, O_RDWR | O_BINARY | O_LARGEFILE);
+    if (fd < 0) {
+        fd = open(filename, O_RDONLY | O_BINARY | O_LARGEFILE);
+        if (fd < 0)
+            return -1;
+        bs->read_only = 1;
+    }
     if (read(fd, &magic, sizeof(magic)) != sizeof(magic))
         goto fail;
     magic = be32_to_cpu(magic);
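The open() change above first tries the image read-write and only falls back to read-only (setting bs->read_only) if that fails. A minimal standalone sketch of the same pattern, using a hypothetical open_image() helper rather than QEMU's code:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper, not part of QEMU: try read-write first, fall back
 * to read-only and report which mode was obtained. */
static int open_image(const char *filename, int *read_only)
{
    int fd = open(filename, O_RDWR);
    *read_only = 0;
    if (fd < 0) {
        fd = open(filename, O_RDONLY);
        if (fd < 0)
            return -1;        /* cannot open at all */
        *read_only = 1;       /* opened, but writes must be refused */
    }
    return fd;
}

int main(int argc, char **argv)
{
    int ro, fd;

    if (argc < 2)
        return 1;
    fd = open_image(argv[1], &ro);
    if (fd < 0) {
        perror("open_image");
        return 1;
    }
    printf("opened %s %s\n", argv[1], ro ? "read-only" : "read-write");
    close(fd);
    return 0;
}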
@@ ... @@
         s->l2_size = 1 << 9;
         s->l1_size = 1 << 6;
         bs->total_sectors = le32_to_cpu(header.disk_sectors);
-        s->l1_table_offset = le32_to_cpu(header.l1dir_offset) * 512;
+        s->l1_table_offset = le32_to_cpu(header.l1dir_offset) << 9;
+        s->l1_backup_table_offset = 0;
         s->l1_entry_sectors = s->l2_size * s->cluster_sectors;
     } else if (magic == VMDK4_MAGIC) {
         VMDK4Header header;
@@ ... @@
             goto fail;
         s->l1_size = (bs->total_sectors + s->l1_entry_sectors - 1)
             / s->l1_entry_sectors;
-        s->l1_table_offset = le64_to_cpu(header.rgd_offset) * 512;
+        s->l1_table_offset = le64_to_cpu(header.rgd_offset) << 9;
+        s->l1_backup_table_offset = le64_to_cpu(header.gd_offset) << 9;
     } else {
         goto fail;
     }
@@ ... @@
         le32_to_cpus(&s->l1_table[i]);
     }

+    if (s->l1_backup_table_offset) {
+        s->l1_backup_table = qemu_malloc(l1_size);
+        if (!s->l1_backup_table)
+            goto fail;
+        if (lseek(fd, s->l1_backup_table_offset, SEEK_SET) == -1)
+            goto fail;
+        if (read(fd, s->l1_backup_table, l1_size) != l1_size)
+            goto fail;
+        for(i = 0; i < s->l1_size; i++) {
+            le32_to_cpus(&s->l1_backup_table[i]);
+        }
+    }
+
     s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
     if (!s->l2_cache)
         goto fail;
     s->fd = fd;
-    /* XXX: currently only read only */
-    bs->read_only = 1;
     return 0;
  fail:
+    qemu_free(s->l1_backup_table);
     qemu_free(s->l1_table);
     qemu_free(s->l2_cache);
     close(fd);
@@ ... @@
 }

 static uint64_t get_cluster_offset(BlockDriverState *bs,
-                                   uint64_t offset)
+                                   uint64_t offset, int allocate)
 {
     BDRVVmdkState *s = bs->opaque;
     unsigned int l1_index, l2_offset, l2_index;
     int min_index, i, j;
-    uint32_t min_count, *l2_table;
+    uint32_t min_count, *l2_table, tmp;
     uint64_t cluster_offset;

     l1_index = (offset >> 9) / s->l1_entry_sectors;
@@ ... @@
     l2_offset = s->l1_table[l1_index];
     if (!l2_offset)
         return 0;
     for(i = 0; i < L2_CACHE_SIZE; i++) {
         if (l2_offset == s->l2_cache_offsets[i]) {
             /* increment the hit count */
@@ ... @@
  found:
     l2_index = ((offset >> 9) / s->cluster_sectors) % s->l2_size;
     cluster_offset = le32_to_cpu(l2_table[l2_index]);
+    if (!cluster_offset) {
+        if (!allocate)
+            return 0;
+        cluster_offset = lseek(s->fd, 0, SEEK_END);
+        ftruncate(s->fd, cluster_offset + (s->cluster_sectors << 9));
+        cluster_offset >>= 9;
+        /* update L2 table */
+        tmp = cpu_to_le32(cluster_offset);
+        l2_table[l2_index] = tmp;
+        lseek(s->fd, ((int64_t)l2_offset * 512) + (l2_index * sizeof(tmp)), SEEK_SET);
+        if (write(s->fd, &tmp, sizeof(tmp)) != sizeof(tmp))
+            return 0;
+        /* update backup L2 table */
+        if (s->l1_backup_table_offset != 0) {
+            l2_offset = s->l1_backup_table[l1_index];
+            lseek(s->fd, ((int64_t)l2_offset * 512) + (l2_index * sizeof(tmp)), SEEK_SET);
+            if (write(s->fd, &tmp, sizeof(tmp)) != sizeof(tmp))
+                return 0;
+        }
+    }
     cluster_offset <<= 9;
     return cluster_offset;
 }
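For reference, a small standalone program (not part of the patch) that mimics the index arithmetic get_cluster_offset() performs when mapping a guest byte offset to an L1 entry, an L2 entry and a sector within the grain. The table geometry below is an assumed example, not values read from a real image:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Assumed example geometry: 512 entries per L2 table, 128-sector grains. */
    unsigned l2_size = 512;
    unsigned cluster_sectors = 128;
    uint64_t l1_entry_sectors = (uint64_t)l2_size * cluster_sectors;

    uint64_t offset = 123456789ULL;              /* arbitrary guest byte offset */
    uint64_t sector = offset >> 9;               /* byte offset -> 512-byte sector */
    unsigned l1_index = sector / l1_entry_sectors;            /* which L2 table */
    unsigned l2_index = (sector / cluster_sectors) % l2_size; /* entry within that table */
    unsigned in_cluster = sector % cluster_sectors;           /* sector inside the grain */

    printf("sector %llu -> l1 %u, l2 %u, sector-in-grain %u\n",
           (unsigned long long)sector, l1_index, l2_index, in_cluster);
    return 0;
}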
@@ ... @@
     int index_in_cluster, n;
     uint64_t cluster_offset;

-    cluster_offset = get_cluster_offset(bs, sector_num << 9);
+    cluster_offset = get_cluster_offset(bs, sector_num << 9, 0);
     index_in_cluster = sector_num % s->cluster_sectors;
     n = s->cluster_sectors - index_in_cluster;
     if (n > nb_sectors)
@@ ... @@
     uint64_t cluster_offset;

     while (nb_sectors > 0) {
-        cluster_offset = get_cluster_offset(bs, sector_num << 9);
+        cluster_offset = get_cluster_offset(bs, sector_num << 9, 0);
         index_in_cluster = sector_num % s->cluster_sectors;
         n = s->cluster_sectors - index_in_cluster;
         if (n > nb_sectors)
@@ ... @@
 static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
                       const uint8_t *buf, int nb_sectors)
 {
-    return -1;
+    BDRVVmdkState *s = bs->opaque;
+    int ret, index_in_cluster, n;
+    uint64_t cluster_offset;
+
+    while (nb_sectors > 0) {
+        index_in_cluster = sector_num & (s->cluster_sectors - 1);
+        n = s->cluster_sectors - index_in_cluster;
+        if (n > nb_sectors)
+            n = nb_sectors;
+        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1);
+        if (!cluster_offset)
+            return -1;
+        lseek(s->fd, cluster_offset + index_in_cluster * 512, SEEK_SET);
+        ret = write(s->fd, buf, n * 512);
+        if (ret != n * 512)
+            return -1;
+        nb_sectors -= n;
+        sector_num += n;
+        buf += n * 512;
+    }
+    return 0;
 }

 static void vmdk_close(BlockDriverState *bs)
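For reference, a standalone sketch (not part of the patch) of how vmdk_read() and the new vmdk_write() walk a request grain by grain: each iteration is clipped to the end of the current grain before get_cluster_offset() is consulted. The grain size and request below are made-up example values, and the modulo form is used where vmdk_write() uses a bitmask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int cluster_sectors = 128;   /* assumed sectors per grain */
    int64_t sector_num = 120;    /* request starts 8 sectors before a grain boundary */
    int nb_sectors = 300;        /* request length in sectors */

    while (nb_sectors > 0) {
        int index_in_cluster = sector_num % cluster_sectors;
        int n = cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        printf("grain %lld: sectors %lld..%lld (%d sectors)\n",
               (long long)(sector_num / cluster_sectors),
               (long long)sector_num, (long long)(sector_num + n - 1), n);
        sector_num += n;
        nb_sectors -= n;
    }
    return 0;
}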