Lines Matching refs:bd (references to the symbol bd in littlefs's lfs_emubd block device)
14 #include "bd/lfs_emubd.h"
51 lfs_emubd_t *bd = cfg->context;
60 sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
66 sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
76 sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
86 (bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
87 bd->cfg->erase_size);
116 lfs_emubd_t *bd = cfg->context;
117 bd->cfg = bdcfg;
120 bd->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
121 if (!bd->blocks) {
125 memset(bd->blocks, 0, bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
128 bd->readed = 0;
129 bd->proged = 0;
130 bd->erased = 0;
131 bd->power_cycles = bd->cfg->power_cycles;
132 bd->disk = NULL;
134 if (bd->cfg->disk_path) {
135 bd->disk = malloc(sizeof(lfs_emubd_disk_t));
136 if (!bd->disk) {
140 bd->disk->rc = 1;
141 bd->disk->scratch = NULL;
144 bd->disk->fd = open(bd->cfg->disk_path,
147 bd->disk->fd = open(bd->cfg->disk_path,
150 if (bd->disk->fd < 0) {
158 if (bd->cfg->erase_value != -1) {
159 bd->disk->scratch = malloc(bd->cfg->erase_size);
160 if (!bd->disk->scratch) {
164 memset(bd->disk->scratch,
165 bd->cfg->erase_value,
166 bd->cfg->erase_size);
170 for (size_t i = 0; i < bd->cfg->erase_count; i++) {
171 ssize_t res = write(bd->disk->fd,
172 bd->disk->scratch,
173 bd->cfg->erase_size);
189 lfs_emubd_t *bd = cfg->context;
192 for (lfs_block_t i = 0; i < bd->cfg->erase_count; i++) {
193 lfs_emubd_decblock(bd->blocks[i]);
195 free(bd->blocks);
198 if (bd->disk) {
199 bd->disk->rc -= 1;
200 if (bd->disk->rc == 0) {
201 close(bd->disk->fd);
202 free(bd->disk->scratch);
203 free(bd->disk);
220 lfs_emubd_t *bd = cfg->context;
223 LFS_ASSERT(block < bd->cfg->erase_count);
224 LFS_ASSERT(off % bd->cfg->read_size == 0);
225 LFS_ASSERT(size % bd->cfg->read_size == 0);
226 LFS_ASSERT(off+size <= bd->cfg->erase_size);
229 const lfs_emubd_block_t *b = bd->blocks[block];
232 if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles &&
233 bd->cfg->badblock_behavior == LFS_EMUBD_BADBLOCK_READERROR) {
243 (bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
248 bd->readed += size;
249 if (bd->cfg->read_sleep) {
251 .tv_sec=bd->cfg->read_sleep/1000000000,
252 .tv_nsec=bd->cfg->read_sleep%1000000000},
270 lfs_emubd_t *bd = cfg->context;
273 LFS_ASSERT(block < bd->cfg->erase_count);
274 LFS_ASSERT(off % bd->cfg->prog_size == 0);
275 LFS_ASSERT(size % bd->cfg->prog_size == 0);
276 LFS_ASSERT(off+size <= bd->cfg->erase_size);
279 lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
286 if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles) {
287 if (bd->cfg->badblock_behavior ==
291 } else if (bd->cfg->badblock_behavior ==
293 bd->cfg->badblock_behavior ==
301 if (bd->cfg->erase_value != -1) {
303 LFS_ASSERT(b->data[off+i] == bd->cfg->erase_value);
311 if (bd->disk) {
312 off_t res1 = lseek(bd->disk->fd,
313 (off_t)block*bd->cfg->erase_size + (off_t)off,
321 ssize_t res2 = write(bd->disk->fd, buffer, size);
330 bd->proged += size;
331 if (bd->cfg->prog_sleep) {
333 .tv_sec=bd->cfg->prog_sleep/1000000000,
334 .tv_nsec=bd->cfg->prog_sleep%1000000000},
344 if (bd->power_cycles > 0) {
345 bd->power_cycles -= 1;
346 if (bd->power_cycles == 0) {
348 bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
359 lfs_emubd_t *bd = cfg->context;
362 LFS_ASSERT(block < bd->cfg->erase_count);
365 lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
372 if (bd->cfg->erase_cycles) {
373 if (b->wear >= bd->cfg->erase_cycles) {
374 if (bd->cfg->badblock_behavior ==
378 } else if (bd->cfg->badblock_behavior ==
390 if (bd->cfg->erase_value != -1) {
391 memset(b->data, bd->cfg->erase_value, bd->cfg->erase_size);
394 if (bd->disk) {
395 off_t res1 = lseek(bd->disk->fd,
396 (off_t)block*bd->cfg->erase_size,
404 ssize_t res2 = write(bd->disk->fd,
405 bd->disk->scratch,
406 bd->cfg->erase_size);
416 bd->erased += bd->cfg->erase_size;
417 if (bd->cfg->erase_sleep) {
419 .tv_sec=bd->cfg->erase_sleep/1000000000,
420 .tv_nsec=bd->cfg->erase_sleep%1000000000},
430 if (bd->power_cycles > 0) {
431 bd->power_cycles -= 1;
432 if (bd->power_cycles == 0) {
434 bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
456 lfs_emubd_t *bd = cfg->context;
463 const lfs_emubd_block_t *b = bd->blocks[block];
467 uint8_t erase_value = (bd->cfg->erase_value != -1)
468 ? bd->cfg->erase_value
510 lfs_emubd_t *bd = cfg->context;
511 LFS_EMUBD_TRACE("lfs_emubd_readed -> %"PRIu64, bd->readed);
512 return bd->readed;
517 lfs_emubd_t *bd = cfg->context;
518 LFS_EMUBD_TRACE("lfs_emubd_proged -> %"PRIu64, bd->proged);
519 return bd->proged;
524 lfs_emubd_t *bd = cfg->context;
525 LFS_EMUBD_TRACE("lfs_emubd_erased -> %"PRIu64, bd->erased);
526 return bd->erased;
531 lfs_emubd_t *bd = cfg->context;
532 bd->readed = readed;
539 lfs_emubd_t *bd = cfg->context;
540 bd->proged = proged;
547 lfs_emubd_t *bd = cfg->context;
548 bd->erased = erased;
556 lfs_emubd_t *bd = cfg->context;
559 LFS_ASSERT(block < bd->cfg->erase_count);
563 const lfs_emubd_block_t *b = bd->blocks[block];
578 lfs_emubd_t *bd = cfg->context;
581 LFS_ASSERT(block < bd->cfg->erase_count);
584 lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
598 lfs_emubd_t *bd = cfg->context;
600 LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %"PRIi32, bd->power_cycles);
601 return bd->power_cycles;
608 lfs_emubd_t *bd = cfg->context;
610 bd->power_cycles = power_cycles;
618 lfs_emubd_t *bd = cfg->context;
621 copy->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
627 for (size_t i = 0; i < bd->cfg->erase_count; i++) {
628 copy->blocks[i] = lfs_emubd_incblock(bd->blocks[i]);
632 copy->readed = bd->readed;
633 copy->proged = bd->proged;
634 copy->erased = bd->erased;
635 copy->power_cycles = bd->power_cycles;
636 copy->disk = bd->disk;
640 copy->cfg = bd->cfg;
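
The fragments above are from littlefs's emulated block device (lfs_emubd): cfg->context points at an lfs_emubd_t, bd->cfg carries the emulation parameters (geometry, erase value, wear and power-loss simulation, optional on-disk mirror via disk_path), and bd->readed/proged/erased accumulate I/O statistics. As a minimal sketch, the program below shows one way such a block device is typically wired into lfs_config for a test harness. The config fields mirror the ones referenced above, but the concrete values, the lfs_emubd_create(&cfg, &bdcfg) entry point, and the accessor return types are assumptions that can differ between littlefs versions.

/* Minimal sketch: wiring lfs_emubd into littlefs for a test harness.
 * The struct fields mirror the fragments above; the geometry values,
 * the lfs_emubd_create(&cfg, &bdcfg) entry point, and the printf casts
 * are illustrative assumptions, not a definitive setup. */
#include <stdio.h>
#include <inttypes.h>
#include "lfs.h"
#include "bd/lfs_emubd.h"

#define ERASE_SIZE  4096
#define ERASE_COUNT 256

int main(void) {
    // the emulated block device lives in cfg->context (see the fragments above)
    lfs_emubd_t bd;

    // block-device-level configuration: geometry, erase emulation, and
    // optional wear/power-loss simulation (all fields referenced above)
    struct lfs_emubd_config bdcfg = {
        .read_size    = 16,
        .prog_size    = 16,
        .erase_size   = ERASE_SIZE,
        .erase_count  = ERASE_COUNT,
        .erase_value  = 0xff,  // emulate erased state; -1 skips erase emulation
        .erase_cycles = 0,     // 0 disables bad-block simulation
        .power_cycles = 0,     // 0 disables power-loss injection
    };

    // littlefs configuration routed through the emulated block device
    struct lfs_config cfg = {
        .context        = &bd,
        .read           = lfs_emubd_read,
        .prog           = lfs_emubd_prog,
        .erase          = lfs_emubd_erase,
        .sync           = lfs_emubd_sync,
        .read_size      = 16,
        .prog_size      = 16,
        .block_size     = ERASE_SIZE,
        .block_count    = ERASE_COUNT,
        .block_cycles   = 500,
        .cache_size     = 16,
        .lookahead_size = 16,
    };

    int err = lfs_emubd_create(&cfg, &bdcfg);
    if (err) {
        return err;
    }

    // format and mount through the emulated device
    lfs_t lfs;
    lfs_format(&lfs, &cfg);
    lfs_mount(&lfs, &cfg);

    // ... exercise the filesystem here ...

    // the bd->readed/proged/erased counters seen above are exposed
    // through these accessors
    printf("readed: %"PRIu64"\n", (uint64_t)lfs_emubd_readed(&cfg));
    printf("proged: %"PRIu64"\n", (uint64_t)lfs_emubd_proged(&cfg));
    printf("erased: %"PRIu64"\n", (uint64_t)lfs_emubd_erased(&cfg));

    lfs_unmount(&lfs);
    lfs_emubd_destroy(&cfg);
    return 0;
}

Setting erase_cycles nonzero together with badblock_behavior (e.g. LFS_EMUBD_BADBLOCK_READERROR, seen above) turns the same harness into a wear/bad-block test, and a nonzero power_cycles plus a powerloss_cb injects simulated power failures.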