Lines matching defs:fat in fat.c
32 __RCSID("$NetBSD: fat.c,v 1.18 2006/06/05 16:51:18 christos Exp $");
186 fat_clear_cl_head(struct fat_descriptor *fat, cl_t cl)
188 bitmap_clear(&fat->headbitmap, cl);
192 fat_is_cl_head(struct fat_descriptor *fat, cl_t cl)
194 return (bitmap_get(&fat->headbitmap, cl));
198 fat_is_cl_head_in_range(struct fat_descriptor *fat, cl_t cl)
200 return (!(bitmap_none_in_range(&fat->headbitmap, cl)));
204 fat_get_head_count(struct fat_descriptor *fat)
206 return (bitmap_count(&fat->headbitmap));
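The bitmap helpers above (lines 186-206) record which clusters start a cluster chain. A minimal sketch of such a one-bit-per-cluster "head map" follows; it assumes nothing about the actual bitmap implementation behind bitmap_set/bitmap_clear/bitmap_count in fat.c, and the headmap_* names are illustrative only. The idea, as the loop excerpted later in the listing suggests, is to clear the bit of every cluster that turns out to be some other cluster's successor, so that only true chain heads stay set.

    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint32_t cl_t;

    #define BITS_PER_WORD   (sizeof(unsigned long) * CHAR_BIT)

    /* One bit per cluster, packed into an array of unsigned long. */
    struct headmap {
        unsigned long *bits;
        cl_t nbits;
    };

    static bool
    headmap_init(struct headmap *hm, cl_t nbits)
    {
        hm->nbits = nbits;
        hm->bits = calloc((nbits + BITS_PER_WORD - 1) / BITS_PER_WORD,
            sizeof(unsigned long));
        return (hm->bits != NULL);
    }

    static void
    headmap_set(struct headmap *hm, cl_t cl)
    {
        hm->bits[cl / BITS_PER_WORD] |= 1UL << (cl % BITS_PER_WORD);
    }

    static void
    headmap_clear(struct headmap *hm, cl_t cl)
    {
        hm->bits[cl / BITS_PER_WORD] &= ~(1UL << (cl % BITS_PER_WORD));
    }

    static bool
    headmap_get(const struct headmap *hm, cl_t cl)
    {
        return ((hm->bits[cl / BITS_PER_WORD] >> (cl % BITS_PER_WORD)) & 1);
    }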
215 fat_get_fat12_ptr(struct fat_descriptor *fat, cl_t cl)
217 return (fat->fatbuf + ((cl + (cl >> 1))));
221 fat_get_fat12_next(struct fat_descriptor *fat, cl_t cl)
226 p = fat_get_fat12_ptr(fat, cl);
240 fat_set_fat12_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
247 p = fat_get_fat12_ptr(fat, cl);
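fat_get_fat12_ptr() (line 215) addresses entry cl at byte offset cl + cl/2 because FAT12 packs two 12-bit entries into every three bytes. Below is a self-contained sketch of reading and writing one such entry from a raw little-endian FAT buffer under that layout; the fat12_* names are illustrative, not the fat.c routines. Odd-numbered entries occupy the upper 12 bits of the byte pair, which is why the write path must preserve the neighbouring entry's nibble.

    #include <stdint.h>

    typedef uint32_t cl_t;

    /* Read FAT12 entry 'cl' from a raw little-endian FAT image. */
    static cl_t
    fat12_read(const uint8_t *fatbuf, cl_t cl)
    {
        const uint8_t *p = fatbuf + cl + (cl >> 1);     /* 1.5 bytes/entry */
        uint16_t v = p[0] | (p[1] << 8);

        /* Odd entries live in the upper 12 bits of the 16-bit pair. */
        return ((cl & 1) ? (v >> 4) : (v & 0x0fff));
    }

    /* Write FAT12 entry 'cl', preserving the neighbouring entry's nibble. */
    static void
    fat12_write(uint8_t *fatbuf, cl_t cl, cl_t next)
    {
        uint8_t *p = fatbuf + cl + (cl >> 1);

        if (cl & 1) {
            p[0] = (p[0] & 0x0f) | ((next << 4) & 0xf0);
            p[1] = (next >> 4) & 0xff;
        } else {
            p[0] = next & 0xff;
            p[1] = (p[1] & 0xf0) | ((next >> 8) & 0x0f);
        }
    }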
272 fat_get_fat16_ptr(struct fat_descriptor *fat, cl_t cl)
274 return (fat->fatbuf + (cl << 1));
278 fat_get_fat16_next(struct fat_descriptor *fat, cl_t cl)
283 p = fat_get_fat16_ptr(fat, cl);
293 fat_set_fat16_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
300 p = fat_get_fat16_ptr(fat, cl);
311 fat_get_fat32_ptr(struct fat_descriptor *fat, cl_t cl)
313 return (fat->fatbuf + (cl << 2));
317 fat_get_fat32_next(struct fat_descriptor *fat, cl_t cl)
322 p = fat_get_fat32_ptr(fat, cl);
332 fat_set_fat32_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
339 p = fat_get_fat32_ptr(fat, cl);
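The FAT16 and FAT32 variants (lines 272-339) are plain fixed-width lookups at cl*2 and cl*4. A sketch under the usual on-disk assumptions: little-endian entries, with FAT32 using only the low 28 bits and preserving the reserved top nibble on write. Names are illustrative, not the fat.c routines.

    #include <stdint.h>

    typedef uint32_t cl_t;

    #define CLUST32_MASK    0x0fffffffU     /* FAT32 uses 28 of the 32 bits */

    static cl_t
    fat16_read(const uint8_t *fatbuf, cl_t cl)
    {
        const uint8_t *p = fatbuf + (cl << 1);

        return (p[0] | (p[1] << 8));
    }

    static void
    fat16_write(uint8_t *fatbuf, cl_t cl, cl_t next)
    {
        uint8_t *p = fatbuf + (cl << 1);

        p[0] = next & 0xff;
        p[1] = (next >> 8) & 0xff;
    }

    static cl_t
    fat32_read(const uint8_t *fatbuf, cl_t cl)
    {
        const uint8_t *p = fatbuf + (cl << 2);
        uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
            ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

        return (v & CLUST32_MASK);
    }

    static void
    fat32_write(uint8_t *fatbuf, cl_t cl, cl_t next)
    {
        uint8_t *p = fatbuf + (cl << 2);

        next &= CLUST32_MASK;
        p[0] = next & 0xff;
        p[1] = (next >> 8) & 0xff;
        p[2] = (next >> 16) & 0xff;
        /* Preserve the reserved top nibble of the on-disk entry. */
        p[3] = (p[3] & 0xf0) | ((next >> 24) & 0x0f);
    }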
347 fat_get_iosize(struct fat_descriptor *fat, off_t address)
350 if (address == fat->fat32_lastaddr) {
351 return (fat->fatsize & ((off_t)fat32_cache_chunk_size - 1));
358 fat_flush_fat32_cache_entry(struct fat_descriptor *fat,
365 fd = fd_of_(fat);
370 writesize = fat_get_iosize(fat, entry->addr);
372 fat_addr = fat->fat32_offset + entry->addr;
384 fat_get_fat32_cache_entry(struct fat_descriptor *fat, off_t addr,
394 first = TAILQ_FIRST(&fat->fat32_cache_head);
399 TAILQ_FOREACH(entry, &fat->fat32_cache_head, entries) {
406 TAILQ_REMOVE(&fat->fat32_cache_head, entry, entries);
407 TAILQ_INSERT_HEAD(&fat->fat32_cache_head, entry, entries);
417 entry = TAILQ_LAST(&fat->fat32_cache_head, cachehead);
418 TAILQ_REMOVE(&fat->fat32_cache_head, entry, entries);
419 if (fat_flush_fat32_cache_entry(fat, entry) != FSOK) {
423 rwsize = fat_get_iosize(fat, addr);
424 fat_addr = fat->fat32_offset + addr;
426 fd = fd_of_(fat);
435 TAILQ_INSERT_HEAD(&fat->fat32_cache_head, entry, entries);
441 fat_get_fat32_cached_ptr(struct fat_descriptor *fat, cl_t cl, bool writing)
447 entry = fat_get_fat32_cache_entry(fat, addr, writing);
459 fat_get_fat32_cached_next(struct fat_descriptor *fat, cl_t cl)
464 p = fat_get_fat32_cached_ptr(fat, cl, false);
477 fat_set_fat32_cached_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
484 p = fat_get_fat32_cached_ptr(fat, cl, true);
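The fat32_cache lines (347-484) show an LRU cache of fixed-size FAT chunks, used when a FAT32 table is too big to keep fully resident: fat_get_iosize() returns a full chunk except for the final, partial one, a cache hit moves the entry to the front of a TAILQ, and a miss recycles the tail entry after flushing it if dirty; the cached_ptr/next/set wrappers then read or modify bytes inside the chunk. Below is a sketch of just the move-to-front/evict-tail pattern using <sys/queue.h>; the chunk I/O is stubbed out and all names are illustrative.

    #include <sys/queue.h>
    #include <sys/types.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative cache entry: one fixed-size chunk of the FAT. */
    struct cache_entry {
        uint8_t  *chunk;    /* chunk-sized buffer */
        off_t     addr;     /* offset of this chunk within the FAT */
        bool      dirty;    /* needs write-back before reuse */
        TAILQ_ENTRY(cache_entry) entries;
    };
    TAILQ_HEAD(cachehead, cache_entry);

    /* Stubs standing in for the real chunk I/O (assumption, not fat.c API). */
    static bool chunk_write(struct cache_entry *e) { (void)e; return (true); }
    static bool chunk_read(struct cache_entry *e, off_t addr) { e->addr = addr; return (true); }

    /*
     * Return the cache entry holding 'addr', keeping the list in LRU order:
     * most recently used at the head, eviction candidate at the tail.
     * The cache is assumed to be pre-populated with entries.
     */
    static struct cache_entry *
    cache_lookup(struct cachehead *head, off_t addr, bool writing)
    {
        struct cache_entry *entry;

        TAILQ_FOREACH(entry, head, entries) {
            if (entry->addr == addr) {
                /* Hit: move to the front so it is evicted last. */
                TAILQ_REMOVE(head, entry, entries);
                TAILQ_INSERT_HEAD(head, entry, entries);
                goto found;
            }
        }

        /* Miss: recycle the least recently used entry. */
        entry = TAILQ_LAST(head, cachehead);
        TAILQ_REMOVE(head, entry, entries);
        if (entry->dirty && !chunk_write(entry))
            return (NULL);
        entry->dirty = false;
        if (!chunk_read(entry, addr))
            return (NULL);
        TAILQ_INSERT_HEAD(head, entry, entries);

    found:
        if (writing)
            entry->dirty = true;
        return (entry);
    }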
493 cl_t fat_get_cl_next(struct fat_descriptor *fat, cl_t cl)
496 if (!valid_cl(fat, cl)) {
501 return (fat->get(fat, cl));
504 int fat_set_cl_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
512 if (!valid_cl(fat, cl)) {
517 return (fat->set(fat, cl, nextcl));
521 boot_of_(struct fat_descriptor *fat) {
523 return (fat->boot);
527 fat_get_boot(struct fat_descriptor *fat) {
529 return (boot_of_(fat));
533 fd_of_(struct fat_descriptor *fat)
535 return (fat->fd);
539 fat_get_fd(struct fat_descriptor * fat)
541 return (fd_of_(fat));
548 fat_is_valid_cl(struct fat_descriptor *fat, cl_t cl)
551 return (valid_cl(fat, cl));
555 valid_cl(struct fat_descriptor *fat, cl_t cl)
557 const struct bootblock *boot = boot_of_(fat);
643 cleardirty(struct fat_descriptor *fat)
651 boot = boot_of_(fat);
652 fd = fd_of_(fat);
693 _readfat(struct fat_descriptor *fat)
702 boot = boot_of_(fat);
703 fd = fd_of_(fat);
704 fat->fatsize = boot->FATsecs * boot->bpbBytesPerSec;
709 fat->is_mmapped = false;
710 fat->use_cache = false;
714 fat->fatbuf = mmap(NULL, fat->fatsize,
716 MAP_SHARED, fd_of_(fat), off);
717 if (fat->fatbuf != MAP_FAILED) {
718 fat->is_mmapped = true;
733 fat->fatsize >= fat32_cache_size) {
735 fat->use_cache = true;
737 fat->fat32_offset = boot->bpbResSectors * boot->bpbBytesPerSec;
738 fat->fat32_lastaddr = fat->fatsize & ~(fat32_cache_chunk_size);
740 readsize = fat->fatsize;
742 fat->fatbuf = malloc(readsize);
743 if (fat->fatbuf == NULL) {
752 if ((size_t)read(fd, fat->fatbuf, readsize) != readsize) {
761 if (fat->use_cache) {
762 TAILQ_INIT(&fat->fat32_cache_head);
771 entry[i].chunk = &fat->fatbuf[entry[i].addr];
772 TAILQ_INSERT_TAIL(&fat->fat32_cache_head,
775 fat->fat32_cache_allentries = entry;
781 free(fat->fatbuf);
782 fat->fatbuf = NULL;
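_readfat() (lines 693-782) maps the first FAT read/write with mmap when it can and otherwise reads it into a malloc'd buffer, switching to the chunked cache above for very large FAT32 tables. A sketch of the map-or-read fallback only, under a hypothetical map_or_read_fat() helper; the cache path and error reporting are omitted.

    #include <sys/mman.h>
    #include <sys/types.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    /*
     * Map the FAT read/write if possible; otherwise fall back to a private
     * buffer filled with pread(2).  'off' is the byte offset of the first
     * FAT on the device.
     */
    static uint8_t *
    map_or_read_fat(int fd, off_t off, size_t fatsize, bool *mapped)
    {
        uint8_t *buf;

        /* mmap may fail here, e.g. for an unaligned offset or an
         * uncooperative device; that is what the fallback is for. */
        buf = mmap(NULL, fatsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off);
        if (buf != MAP_FAILED) {
            *mapped = true;
            return (buf);
        }

        *mapped = false;
        buf = malloc(fatsize);
        if (buf == NULL)
            return (NULL);
        if (pread(fd, buf, fatsize, off) != (ssize_t)fatsize) {
            free(buf);
            return (NULL);
        }
        return (buf);
    }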
787 releasefat(struct fat_descriptor *fat)
789 if (fat->is_mmapped) {
790 munmap(fat->fatbuf, fat->fatsize);
792 if (fat->use_cache) {
793 free(fat->fat32_cache_allentries);
794 fat->fat32_cache_allentries = NULL;
796 free(fat->fatbuf);
798 fat->fatbuf = NULL;
799 bitmap_dtor(&fat->headbitmap);
808 struct fat_descriptor *fat;
815 fat = calloc(1, sizeof(struct fat_descriptor));
816 if (fat == NULL) {
821 fat->fd = fs;
822 fat->boot = boot;
824 if (!_readfat(fat)) {
825 free(fat);
828 buffer = fat->fatbuf;
833 fat->get = fat_get_fat12_next;
834 fat->set = fat_set_fat12_next;
837 fat->get = fat_get_fat16_next;
838 fat->set = fat_set_fat16_next;
841 if (fat->is_mmapped || !fat->use_cache) {
842 fat->get = fat_get_fat32_next;
843 fat->set = fat_set_fat32_next;
845 fat->get = fat_get_fat32_cached_next;
846 fat->set = fat_set_fat32_cached_next;
851 releasefat(fat);
852 free(fat);
856 if (bitmap_ctor(&fat->headbitmap, boot->NumClusters,
860 releasefat(fat);
861 free(fat);
965 nextcl = fat_get_cl_next(fat, cl);
973 if (fat_is_cl_head(fat, cl)) {
974 fat_clear_cl_head(fat, cl);
978 if (fat_is_cl_head(fat, cl)) {
979 fat_clear_cl_head(fat, cl);
982 } else if (!valid_cl(fat, nextcl) && nextcl < CLUST_RSRVD) {
988 ret |= fat_set_cl_next(fat, cl, CLUST_EOF);
991 } else if (valid_cl(fat, nextcl)) {
992 if (fat_is_cl_head(fat, nextcl)) {
993 fat_clear_cl_head(fat, nextcl);
998 ret |= fat_set_cl_next(fat, cl, CLUST_EOF);
1007 releasefat(fat);
1008 free(fat);
1011 *fp = fat;
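The loop excerpted at lines 965-998 turns the head bitmap into the real set of chain heads: clusters that are free, or that appear as another cluster's successor, get their head bit cleared, and obviously bad successor values are rewritten to CLUST_EOF. A simplified single pass over plain arrays, with the free-cluster handling and interactive repair omitted; names and constants are illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t cl_t;

    #define CLUST_FIRST     2       /* first allocatable cluster number */

    /*
     * 'next[cl]' is cluster cl's successor; 'head[cl]' starts out true for
     * every in-use cluster.  After the pass, head[] is true only for
     * clusters that no other cluster points at, i.e. chain heads.
     */
    static void
    mark_heads(const cl_t *next, bool *head, cl_t nclusters)
    {
        cl_t cl, nextcl;

        for (cl = CLUST_FIRST; cl < nclusters; cl++) {
            nextcl = next[cl];
            /* A valid successor can never itself be a chain head. */
            if (nextcl >= CLUST_FIRST && nextcl < nclusters)
                head[nextcl] = false;
        }
    }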
1034 checkchain(struct fat_descriptor *fat, cl_t head, size_t *chainsize)
1045 assert(valid_cl(fat, head));
1046 assert(fat_is_cl_head(fat, head));
1051 fat_clear_cl_head(fat, head);
1069 for (next_cl = fat_get_cl_next(fat, current_cl);
1070 valid_cl(fat, next_cl);
1071 prev_cl = current_cl, current_cl = next_cl, next_cl = fat_get_cl_next(fat, current_cl))
1098 next_cl & boot_of_(fat)->ClustMask);
1110 return (fat_set_cl_next(fat, current_cl, next_cl) | FSFATMOD);
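checkchain() (lines 1034-1110) walks a chain from its head, counting clusters and, when it hits a successor that is neither a valid cluster nor a reserved/EOF marker, truncates the chain by rewriting the last good entry. A sketch over a plain array; cycle detection and the interactive prompt in fat.c are omitted, and the CLUST_* values follow the usual msdosfs conventions (illustrative here).

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t cl_t;

    #define CLUST_FIRST     2               /* first allocatable cluster */
    #define CLUST_RSRVD     0xfffffff6U     /* start of reserved/EOF markers */
    #define CLUST_EOF       0xffffffffU     /* canonical end-of-chain mark */

    /*
     * Return the chain length; truncate the chain at the last good cluster
     * if it runs into an out-of-range successor.  Assumes the chain is
     * cycle-free (cycle detection is omitted in this sketch).
     */
    static size_t
    check_chain(cl_t *next, cl_t nclusters, cl_t head)
    {
        cl_t cur = head, nxt;
        size_t len = 1;

        for (nxt = next[cur];
            nxt >= CLUST_FIRST && nxt < nclusters;
            cur = nxt, nxt = next[cur])
            len++;

        if (nxt < CLUST_RSRVD)          /* neither valid nor reserved/EOF */
            next[cur] = CLUST_EOF;      /* truncate the chain here */

        return (len);
    }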
1120 clearchain(struct fat_descriptor *fat, cl_t head)
1123 struct bootblock *boot = boot_of_(fat);
1127 while (valid_cl(fat, current_cl)) {
1128 next_cl = fat_get_cl_next(fat, current_cl);
1129 (void)fat_set_cl_next(fat, current_cl, CLUST_FREE);
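clearchain() (lines 1120-1129) releases a whole chain by following next pointers and writing CLUST_FREE into each entry as it goes. The equivalent over a plain array, with the same illustrative constants:

    #include <stdint.h>

    typedef uint32_t cl_t;

    #define CLUST_FREE      0       /* a zero entry marks a free cluster */
    #define CLUST_FIRST     2       /* first allocatable cluster */

    static void
    clear_chain(cl_t *next, cl_t nclusters, cl_t head)
    {
        cl_t cur = head, nxt;

        while (cur >= CLUST_FIRST && cur < nclusters) {
            nxt = next[cur];
            next[cur] = CLUST_FREE;     /* release this cluster */
            cur = nxt;
        }
    }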
1140 copyfat(struct fat_descriptor *fat, int n)
1148 fd = fd_of_(fat);
1149 boot = boot_of_(fat);
1151 blobs = howmany(fat->fatsize, fat32_cache_size);
1152 tailsize = fat->fatsize % fat32_cache_size;
1158 src_off = fat->fat32_offset;
1168 (size_t)read(fd, fat->fatbuf, rwsize) != rwsize) &&
1175 (size_t)write(fd, fat->fatbuf, rwsize) != rwsize) &&
1188 writefat(struct fat_descriptor *fat)
1197 boot = boot_of_(fat);
1198 fd = fd_of_(fat);
1200 if (fat->use_cache) {
1207 TAILQ_FOREACH(entry, &fat->fat32_cache_head, entries) {
1208 if (fat_flush_fat32_cache_entry(fat, entry) != FSOK) {
1220 if (copyfat(fat, i) != FSOK)
1224 writesz = fat->fatsize;
1226 for (i = fat->is_mmapped ? 1 : 0; i < boot->bpbFATs; i++) {
1230 (size_t)write(fd, fat->fatbuf, writesz) != writesz) &&
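writefat() (lines 1188-1230) pushes the in-memory FAT back to disk: dirty cache chunks are flushed first, copyfat() duplicates the FAT into each backup copy in the cached case, and in the resident case the whole buffer is written to every copy, skipping copy 0 when the buffer is an mmap'ed MAP_SHARED view (that copy already writes through to the device). A sketch of the resident path with pwrite(2); the chunked copyfat() variant is omitted.

    #include <sys/types.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    /*
     * Write the resident FAT image to each FAT copy.  'fat_off' is the byte
     * offset of the first FAT, 'fatsize' the size of one copy and 'nfats'
     * the bpbFATs count.  fat.c starts at copy 1 instead of 0 when the
     * buffer is an mmap'ed view of the first copy.
     */
    static bool
    write_fat_copies(int fd, const uint8_t *fatbuf, off_t fat_off,
        size_t fatsize, unsigned nfats)
    {
        unsigned i;

        for (i = 0; i < nfats; i++) {
            off_t off = fat_off + (off_t)i * (off_t)fatsize;

            if (pwrite(fd, fatbuf, fatsize, off) != (ssize_t)fatsize)
                return (false);
        }
        return (true);
    }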
1245 checklost(struct fat_descriptor *fat)
1253 dosfs = fd_of_(fat);
1254 boot = boot_of_(fat);
1261 chains = fat_get_head_count(fat);
1270 !fat_is_cl_head_in_range(fat, head)) {
1274 if (fat_is_cl_head(fat, head)) {
1275 ret = checkchain(fat, head, &chainlength);
1280 mod |= ret = reconnect(fat, head,
1286 clearchain(fat, head);
1309 (boot->NumFree && fat_get_cl_next(fat, boot->FSNext) != CLUST_FREE))) {
1315 if (fat_get_cl_next(fat, head) == CLUST_FREE) {
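checklost() (lines 1245-1315) runs after directory traversal: any cluster still flagged in the head bitmap starts a chain that no directory entry references, and for each one fsck offers to reconnect it (reconnect(), visible at line 1280) or clears it with clearchain(); it also cross-checks the FSInfo free-cluster hints (NumFree, FSNext). A minimal sketch of the scan itself, assuming the head[] array built earlier; the reconnect/clear decision is only marked by a comment.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t cl_t;

    #define CLUST_FIRST     2       /* first allocatable cluster */

    /* Count lost chains: clusters still marked as heads once every
     * directory has been walked. */
    static unsigned
    count_lost_chains(const bool *head, cl_t nclusters)
    {
        unsigned lost = 0;
        cl_t cl;

        for (cl = CLUST_FIRST; cl < nclusters; cl++) {
            if (head[cl]) {
                /* fsck would reconnect or clear the chain rooted here */
                lost++;
            }
        }
        return (lost);
    }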