Lines matching refs:wbuf in fs/ubifs/io.c (UBIFS write-buffer I/O); each line is prefixed with its line number in that file.

38 * have to make sure that the write-buffer offset (@wbuf->offs) becomes aligned
40 * write-buffer size (@wbuf->size).
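
The alignment rules referenced throughout this file come down to the kernel's ALIGN() macro, which rounds a value up to the next multiple of a power of two. A minimal userspace sketch of that arithmetic (ALIGN is redefined locally here; the min_io_size and max_write_size values are made-up examples, not taken from any real device):

    #include <stdio.h>

    /* Same arithmetic as the kernel's ALIGN(): round x up to the next
     * multiple of a (a must be a power of two). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int min_io_size = 512;      /* hypothetical NAND page size */
        int max_write_size = 2048;  /* hypothetical write unit */

        printf("%d\n", ALIGN(100, 8));               /* 104: 8-byte node alignment */
        printf("%d\n", ALIGN(700, min_io_size));     /* 1024 */
        printf("%d\n", ALIGN(2049, max_write_size)); /* 4096 */
        return 0;
    }
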
498 struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
500 dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
501 wbuf->need_sync = 1;
502 wbuf->c->need_wbuf_sync = 1;
503 ubifs_wake_up_bgt(wbuf->c);
510 * @wbuf: write-buffer descriptor
512 static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
520 ubifs_assert(c, !hrtimer_active(&wbuf->timer));
523 if (wbuf->no_timer)
526 dbg_jhead(wbuf->jhead),
529 hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
535 * @wbuf: write-buffer descriptor
537 static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
539 if (wbuf->no_timer)
541 wbuf->need_sync = 0;
542 hrtimer_cancel(&wbuf->timer);
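
Taken together, the timer helpers above implement a small flag protocol: when the hrtimer expires, the callback marks the write-buffer as needing a sync and wakes the background thread; arming and cancelling are no-ops when the timer is disabled. A minimal userspace model of just that flag flow (the struct and function names here are invented for illustration, not the kernel API):

    #include <stdio.h>

    struct wbuf_model {
        int no_timer;   /* timer disabled, e.g. for synchronous mounts */
        int need_sync;  /* set by the timer callback, cleared on cancel */
    };

    /* Models what wbuf_timer_callback_nolock() does on expiry; the real
     * code also sets c->need_wbuf_sync and calls ubifs_wake_up_bgt(). */
    static void timer_fired(struct wbuf_model *w)
    {
        w->need_sync = 1;
    }

    /* Models cancel_wbuf_timer_nolock(). */
    static void timer_cancel(struct wbuf_model *w)
    {
        if (w->no_timer)
            return;
        w->need_sync = 0;
    }

    int main(void)
    {
        struct wbuf_model w = { .no_timer = 0, .need_sync = 0 };
        timer_fired(&w);
        printf("need_sync after expiry: %d\n", w.need_sync); /* 1 */
        timer_cancel(&w);
        printf("need_sync after cancel: %d\n", w.need_sync); /* 0 */
        return 0;
    }
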
547 * @wbuf: write-buffer to synchronize
558 int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
560 struct ubifs_info *c = wbuf->c;
563 cancel_wbuf_timer_nolock(wbuf);
564 if (!wbuf->used || wbuf->lnum == -1)
569 wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
570 ubifs_assert(c, !(wbuf->avail & 7));
571 ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
572 ubifs_assert(c, wbuf->size >= c->min_io_size);
573 ubifs_assert(c, wbuf->size <= c->max_write_size);
574 ubifs_assert(c, wbuf->size % c->min_io_size == 0);
576 if (c->leb_size - wbuf->offs >= c->max_write_size)
577 ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
586 sync_len = ALIGN(wbuf->used, c->min_io_size);
587 dirt = sync_len - wbuf->used;
589 ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
590 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
594 spin_lock(&wbuf->lock);
595 wbuf->offs += sync_len;
597 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
600 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
601 * sure that @wbuf->offs + @wbuf->size is aligned to
606 if (c->leb_size - wbuf->offs < c->max_write_size)
607 wbuf->size = c->leb_size - wbuf->offs;
608 else if (wbuf->offs & (c->max_write_size - 1))
609 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
611 wbuf->size = c->max_write_size;
612 wbuf->avail = wbuf->size;
613 wbuf->used = 0;
614 wbuf->next_ino = 0;
615 spin_unlock(&wbuf->lock);
617 if (wbuf->sync_callback)
618 err = wbuf->sync_callback(c, wbuf->lnum,
619 c->leb_size - wbuf->offs, dirt);
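
The sync path above does two things: it pads the used part of the buffer out to a min_io_size boundary before writing it, and it then recomputes wbuf->size so that the next buffer-full write ends on a max_write_size boundary whenever possible (ubifs_wbuf_seek_nolock() below applies the same three-way rule). A runnable sketch of that arithmetic, with example geometry (min_io_size, max_write_size and leb_size are invented values):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int min_io_size = 512, max_write_size = 2048, leb_size = 126976;
        int offs = 2048, used = 700;

        /* Pad to the min I/O unit and write sync_len bytes; 'dirt' is
         * the padding accounted as dirty space. */
        int sync_len = ALIGN(used, min_io_size);   /* 1024 */
        int dirt = sync_len - used;                /* 324 */
        offs += sync_len;                          /* 3072 */

        /* Recompute wbuf->size: shrink at the end of the LEB, otherwise
         * make offs + size land on a max_write_size boundary. */
        int size;
        if (leb_size - offs < max_write_size)
            size = leb_size - offs;
        else if (offs & (max_write_size - 1))
            size = ALIGN(offs, max_write_size) - offs;  /* 1024 */
        else
            size = max_write_size;

        printf("sync_len=%d dirt=%d new offs=%d new size=%d\n",
               sync_len, dirt, offs, size);        /* offs + size == 4096 */
        return 0;
    }
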
625 * @wbuf: write-buffer
633 int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
635 const struct ubifs_info *c = wbuf->c;
637 dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
641 ubifs_assert(c, lnum != wbuf->lnum);
642 ubifs_assert(c, wbuf->used == 0);
644 spin_lock(&wbuf->lock);
645 wbuf->lnum = lnum;
646 wbuf->offs = offs;
647 if (c->leb_size - wbuf->offs < c->max_write_size)
648 wbuf->size = c->leb_size - wbuf->offs;
649 else if (wbuf->offs & (c->max_write_size - 1))
650 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
652 wbuf->size = c->max_write_size;
653 wbuf->avail = wbuf->size;
654 wbuf->used = 0;
655 spin_unlock(&wbuf->lock);
684 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
689 * If the mutex is locked then wbuf is being changed, so
692 if (mutex_is_locked(&wbuf->io_mutex))
695 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
696 if (!wbuf->need_sync) {
697 mutex_unlock(&wbuf->io_mutex);
701 err = ubifs_wbuf_sync_nolock(wbuf);
702 mutex_unlock(&wbuf->io_mutex);
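
The background-sync loop above deliberately avoids blocking: if another task holds the io_mutex the buffer is being changed anyway, so it is skipped rather than waited for, and need_sync is re-checked once the lock is held. The same intent can be expressed in userspace with a trylock; a pthread sketch (the wbuf fields are modeled, not the kernel's):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct wbuf_model {
        pthread_mutex_t io_mutex;
        int need_sync;
    };

    /* Background sync attempt: skip a busy buffer, and re-check
     * need_sync under the lock because the timer may have been
     * cancelled in the meantime. */
    static void try_bg_sync(struct wbuf_model *w)
    {
        if (pthread_mutex_trylock(&w->io_mutex) == EBUSY) {
            printf("busy, skipped\n");
            return;
        }
        if (w->need_sync) {
            printf("syncing\n");
            w->need_sync = 0;   /* stands in for ubifs_wbuf_sync_nolock() */
        }
        pthread_mutex_unlock(&w->io_mutex);
    }

    int main(void)   /* build with -pthread */
    {
        struct wbuf_model w = { PTHREAD_MUTEX_INITIALIZER, 1 };
        try_bg_sync(&w);   /* prints "syncing" */
        return 0;
    }
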
715 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
717 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
718 cancel_wbuf_timer_nolock(wbuf);
719 mutex_unlock(&wbuf->io_mutex);
726 * @wbuf: write-buffer
730 * This function writes data to flash via write-buffer @wbuf. This means that
740 int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
742 struct ubifs_info *c = wbuf->c;
745 dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
747 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
748 ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
749 ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
750 ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
751 ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
752 ubifs_assert(c, wbuf->size >= c->min_io_size);
753 ubifs_assert(c, wbuf->size <= c->max_write_size);
754 ubifs_assert(c, wbuf->size % c->min_io_size == 0);
755 ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
758 if (c->leb_size - wbuf->offs >= c->max_write_size)
759 ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
761 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
766 cancel_wbuf_timer_nolock(wbuf);
771 if (aligned_len <= wbuf->avail) {
776 memcpy(wbuf->buf + wbuf->used, buf, len);
779 ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
782 if (aligned_len == wbuf->avail) {
783 dbg_io("flush jhead %s wbuf to LEB %d:%d",
784 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
785 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
786 wbuf->offs, wbuf->size);
790 spin_lock(&wbuf->lock);
791 wbuf->offs += wbuf->size;
792 if (c->leb_size - wbuf->offs >= c->max_write_size)
793 wbuf->size = c->max_write_size;
795 wbuf->size = c->leb_size - wbuf->offs;
796 wbuf->avail = wbuf->size;
797 wbuf->used = 0;
798 wbuf->next_ino = 0;
799 spin_unlock(&wbuf->lock);
801 spin_lock(&wbuf->lock);
802 wbuf->avail -= aligned_len;
803 wbuf->used += aligned_len;
804 spin_unlock(&wbuf->lock);
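
This is the fast path of ubifs_wbuf_write_nolock(): the length is rounded up to the 8-byte node alignment, and if the result still fits in the buffer it is only copied (padding the tail when len itself is unaligned); the media is touched only when the buffer becomes exactly full. A runnable model of that accounting (buffer geometry is invented, and memset merely stands in for real node data and for ubifs_pad()):

    #include <stdio.h>
    #include <string.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        char wbuf[2048];
        int avail = 2048, used = 0;

        int len = 700;                   /* incoming node */
        int aligned_len = ALIGN(len, 8); /* 704 */

        if (aligned_len <= avail) {
            memset(wbuf + used, 'D', len);                  /* stand-in node data */
            /* ubifs_pad() would write a padding node here when len is
             * not 8-byte aligned; memset is only a stand-in. */
            memset(wbuf + used + len, 0, aligned_len - len);
            if (aligned_len == avail) {
                /* buffer exactly full: flush wbuf->size bytes to flash */
            } else {
                avail -= aligned_len;
                used += aligned_len;
            }
        }
        printf("used=%d avail=%d\n", used, avail); /* used=704 avail=1344 */
        return 0;
    }
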
810 if (wbuf->used) {
816 dbg_io("flush jhead %s wbuf to LEB %d:%d",
817 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
818 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
819 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
820 wbuf->size);
824 wbuf->offs += wbuf->size;
825 len -= wbuf->avail;
826 aligned_len -= wbuf->avail;
827 written += wbuf->avail;
828 } else if (wbuf->offs & (c->max_write_size - 1)) {
831 * @c->max_write_size and @wbuf->size is less than
832 * @c->max_write_size. Write @wbuf->size bytes to make sure the
837 wbuf->size, wbuf->lnum, wbuf->offs);
838 err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
839 wbuf->size);
843 wbuf->offs += wbuf->size;
844 len -= wbuf->size;
845 aligned_len -= wbuf->size;
846 written += wbuf->size;
852 * We align the node length to an 8-byte boundary because we flush the wbuf
859 dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
860 wbuf->offs);
865 err = ubifs_leb_write(c, wbuf->lnum, buf + written,
866 wbuf->offs, m);
869 wbuf->offs += m;
881 memcpy(wbuf->buf, buf + written, min(len, n));
884 ubifs_pad(c, wbuf->buf + len, n - len);
887 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
890 wbuf->offs += n;
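
When the data does not fit, the slow path proceeds in steps: top up and flush the current buffer contents, restore max_write_size alignment with one wbuf->size write if offs is unaligned, then write as many whole max_write_size units as possible straight from the caller's buffer, and finally stage the 8-byte-aligned tail back into the write-buffer. A sketch of how a large write is split (the geometry continues the example values above):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int max_write_size = 2048;
        int avail = 1344;               /* free space left in the wbuf */
        int len = 6000;
        int aligned_len = ALIGN(len, 8); /* 6000 is already 8-byte aligned */
        int written = 0;

        /* Step 1: fill the rest of the buffer and flush it whole. */
        written += avail;
        aligned_len -= avail;

        /* Step 2: write whole max_write_size chunks directly; the kernel
         * computes this as aligned_len >> max_write_shift << max_write_shift. */
        int n = aligned_len & ~(max_write_size - 1);  /* 4096 */
        written += n;
        aligned_len -= n;

        /* Step 3: the remainder is staged back into the write-buffer. */
        printf("flushed=%d direct=%d staged=%d\n",
               1344, n, aligned_len);                 /* 1344 4096 560 */
        return 0;
    }
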
896 spin_lock(&wbuf->lock);
903 memcpy(wbuf->buf, buf + written, len);
906 ubifs_pad(c, wbuf->buf + len, aligned_len - len);
910 if (c->leb_size - wbuf->offs >= c->max_write_size)
911 wbuf->size = c->max_write_size;
913 wbuf->size = c->leb_size - wbuf->offs;
914 wbuf->avail = wbuf->size - aligned_len;
915 wbuf->used = aligned_len;
916 wbuf->next_ino = 0;
917 spin_unlock(&wbuf->lock);
920 if (wbuf->sync_callback) {
921 int free = c->leb_size - wbuf->offs - wbuf->used;
923 err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
928 if (wbuf->used)
929 new_wbuf_timer_nolock(c, wbuf);
935 len, wbuf->lnum, wbuf->offs, err);
938 ubifs_dump_leb(c, wbuf->lnum);
1006 * @wbuf: wbuf to check for un-written data
1019 int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
1022 const struct ubifs_info *c = wbuf->c;
1027 dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
1028 ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1032 spin_lock(&wbuf->lock);
1033 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1036 spin_unlock(&wbuf->lock);
1040 /* Don't read under wbuf */
1041 rlen = wbuf->offs - offs;
1046 memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1047 spin_unlock(&wbuf->lock);
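
ubifs_read_node_wbuf() stitches a node together from two sources when the requested range overlaps the write-buffer: the part below wbuf->offs comes from the media, the rest straight out of the in-memory buffer. A runnable model of the split (the "media" here is just an array, and the offsets are invented):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char media[64] = "0123456789abcdefghijklmnopqrstuv"; /* flushed data */
        char wbufbuf[] = "UNWRITTEN-YET...";                 /* pending data */
        int wbuf_offs = 32;  /* everything below this is on the media */

        int offs = 24, len = 16;  /* requested node spans the boundary */
        char node[16];

        int rlen = len;
        if (wbuf_offs > offs && wbuf_offs < offs + len)
            rlen = wbuf_offs - offs;              /* 8: don't read under wbuf */

        memcpy(node, media + offs, rlen);                  /* media part    */
        memcpy(node + rlen, wbufbuf + offs + rlen - wbuf_offs,
               len - rlen);                                /* buffered part */

        printf("%.16s\n", node);  /* "opqrstuvUNWRITTE" */
        return 0;
    }
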
1145 * @wbuf: write-buffer to initialize
1150 int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
1154 wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
1155 if (!wbuf->buf)
1159 wbuf->inodes = kmalloc(size, GFP_KERNEL);
1160 if (!wbuf->inodes) {
1161 kfree(wbuf->buf);
1162 wbuf->buf = NULL;
1166 wbuf->used = 0;
1167 wbuf->lnum = wbuf->offs = -1;
1175 wbuf->avail = wbuf->size = size;
1176 wbuf->sync_callback = NULL;
1177 mutex_init(&wbuf->io_mutex);
1178 spin_lock_init(&wbuf->lock);
1179 wbuf->c = c;
1180 wbuf->next_ino = 0;
1182 hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1183 wbuf->timer.function = wbuf_timer_callback_nolock;
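
The init function shows the usual two-allocation error unwind: if the second kmalloc() fails, the first buffer is freed and the pointer cleared, so ubifs_wbuf_add_ino_nolock() below can use wbuf->buf == NULL as an "initialized" check. A userspace sketch of the same unwind (malloc standing in for kmalloc, with a modeled struct):

    #include <stdio.h>
    #include <stdlib.h>

    struct wbuf_model {
        char *buf;
        unsigned long *inodes;
    };

    static int wbuf_init(struct wbuf_model *w, size_t bufsz, size_t ninodes)
    {
        w->buf = malloc(bufsz);
        if (!w->buf)
            return -1;

        w->inodes = malloc(ninodes * sizeof(*w->inodes));
        if (!w->inodes) {
            free(w->buf);
            w->buf = NULL;   /* later code tests buf to see if init worked */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct wbuf_model w;
        printf("init: %d\n", wbuf_init(&w, 2048, 128));
        free(w.inodes);
        free(w.buf);
        return 0;
    }
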
1188 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
1189 * @wbuf: the write-buffer where to add
1194 void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
1196 if (!wbuf->buf)
1200 spin_lock(&wbuf->lock);
1201 if (wbuf->used)
1202 wbuf->inodes[wbuf->next_ino++] = inum;
1203 spin_unlock(&wbuf->lock);
1207 * wbuf_has_ino - returns whether the wbuf contains data from the inode.
1208 * @wbuf: the write-buffer
1214 static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
1218 spin_lock(&wbuf->lock);
1219 for (i = 0; i < wbuf->next_ino; i++)
1220 if (inum == wbuf->inodes[i]) {
1224 spin_unlock(&wbuf->lock);
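
The inode bookkeeping is a flat array guarded by wbuf->lock: each buffered write appends the node's inode number (only while the buffer has data), and wbuf_has_ino() is a linear scan over the first next_ino entries. A minimal single-threaded model of that pair (no locking, and the bound check is a defensive addition not present in the kernel code, which sizes the array to fit):

    #include <stdio.h>

    #define MAX_INOS 128

    static unsigned long inodes[MAX_INOS];
    static int next_ino;

    static void add_ino(unsigned long inum)
    {
        if (next_ino < MAX_INOS)
            inodes[next_ino++] = inum;
    }

    static int has_ino(unsigned long inum)
    {
        for (int i = 0; i < next_ino; i++)
            if (inodes[i] == inum)
                return 1;
        return 0;
    }

    int main(void)
    {
        add_ino(42);
        add_ino(7);
        printf("%d %d\n", has_ino(7), has_ino(99)); /* 1 0 */
        return 0;
    }
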
1243 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
1254 if (!wbuf_has_ino(wbuf, inode->i_ino))
1257 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
1258 if (wbuf_has_ino(wbuf, inode->i_ino))
1259 err = ubifs_wbuf_sync_nolock(wbuf);
1260 mutex_unlock(&wbuf->io_mutex);
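
The final fragment is the classic check-lock-recheck idiom: wbuf_has_ino() is first called without the io_mutex as a cheap filter, then repeated under the mutex before syncing, because the buffer may have been flushed in between. A pthread sketch of the idiom (names are modeled on the kernel code, not taken from it):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int has_data_for_inode = 1;  /* stands in for wbuf_has_ino() */

    static void sync_if_needed(void)
    {
        /* Cheap unlocked check first ... */
        if (!has_data_for_inode)
            return;

        pthread_mutex_lock(&io_mutex);
        /* ... then re-check under the lock: another task may have
         * synced the buffer between the two checks. */
        if (has_data_for_inode) {
            has_data_for_inode = 0;  /* stands in for ubifs_wbuf_sync_nolock() */
            printf("synced\n");
        }
        pthread_mutex_unlock(&io_mutex);
    }

    int main(void)   /* build with -pthread */
    {
        sync_if_needed();
        return 0;
    }
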