Lines matching refs:ps in drivers/md/dm-snap-persistent.c (the dm-snapshot persistent exception store)
167 static int alloc_area(struct pstore *ps)
172 len = ps->store->chunk_size << SECTOR_SHIFT;
178 ps->area = vmalloc(len);
179 if (!ps->area)
182 ps->zero_area = vzalloc(len);
183 if (!ps->zero_area)
186 ps->header_area = vmalloc(len);
187 if (!ps->header_area)
193 vfree(ps->zero_area);
196 vfree(ps->area);
202 static void free_area(struct pstore *ps)
204 vfree(ps->area);
205 ps->area = NULL;
206 vfree(ps->zero_area);
207 ps->zero_area = NULL;
208 vfree(ps->header_area);
209 ps->header_area = NULL;
229 static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
233 .bdev = dm_snap_cow(ps->store->snap)->bdev,
234 .sector = ps->store->chunk_size * chunk,
235 .count = ps->store->chunk_size,
242 .client = ps->io_client,
258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq);
268 static chunk_t area_location(struct pstore *ps, chunk_t area)
270 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
273 static void skip_metadata(struct pstore *ps)
275 uint32_t stride = ps->exceptions_per_area + 1;
276 chunk_t next_free = ps->next_free;
278 ps->next_free++;
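
The two helpers above encode the COW device layout: the header occupies the first NUM_SNAPSHOT_HDR_CHUNKS chunk(s), and each metadata area is one chunk followed by exceptions_per_area data chunks. Below is a minimal standalone sketch of that arithmetic, assuming NUM_SNAPSHOT_HDR_CHUNKS is 1 and assuming the skip condition inside skip_metadata() (its test line carries no "ps" reference, so it does not appear in this listing) is next_free % stride == NUM_SNAPSHOT_HDR_CHUNKS:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed constant and type, for illustration only. */
	#define NUM_SNAPSHOT_HDR_CHUNKS 1
	typedef uint64_t chunk_t;

	/* Chunk that holds the metadata for a given area. */
	static chunk_t area_location(uint32_t exceptions_per_area, chunk_t area)
	{
		return NUM_SNAPSHOT_HDR_CHUNKS +
		       (chunk_t)(exceptions_per_area + 1) * area;
	}

	/* Mirrors skip_metadata(): step over a chunk that would hold area metadata. */
	static chunk_t skip_metadata(uint32_t exceptions_per_area, chunk_t next_free)
	{
		uint32_t stride = exceptions_per_area + 1;

		if (next_free % stride == NUM_SNAPSHOT_HDR_CHUNKS)
			next_free++;
		return next_free;
	}

	int main(void)
	{
		uint32_t epa = 4;	/* tiny exceptions_per_area, for readability */
		chunk_t next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header + 1st area */
		chunk_t area;

		for (area = 0; area < 3; area++)
			printf("area %llu -> metadata chunk %llu\n",
			       (unsigned long long)area,
			       (unsigned long long)area_location(epa, area));

		/* Hand out data chunks; metadata chunks (6, 11, ...) are skipped. */
		for (int i = 0; i < 10; i++) {
			printf("allocated data chunk %llu\n",
			       (unsigned long long)next_free);
			next_free = skip_metadata(epa, next_free + 1);
		}
		return 0;
	}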
285 static int area_io(struct pstore *ps, int op, int op_flags)
287 chunk_t chunk = area_location(ps, ps->current_area);
289 return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
292 static void zero_memory_area(struct pstore *ps)
294 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
297 static int zero_disk_area(struct pstore *ps, chunk_t area)
299 return chunk_io(ps, ps->zero_area, area_location(ps, area),
303 static int read_header(struct pstore *ps, int *new_snapshot)
315 if (!ps->store->chunk_size) {
316 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
317 bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
319 ps->store->chunk_mask = ps->store->chunk_size - 1;
320 ps->store->chunk_shift = __ffs(ps->store->chunk_size);
324 ps->io_client = dm_io_client_create();
325 if (IS_ERR(ps->io_client))
326 return PTR_ERR(ps->io_client);
328 r = alloc_area(ps);
332 r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
336 dh = ps->header_area;
350 ps->valid = le32_to_cpu(dh->valid);
351 ps->version = le32_to_cpu(dh->version);
354 if (ps->store->chunk_size == chunk_size)
360 chunk_size, ps->store->chunk_size);
363 free_area(ps);
365 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
373 r = alloc_area(ps);
377 free_area(ps);
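
When no chunk size is recorded yet, read_header() picks a default and derives chunk_mask and chunk_shift from it; this only works because the chunk size is a power-of-two number of sectors, so the mask is size - 1 and the shift is log2(size). A hedged illustration, with the kernel's __ffs() (index of the lowest set bit) emulated by a loop:

	#include <stdint.h>
	#include <stdio.h>

	/* Emulation of __ffs() for a power-of-two value. */
	static unsigned int ffs_shift(uint32_t v)
	{
		unsigned int shift = 0;

		while (!(v & 1)) {
			v >>= 1;
			shift++;
		}
		return shift;
	}

	int main(void)
	{
		uint32_t chunk_size = 16;		/* sectors, assumed power of two */
		uint32_t chunk_mask = chunk_size - 1;	/* 0xf: offset within a chunk */
		uint32_t chunk_shift = ffs_shift(chunk_size);	/* 4: sector >> 4 = chunk */
		uint64_t sector = 12345;

		printf("sector %llu -> chunk %llu, offset %llu\n",
		       (unsigned long long)sector,
		       (unsigned long long)(sector >> chunk_shift),
		       (unsigned long long)(sector & chunk_mask));
		return 0;
	}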
381 static int write_header(struct pstore *ps)
385 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
387 dh = ps->header_area;
389 dh->valid = cpu_to_le32(ps->valid);
390 dh->version = cpu_to_le32(ps->version);
391 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
393 return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
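
write_header() zeroes a whole chunk and stores the header fields in little-endian order, the inverse of the le32_to_cpu() conversions done in read_header(). A rough userspace sketch of that round trip, using glibc's htole32()/le32toh() as stand-ins for cpu_to_le32()/le32_to_cpu(), and a pared-down header holding only the three fields visible in this listing (the driver's real on-disk header carries more, e.g. a magic value):

	#include <endian.h>	/* htole32()/le32toh(), glibc */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustration-only header: just the fields visible in the listing. */
	struct disk_header_sketch {
		uint32_t valid;
		uint32_t version;
		uint32_t chunk_size;
	};

	int main(void)
	{
		unsigned char chunk[512];	/* stand-in for ps->header_area */
		struct disk_header_sketch dh, in;

		memset(chunk, 0, sizeof(chunk));	/* write_header() zeroes the whole chunk */
		dh.valid = htole32(1);
		dh.version = htole32(1);	/* SNAPSHOT_DISK_VERSION assumed to be 1 */
		dh.chunk_size = htole32(16);
		memcpy(chunk, &dh, sizeof(dh));

		/* read_header() side: convert back to CPU byte order. */
		memcpy(&in, chunk, sizeof(in));
		printf("valid=%u version=%u chunk_size=%u\n",
		       le32toh(in.valid), le32toh(in.version), le32toh(in.chunk_size));
		return 0;
	}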
399 static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
402 BUG_ON(index >= ps->exceptions_per_area);
407 static void read_exception(struct pstore *ps, void *ps_area,
410 struct disk_exception *de = get_exception(ps, ps_area, index);
417 static void write_exception(struct pstore *ps,
420 struct disk_exception *de = get_exception(ps, ps->area, index);
427 static void clear_exception(struct pstore *ps, uint32_t index)
429 struct disk_exception *de = get_exception(ps, ps->area, index);
441 static int insert_exceptions(struct pstore *ps, void *ps_area,
454 for (i = 0; i < ps->exceptions_per_area; i++) {
455 read_exception(ps, ps_area, i, &e);
464 ps->current_committed = i;
472 if (ps->next_free <= e.new_chunk)
473 ps->next_free = e.new_chunk + 1;
486 static int read_exceptions(struct pstore *ps,
495 client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
496 ps->store->chunk_size << SECTOR_SHIFT,
511 for (ps->current_area = 0; full; ps->current_area++) {
516 if (unlikely(prefetch_area < ps->current_area))
517 prefetch_area = ps->current_area;
520 chunk_t pf_chunk = area_location(ps, prefetch_area);
527 } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
529 chunk = area_location(ps, ps->current_area);
537 r = insert_exceptions(ps, area, callback, callback_context,
541 memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
551 ps->current_area--;
553 skip_metadata(ps);
573 struct pstore *ps = get_info(store);
575 *sectors_allocated = ps->next_free * store->chunk_size;
580 * Then there are (ps->current_area + 1) metadata chunks, each one
581 * separated from the next by ps->exceptions_per_area data chunks.
583 *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
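
persistent_usage() reports everything up to next_free as allocated and counts the metadata overhead as the header chunk(s) plus one metadata chunk per area written so far. A worked example with assumed numbers (chunk_size = 16 sectors, NUM_SNAPSHOT_HDR_CHUNKS = 1, the second area in use):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_SNAPSHOT_HDR_CHUNKS 1

	int main(void)
	{
		uint64_t chunk_size = 16;	/* sectors per chunk (assumed)      */
		uint64_t next_free = 130;	/* next chunk to hand out (assumed) */
		uint64_t current_area = 1;	/* second metadata area in use      */

		uint64_t sectors_allocated = next_free * chunk_size;
		uint64_t metadata_sectors =
			(current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * chunk_size;

		/* 130 * 16 = 2080 sectors allocated; (1 + 1 + 1) * 16 = 48 are metadata. */
		printf("allocated=%llu metadata=%llu\n",
		       (unsigned long long)sectors_allocated,
		       (unsigned long long)metadata_sectors);
		return 0;
	}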
589 struct pstore *ps = get_info(store);
591 destroy_workqueue(ps->metadata_wq);
594 if (ps->io_client)
595 dm_io_client_destroy(ps->io_client);
596 free_area(ps);
599 vfree(ps->callbacks);
601 kfree(ps);
610 struct pstore *ps = get_info(store);
615 r = read_header(ps, &new_snapshot);
622 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
624 ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
625 sizeof(*ps->callbacks));
626 if (!ps->callbacks)
633 r = write_header(ps);
639 ps->current_area = 0;
640 zero_memory_area(ps);
641 r = zero_disk_area(ps, 0);
649 if (ps->version != SNAPSHOT_DISK_VERSION) {
651 ps->version);
658 if (!ps->valid)
664 r = read_exceptions(ps, callback, callback_context);
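
persistent_read_metadata() sizes each area as one chunk's worth of bytes divided by the size of an on-disk exception record. Assuming a 16-byte record (a pair of 64-bit chunk numbers, which is what the driver's disk_exception amounts to as far as I can tell), a 16-sector chunk holds 512 records:

	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9		/* 512-byte sectors */

	/* Illustration-only stand-in for the on-disk exception record. */
	struct disk_exception_sketch {
		uint64_t old_chunk;
		uint64_t new_chunk;
	};

	int main(void)
	{
		uint32_t chunk_size = 16;	/* sectors (assumed) */
		uint32_t exceptions_per_area =
			(chunk_size << SECTOR_SHIFT) /
			sizeof(struct disk_exception_sketch);

		/* 16 * 512 = 8192 bytes per chunk; 8192 / 16 = 512 records per area. */
		printf("exceptions_per_area = %u\n", exceptions_per_area);
		return 0;
	}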
672 struct pstore *ps = get_info(store);
676 if (size < ((ps->next_free + 1) * store->chunk_size))
679 e->new_chunk = ps->next_free;
685 ps->next_free++;
686 skip_metadata(ps);
688 atomic_inc(&ps->pending_count);
698 struct pstore *ps = get_info(store);
703 ps->valid = 0;
707 write_exception(ps, ps->current_committed++, &ce);
715 cb = ps->callbacks + ps->callback_count++;
723 if (!atomic_dec_and_test(&ps->pending_count) &&
724 (ps->current_committed != ps->exceptions_per_area))
730 if ((ps->current_committed == ps->exceptions_per_area) &&
731 zero_disk_area(ps, ps->current_area + 1))
732 ps->valid = 0;
737 if (ps->valid && area_io(ps, REQ_OP_WRITE,
739 ps->valid = 0;
744 if (ps->current_committed == ps->exceptions_per_area) {
745 ps->current_committed = 0;
746 ps->current_area++;
747 zero_memory_area(ps);
750 for (i = 0; i < ps->callback_count; i++) {
751 cb = ps->callbacks + i;
752 cb->callback(cb->context, ps->valid);
755 ps->callback_count = 0;
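
persistent_commit_exception() batches work: exceptions accumulate in the in-memory area, and only when the area fills (or all pending exceptions are in) is it written out with PREFLUSH/FUA; a full area also triggers zeroing the next metadata chunk on disk and a rollover to a fresh area. A simplified, illustration-only model of just the rollover bookkeeping, with the actual I/O replaced by printfs:

	#include <stdint.h>
	#include <stdio.h>

	struct store_sketch {
		uint32_t exceptions_per_area;
		uint32_t current_committed;
		uint64_t current_area;
	};

	/* Models the tail of persistent_commit_exception(): when the area is full,
	 * flush it and start filling a fresh area for the next batch. */
	static void commit_one(struct store_sketch *s)
	{
		s->current_committed++;
		if (s->current_committed == s->exceptions_per_area) {
			printf("area %llu full: zero next metadata chunk, write area out\n",
			       (unsigned long long)s->current_area);
			s->current_committed = 0;
			s->current_area++;
			printf("now filling area %llu\n",
			       (unsigned long long)s->current_area);
		}
	}

	int main(void)
	{
		struct store_sketch s = { .exceptions_per_area = 4 };

		for (int i = 0; i < 9; i++)
			commit_one(&s);
		return 0;
	}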
762 struct pstore *ps = get_info(store);
770 if (!ps->current_committed) {
774 if (!ps->current_area)
777 ps->current_area--;
778 r = area_io(ps, REQ_OP_READ, 0);
781 ps->current_committed = ps->exceptions_per_area;
784 read_exception(ps, ps->area, ps->current_committed - 1, &ce);
792 for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
794 read_exception(ps, ps->area,
795 ps->current_committed - 1 - nr_consecutive, &ce);
808 struct pstore *ps = get_info(store);
810 BUG_ON(nr_merged > ps->current_committed);
813 clear_exception(ps, ps->current_committed - 1 - i);
815 r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
819 ps->current_committed -= nr_merged;
822 * At this stage, only persistent_usage() uses ps->next_free, so
823 * we make no attempt to keep ps->next_free strictly accurate
828 * ps->current_area does not get reduced by prepare_merge() until
831 ps->next_free = area_location(ps, ps->current_area) +
832 ps->current_committed + 1;
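
After a merge commit, next_free is not tracked exactly; it is recomputed from the layout as area_location(current_area) + current_committed + 1, i.e. the chunk just past the last still-committed exception of the current area. A quick worked example using the same assumed numbers as the layout sketch above (exceptions_per_area = 4, NUM_SNAPSHOT_HDR_CHUNKS = 1):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_SNAPSHOT_HDR_CHUNKS 1

	int main(void)
	{
		uint32_t exceptions_per_area = 4;	/* assumed, as above */
		uint64_t current_area = 2;
		uint32_t current_committed = 3;	/* exceptions left in this area after the merge */

		uint64_t area_chunk = NUM_SNAPSHOT_HDR_CHUNKS +
				      (uint64_t)(exceptions_per_area + 1) * current_area;
		uint64_t next_free = area_chunk + current_committed + 1;

		/* metadata chunk 11, data chunks 12..14 still committed, so next_free = 15 */
		printf("next_free = %llu\n", (unsigned long long)next_free);
		return 0;
	}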
839 struct pstore *ps = get_info(store);
841 ps->valid = 0;
842 if (write_header(ps))
848 struct pstore *ps;
852 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
853 if (!ps)
856 ps->store = store;
857 ps->valid = 1;
858 ps->version = SNAPSHOT_DISK_VERSION;
859 ps->area = NULL;
860 ps->zero_area = NULL;
861 ps->header_area = NULL;
862 ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
863 ps->current_committed = 0;
865 ps->callback_count = 0;
866 atomic_set(&ps->pending_count, 0);
867 ps->callbacks = NULL;
869 ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
870 if (!ps->metadata_wq) {
887 store->context = ps;
892 destroy_workqueue(ps->metadata_wq);
894 kfree(ps);