/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
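 *
 * To illustrate (a sketch; the real check is ptr_stale() further down in
 * this header), a pointer into a bucket is valid iff its embedded gen
 * matches the bucket's:
 *
 *	valid = (PTR_GEN(k, i) == PTR_BUCKET(c, k, i)->gen);
 *
 * and invalidating everything pointing into a bucket is conceptually just:
 *
 *	bucket->gen++;	(plus batching the new gen out to disk)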
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
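 *
 * For example, dropping whatever the index holds for a given extent is
 * roughly (a sketch; KEY() is from include/uapi/linux/bcache.h and builds
 * a key with zero pointers):
 *
 *	struct bkey k = KEY(inode, offset, size);
 *	(insert k through the normal btree insert path)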
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
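 *
 * So a lookup within one btree node is conceptually (a sketch; the real
 * iterator code lives in bset.c):
 *
 *	for (each sorted set in the node, newest to oldest)
 *		binary search that set for the key;
 *
 * with the per-set results merged by the btree iterator.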
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 * and it puts more strain on the various btree resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
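
/*
 * For illustration: BITMASK() (from bset.h) generates GC_MARK(b) and
 * SET_GC_MARK(b, v) style accessors over the gc_mark field, so a GC
 * snippet might look like (a sketch, not code from the GC proper):
 *
 *	SET_GC_MARK(b, GC_MARK_DIRTY);
 *	if (GC_MARK(b) == GC_MARK_DIRTY &&
 *	    GC_SECTORS_USED(b) < MAX_GC_SECTORS_USED)
 *		(bucket still holds dirty data - not reclaimable)
 */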

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned int		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	int			nr_stripes;
#define BCH_MIN_STRIPE_SZ		((4 << 20) >> SECTOR_SHIFT)
	unsigned int		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		bio_split;

	unsigned int		data_csum:1;

	int (*cache_miss)(struct btree *b, struct search *s,
			  struct bio *bio, unsigned int sectors);
	int (*ioctl)(struct bcache_device *d, fmode_t mode,
		     unsigned int cmd, unsigned long arg);
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned int		sequential;
	sector_t		last;
};

enum stop_on_failure {
	BCH_CACHED_DEV_STOP_AUTO = 0,
	BCH_CACHED_DEV_STOP_ALWAYS,
	BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	refcount_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a
	 * shared lock to set and exclusive lock to clear.
	 */
	atomic_t		has_dirty;

#define BCH_CACHE_READA_ALL		0
#define BCH_CACHE_READA_META_ONLY	1
	unsigned int		cache_readahead_policy;
	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	struct task_struct	*status_update_thread;
	/*
	 * Order the write-half of writeback operations strongly in dispatch
	 * order.  (Maintain LBA order; don't allow reads completing out of
	 * order to re-order the writes...)
	 */
	struct closure_waitlist writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned int		sequential_cutoff;
	unsigned int		readahead;

	unsigned int		io_disable:1;
	unsigned int		verify:1;
	unsigned int		bypass_torture_test:1;

	unsigned int		partial_stripes_expensive:1;
	unsigned int		writeback_metadata:1;
	unsigned int		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned int		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned int		writeback_rate_update_seconds;
	unsigned int		writeback_rate_i_term_inverse;
	unsigned int		writeback_rate_p_term_inverse;
	unsigned int		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned int		error_limit;
	unsigned int		offline_seconds;

	char			backing_dev_name[BDEVNAME_SIZE];
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_last_buckets[] contains the last buckets we wrote
	 * priorities to (so gc can mark them as metadata), prio_buckets[]
	 * contains the buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned int		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;

	char			cache_dev_name[BDEVNAME_SIZE];
};

struct gc_stat {
	size_t			nodes;
	size_t			nodes_pre;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned int		in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 *
 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 * external and internal I/O should be denied when this flag is set.
 *
 */
#define CACHE_SET_UNREGISTERING		0
#define	CACHE_SET_STOPPING		1
#define	CACHE_SET_RUNNING		2
#define CACHE_SET_IO_DISABLE		3

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;
	atomic_t		idle_counter;
	atomic_t		at_max_writeback_rate;

	struct cache		*cache;

	struct bcache_device	**devices;
	unsigned int		devices_max_used;
	atomic_t		attached_dev_nr;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	atomic_long_t		flash_dev_dirty_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		search;
	mempool_t		bio_meta;
	struct bio_set		bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned int		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for an actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned int		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;
	spinlock_t		btree_cannibalize_lock;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * Used by GC to identify whether any front-side I/O is in flight.
	 */
	atomic_t		search_inflight;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * For automatic garbage collection after writeback completes, this
	 * variable is used as a bit field:
	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
	 * This is an optimization for write requests that follow completed
	 * writeback, when the read hit rate has dropped because clean data
	 * in the cache was discarded. Unless the user explicitly sets it via
	 * sysfs, it won't be enabled.
	 */
#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
	uint8_t			gc_after_writeback;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	uint8_t			set_uuid[16];
	unsigned int		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - so we have to dynamically allocate them.
	 * bch_cache_set_alloc() will make sure the pool can allocate iterators
	 * with enough room to host
	 *     (sb.bucket_size / sb.block_size)
	 * btree_iter_sets, which is more than the static MAX_BSETS.
	 */
	mempool_t		fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned int		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned int		congested_read_threshold_us;
	unsigned int		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		reclaimed_journal_buckets;
	atomic_long_t		flush_write;

	enum			{
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
	unsigned int		error_limit;
	unsigned int		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned int		verify:1;
	unsigned int		key_merging_disabled:1;
	unsigned int		gc_always_rewrite:1;
	unsigned int		shrinker_disabled:1;
	unsigned int		copy_gc_enabled:1;
	unsigned int		idle_max_writeback_rate_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned int		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
#define block_bytes(ca)		((ca)->sb.block_size << 9)

static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
{
	unsigned int n, max_pages;

	max_pages = min_t(unsigned int,
			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
			  MAX_ORDER_NR_PAGES);

	n = sb->bucket_size / PAGE_SECTORS;
	if (n > max_pages)
		n = max_pages;

	return n;
}
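
/*
 * Worked example (assuming 4 KiB pages, so PAGE_SECTORS == 8, and a
 * typical MAX_ORDER_NR_PAGES of 1024): a 2 MiB bucket is 4096 sectors,
 * so n = 4096 / 8 = 512 pages, which is under both caps - the metadata
 * bucket spans 512 pages (2 MiB).
 */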

static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
{
	return meta_bucket_pages(sb) << PAGE_SHIFT;
}

#define prios_per_bucket(ca)						\
	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))

#define prio_buckets(ca)						\
	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->cache->sb.bucket_size - 1);
}
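
/*
 * e.g. with 512 KiB buckets (1024 sectors, bucket_bits == 10): sector
 * 3000 falls in bucket 3000 >> 10 == 2, at offset 3000 & 1023 == 952
 * sectors into that bucket.
 */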

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned int ptr)
{
	return c->cache;
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned int ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}
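
/*
 * gen_after() is a mod-256 comparison: e.g. gen_after(5, 250) is
 * (uint8_t)(5 - 250) == 11, which is <= 128U, so b is 11 gens behind a;
 * gen_after(250, 5) gives 245 > 128U and clamps to 0 (not after).
 */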

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))
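
/*
 * Typical use is verification on read, e.g. for a jset (a sketch):
 *
 *	if (i->csum != csum_set(i))
 *		goto bad;
 */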

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}
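
/*
 * A typical caller pattern (a sketch): take a ref for the duration of a
 * cached I/O, fall back to passthrough if the device is detaching:
 *
 *	if (cached_dev_get(dc)) {
 *		(do cached I/O; cached_dev_put(dc) on completion)
 *	} else {
 *		(bypass the cache; submit to the backing device)
 *	}
 */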

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, 0600, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca = c->cache;

	wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	submit_bio_noacct(bio);
}
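
/*
 * e.g. (a sketch, hypothetical names) submitting a bio on behalf of an
 * in-flight request, where s->cl is the closure tracking that request:
 *
 *	closure_bio_submit(c, bio, &s->cl);
 */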

/*
 * Prevent the kthread from exiting directly, and make sure that when
 * kthread_stop() is called to stop a kthread, it is still alive. If a
 * kthread might be stopped by the CACHE_SET_IO_DISABLE bit being set,
 * wait_for_kthread_stop() is necessary before the kthread returns.
 */
static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}
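
/*
 * e.g. (a sketch) at the tail of a kthread that may decide to exit on its
 * own (say, because CACHE_SET_IO_DISABLE was set) rather than only via
 * kthread_stop():
 *
 *	out:
 *		wait_for_kthread_stop();
 *		return 0;
 */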

/* Forward declarations */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);

void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr);

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);

void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct workqueue_struct *bch_flush_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);

int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);

void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */