Lines Matching defs:device
79 #define __drbd_printk_device(level, device, fmt, args...) \
80 dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
82 dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
123 #define dynamic_drbd_dbg(device, fmt, args...) \
124 dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
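These macros route DRBD log output through dev_printk() against the device's gendisk; drbd_err()/drbd_warn()/drbd_info(), defined alongside in the same header, select the KERN_* level, while dynamic_drbd_dbg() compiles away without CONFIG_DYNAMIC_DEBUG. A minimal usage sketch, assuming a valid struct drbd_device *device in kernel context:

    /* Log at info level, plus a dynamic-debug-only trace.
     * device_to_minor() and device->state.disk come from this header. */
    drbd_info(device, "attaching, minor=%u\n", device_to_minor(device));
    dynamic_drbd_dbg(device, "disk state is %d\n", device->state.disk);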
126 #define D_ASSERT(device, exp) do { \
128 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
139 drbd_err(device, "ASSERTION %s FAILED in %s\n", \
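D_ASSERT() (full body elided above) logs through drbd_err() but does not halt execution, so callers must still cope with the violated invariant. A hedged sketch, assuming a valid device:

    /* Only logs "ASSERT( ... ) in file:line" on failure; no BUG(). */
    D_ASSERT(device, atomic_read(&device->ap_bio_cnt) >= 0);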
161 _drbd_insert_fault(struct drbd_device *device, unsigned int type);
164 drbd_insert_fault(struct drbd_device *device, unsigned int type) {
168 _drbd_insert_fault(device, type);
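drbd_insert_fault() is the inline guard around _drbd_insert_fault(); with fault injection disabled at build time it constant-folds to false. The usual pattern, mirrored by drbd_submit_bio_noacct() further down, is to fail the bio artificially. A sketch, where bio and the DRBD_FAULT_DT_WR fault type are illustrative:

    /* Artificially fail this data-write bio if the configured fault
     * rate fires; otherwise hand it to the lower-level device. */
    if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
        bio_io_error(bio);
    else
        submit_bio_noacct(bio);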
203 extern void INFO_bm_xfer_stats(struct drbd_device *device,
264 struct drbd_device *device;
276 struct drbd_device *device;
349 * when did we start submitting to the lower level device,
352 * how long did it take the lower level device to complete this request
481 /* flag bits per device */
511 FLUSH_PENDING, /* if set, device->flush_jif is when we submitted that flush
514 /* cleared only after backing device related structures have been destroyed. */
598 sector_t known_size; /* last known size of that backing device */
615 int (*io_fn)(struct drbd_device *device);
616 void (*done)(struct drbd_device *device, int rv);
647 DEVICE_WORK_PENDING, /* tell worker that some device has pending work */
661 struct idr devices; /* volume number to device mapping */
696 struct idr peer_devices; /* volume number to peer device mapping */
807 struct drbd_device *device;
832 unsigned int minor; /* device minor number */
954 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
960 * on the lower level device when we last looked. */
974 struct drbd_device *device;
975 struct list_head list; /* on device->pending_bitmap_io */
1005 struct drbd_device *device;
1015 static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
1017 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
1044 #define for_each_peer_device(peer_device, device) \
1045 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
1047 #define for_each_peer_device_rcu(peer_device, device) \
1048 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
1050 #define for_each_peer_device_safe(peer_device, tmp, device) \
1051 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
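The three iterators walk device->peer_devices with different protection: bare list traversal, RCU-protected traversal, and a deletion-safe variant. A sketch of the RCU form, assuming a valid device; the loop body is illustrative:

    struct drbd_peer_device *peer_device;

    /* Readers must hold the RCU read lock across the traversal. */
    rcu_read_lock();
    for_each_peer_device_rcu(peer_device, device)
        drbd_info(device, "peer device on connection %p\n",
                  peer_device->connection);
    rcu_read_unlock();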
1053 static inline unsigned int device_to_minor(struct drbd_device *device)
1055 return device->minor;
1069 extern void drbd_init_set_defaults(struct drbd_device *device);
1116 extern int drbd_send_bitmap(struct drbd_device *device);
1120 extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1121 extern void drbd_device_cleanup(struct drbd_device *device);
1122 extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1123 extern void drbd_queue_unplug(struct drbd_device *device);
1126 extern void drbd_md_write(struct drbd_device *device, void *buffer);
1127 extern void drbd_md_sync(struct drbd_device *device);
1128 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1129 extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1130 extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1131 extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1132 extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1133 extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1134 extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1135 extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1136 extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
1138 extern void drbd_md_mark_dirty(struct drbd_device *device);
1139 extern void drbd_queue_bitmap_io(struct drbd_device *device,
1143 extern int drbd_bitmap_io(struct drbd_device *device,
1146 extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1149 extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
1150 extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
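drbd_bitmap_io() runs an io_fn such as drbd_bmio_set_n_write() (set all bits, then write the bitmap out) with the bitmap locked for the duration; drbd_queue_bitmap_io() defers the same work to the worker thread. A hedged sketch of the synchronous form; the why string and flag choice are illustrative:

    /* Set every bit and flush the whole bitmap to the meta-data area,
     * holding the bitmap lock (BM_LOCKED_MASK) throughout. */
    int err = drbd_bitmap_io(device, &drbd_bmio_set_n_write,
                             "set_n_write sketch", BM_LOCKED_MASK);
    if (err)
        drbd_err(device, "bitmap write failed: %d\n", err);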
1174 * end of the device, so that the [4k superblock] will be 4k aligned.
1341 extern int drbd_bm_init(struct drbd_device *device);
1342 extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1343 extern void drbd_bm_cleanup(struct drbd_device *device);
1344 extern void drbd_bm_set_all(struct drbd_device *device);
1345 extern void drbd_bm_clear_all(struct drbd_device *device);
1348 struct drbd_device *device, unsigned long s, unsigned long e);
1350 struct drbd_device *device, unsigned long s, unsigned long e);
1352 struct drbd_device *device, const unsigned long s, const unsigned long e);
1355 extern void _drbd_bm_set_bits(struct drbd_device *device,
1357 extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1358 extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1359 extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1360 extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1361 extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1362 extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1363 extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1364 extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1365 extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1366 extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1367 extern size_t drbd_bm_words(struct drbd_device *device);
1368 extern unsigned long drbd_bm_bits(struct drbd_device *device);
1369 extern sector_t drbd_bm_capacity(struct drbd_device *device);
1372 extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1374 extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1375 extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1376 extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1377 extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1379 extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1382 extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1385 extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1386 extern void drbd_bm_unlock(struct drbd_device *device);
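Bulk bitmap queries and updates are expected to run between drbd_bm_lock() and drbd_bm_unlock(); the why string surfaces in diagnostics when lock contention is detected. A sketch that counts out-of-sync bits under the lock; the flag choice is illustrative:

    unsigned long oos;

    /* BM_LOCKED_TEST_ALLOWED permits test/count operations while
     * the bitmap is locked against set/clear. */
    drbd_bm_lock(device, "inspect bitmap", BM_LOCKED_TEST_ALLOWED);
    oos = drbd_bm_total_weight(device);
    drbd_info(device, "%lu of %lu bits out of sync\n",
              oos, drbd_bm_bits(device));
    drbd_bm_unlock(device);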
1436 extern void drbd_delete_device(struct drbd_device *device);
1454 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1462 extern void drbd_suspend_io(struct drbd_device *device);
1463 extern void drbd_resume_io(struct drbd_device *device);
1478 extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1480 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1486 extern int drbd_khelper(struct drbd_device *device, char *cmd);
1494 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1495 void drbd_resync_after_changed(struct drbd_device *device);
1496 extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1497 extern void resume_next_sg(struct drbd_device *device);
1498 extern void suspend_other_sg(struct drbd_device *device);
1499 extern int drbd_resync_finished(struct drbd_device *device);
1501 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1502 extern void drbd_md_put_buffer(struct drbd_device *device);
1503 extern int drbd_md_sync_page_io(struct drbd_device *device,
1506 extern void wait_until_done_or_force_detached(struct drbd_device *device,
1508 extern void drbd_rs_controller_reset(struct drbd_device *device);
1510 static inline void ov_out_of_sync_print(struct drbd_device *device)
1512 if (device->ov_last_oos_size) {
1513 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1514 (unsigned long long)device->ov_last_oos_start,
1515 (unsigned long)device->ov_last_oos_size);
1517 device->ov_last_oos_size = 0;
1546 extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1552 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1553 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1568 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1569 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1572 /* sets the number of 512 byte sectors of our virtual device */
1573 void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1578 static inline void drbd_submit_bio_noacct(struct drbd_device *device,
1583 drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
1589 if (drbd_insert_fault(device, fault_type))
1603 extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1604 extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1605 extern void drbd_al_begin_io_commit(struct drbd_device *device);
1606 extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1607 extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1608 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
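Writes covered by the activity log bracket the request: the fastpath avoids a transaction when the extent is already hot, otherwise prepare/commit (or the blocking drbd_al_begin_io()) takes one. A sketch of the blocking form, where i is an assumed struct drbd_interval describing the write:

    /* Make the extents covering interval i hot before submitting
     * the write; release them once the write has completed. */
    drbd_al_begin_io(device, &i);
    /* ... submit the write for [i.sector, i.sector + i.size) ... */
    drbd_al_complete_io(device, &i);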
1609 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1610 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1611 extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1612 extern void drbd_rs_cancel_all(struct drbd_device *device);
1613 extern int drbd_rs_del_all(struct drbd_device *device);
1614 extern void drbd_rs_failed_io(struct drbd_device *device,
1616 extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1619 extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1621 #define drbd_set_in_sync(device, sector, size) \
1622 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
1623 #define drbd_set_out_of_sync(device, sector, size) \
1624 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1625 #define drbd_rs_failed_io(device, sector, size) \
1626 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
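All three updates funnel into __drbd_change_sync() with a mode flag; note that the macro at 1625 shadows the extern drbd_rs_failed_io() declared at 1614. A sketch, with sector and the 4 KiB size illustrative (size is in bytes):

    /* After a successful resync write, mark the range clean again;
     * on error, record the failure and leave it dirty instead. */
    drbd_set_in_sync(device, sector, 4096);
    /* or, on error: drbd_rs_failed_io(device, sector, 4096); */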
1627 extern void drbd_al_shrink(struct drbd_device *device);
1645 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1696 static inline union drbd_state drbd_read_state(struct drbd_device *device)
1698 struct drbd_resource *resource = device->resource;
1701 rv.i = device->state.i;
1717 static inline void __drbd_chk_io_error_(struct drbd_device *device,
1724 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1730 drbd_err(device, "Local IO failed in %s.\n", where);
1731 if (device->state.disk > D_INCONSISTENT)
1732 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1758 set_bit(WAS_IO_ERROR, &device->flags);
1760 set_bit(WAS_READ_ERROR, &device->flags);
1762 set_bit(FORCE_DETACH, &device->flags);
1763 if (device->state.disk > D_FAILED) {
1764 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1765 drbd_err(device,
1774 * @device: DRBD device.
1781 static inline void drbd_chk_io_error_(struct drbd_device *device,
1786 spin_lock_irqsave(&device->resource->req_lock, flags);
1787 __drbd_chk_io_error_(device, forcedetach, where);
1788 spin_unlock_irqrestore(&device->resource->req_lock, flags);
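__drbd_chk_io_error_() implements the configured on_io_error policy (pass through, detach, or force-detach), and drbd_chk_io_error_() wraps it in the resource's req_lock; the full header also provides a drbd_chk_io_error() convenience macro that supplies __func__ as where. A sketch from an assumed completion path:

    /* Escalate a failed meta-data write according to the configured
     * on-io-error policy; DRBD_META_IO_ERROR forces the detach path. */
    if (error)
        drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);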
1795 * @bdev: Meta data block device.
1814 * @bdev: Meta data block device.
1828 /* Returns the number of 512 byte sectors of the device */
1837 * @bdev: Meta data block device.
1858 /* clip at maximum size the meta device can support */
1872 * @bdev: Meta data block device.
1913 drbd_device_post_work(struct drbd_device *device, int work_bit)
1915 if (!test_and_set_bit(work_bit, &device->flags)) {
1917 first_peer_device(device)->connection;
1994 static inline void inc_ap_pending(struct drbd_device *device)
1996 atomic_inc(&device->ap_pending_cnt);
2000 if (atomic_read(&device->which) < 0) \
2001 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
2003 atomic_read(&device->which))
2005 #define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2006 static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2008 if (atomic_dec_and_test(&device->ap_pending_cnt))
2009 wake_up(&device->misc_wait);
2019 static inline void inc_rs_pending(struct drbd_device *device)
2021 atomic_inc(&device->rs_pending_cnt);
2024 #define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2025 static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2027 atomic_dec(&device->rs_pending_cnt);
2040 static inline void inc_unacked(struct drbd_device *device)
2042 atomic_inc(&device->unacked_cnt);
2045 #define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2046 static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2048 atomic_dec(&device->unacked_cnt);
2052 #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2053 static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2055 atomic_sub(n, &device->unacked_cnt);
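The three atomic counters share one pattern: inc_* when something becomes pending (an application request awaiting a peer ACK, a resync request, an unacknowledged peer request), dec_* on completion, with the ERR_IF_CNT_IS_NEGATIVE fragment above catching underflow and dec_ap_pending() waking misc_wait. A sketch of the bracketing; the send and ACK steps are illustrative:

    /* Account one application request that now awaits a peer ACK. */
    inc_ap_pending(device);
    /* ... send the write to the peer ... */

    /* Later, in the ACK receive path: */
    dec_ap_pending(device);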
2078 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
2079 * @_device: DRBD device.
2080 * @_min_state: Minimum device state required for success.
2082 * You have to call put_ldev() when finished working with device->ldev.
2089 static inline void put_ldev(struct drbd_device *device)
2091 enum drbd_disk_state disk_state = device->state.disk;
2096 int i = atomic_dec_return(&device->local_cnt);
2102 D_ASSERT(device, i >= 0);
2106 drbd_device_post_work(device, DESTROY_DISK);
2109 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2110 drbd_device_post_work(device, GO_DISKLESS);
2111 wake_up(&device->misc_wait);
2116 static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2121 if (device->state.disk == D_DISKLESS)
2124 atomic_inc(&device->local_cnt);
2125 io_allowed = (device->state.disk >= mins);
2127 put_ldev(device);
2131 extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
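get_ldev() pins device->ldev by raising local_cnt when the disk state is at least the requested minimum (the convenience macros wrapping _get_ldev_if_state() live in the full header); every successful get must be paired with put_ldev(), whose final drop triggers the GO_DISKLESS/DESTROY_DISK work seen above. The canonical bracketing, assuming a valid device:

    /* Pin the backing device while inspecting it; get_ldev() fails
     * once the disk state drops below D_INCONSISTENT. */
    if (get_ldev(device)) {
        sector_t sz = drbd_get_capacity(device->ldev->backing_bdev);

        drbd_info(device, "backing device: %llu sectors\n",
                  (unsigned long long)sz);
        put_ldev(device);
    }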
2137 static inline int drbd_get_max_buffers(struct drbd_device *device)
2143 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2150 static inline int drbd_state_is_stable(struct drbd_device *device)
2152 union drbd_dev_state s = device->state;
2186 if (first_peer_device(device)->connection->agreed_pro_version < 96)
2220 static inline int drbd_suspended(struct drbd_device *device)
2222 struct drbd_resource *resource = device->resource;
2227 static inline bool may_inc_ap_bio(struct drbd_device *device)
2229 int mxb = drbd_get_max_buffers(device);
2231 if (drbd_suspended(device))
2233 if (atomic_read(&device->suspend_cnt))
2241 if (!drbd_state_is_stable(device))
2246 if (atomic_read(&device->ap_bio_cnt) > mxb)
2248 if (test_bit(BITMAP_IO, &device->flags))
2253 static inline bool inc_ap_bio_cond(struct drbd_device *device)
2257 spin_lock_irq(&device->resource->req_lock);
2258 rv = may_inc_ap_bio(device);
2260 atomic_inc(&device->ap_bio_cnt);
2261 spin_unlock_irq(&device->resource->req_lock);
2266 static inline void inc_ap_bio(struct drbd_device *device)
2269 * as long as the device is suspended
2276 wait_event(device->misc_wait, inc_ap_bio_cond(device));
2279 static inline void dec_ap_bio(struct drbd_device *device)
2281 int mxb = drbd_get_max_buffers(device);
2282 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2284 D_ASSERT(device, ap_bio >= 0);
2286 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2287 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2288 drbd_queue_work(&first_peer_device(device)->
2290 &device->bm_io_work.w);
2297 wake_up(&device->misc_wait);
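inc_ap_bio() sleeps on misc_wait until may_inc_ap_bio() admits another application bio (not suspended, state stable, under max_buffers, no bitmap I/O pending), and dec_ap_bio() queues deferred bitmap I/O once the count drains. A sketch of the request-path bracketing; the submission step is illustrative:

    /* Throttle an incoming application bio against max_buffers. */
    inc_ap_bio(device);
    /* ... build and submit the drbd_request for this bio ... */

    /* In the completion path, release the slot: */
    dec_ap_bio(device);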
2300 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2302 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2303 first_peer_device(device)->connection->agreed_pro_version != 100;
2306 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2308 int changed = device->ed_uuid != val;
2309 device->ed_uuid = val;
2313 static inline int drbd_queue_order_type(struct drbd_device *device)