Lines Matching defs:device

88 _drbd_insert_fault(struct drbd_device *device, unsigned int type);
91 drbd_insert_fault(struct drbd_device *device, unsigned int type) {
95 _drbd_insert_fault(device, type);
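These three fragments are the fault-injection hook: drbd_insert_fault() is the inline fast path that only falls through to the out-of-line _drbd_insert_fault() when faults are enabled. A hedged reconstruction of the wrapper, assuming the CONFIG_DRBD_FAULT_INJECTION guard and the fault_rate/enable_faults module parameters of the upstream driver:

    static inline int
    drbd_insert_fault(struct drbd_device *device, unsigned int type)
    {
    #ifdef CONFIG_DRBD_FAULT_INJECTION
    	/* Take the slow path only if fault injection is active at all
    	 * and this fault type is selected in the enable_faults mask. */
    	return fault_rate &&
    		(enable_faults & (1 << type)) &&
    		_drbd_insert_fault(device, type);
    #else
    	return 0;
    #endif
    }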
191 struct drbd_device *device;
203 struct drbd_device *device;
276 * when did we start submitting to the lower level device,
279 * how long did it take the lower level device to complete this request
413 /* flag bits per device */
443 FLUSH_PENDING, /* if set, device->flush_jif is when we submitted that flush
446 /* cleared only after backing device related structures have been destroyed. */
530 sector_t known_size; /* last known size of that backing device */
548 int (*io_fn)(struct drbd_device *device, struct drbd_peer_device *peer_device);
549 void (*done)(struct drbd_device *device, int rv);
577 DEVICE_WORK_PENDING, /* tell worker that some device has pending work */
591 struct idr devices; /* volume number to device mapping */
626 struct idr peer_devices; /* volume number to peer device mapping */
737 struct drbd_device *device;
762 unsigned int minor; /* device minor number */
884 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
890 * on the lower level device when we last looked. */
904 struct drbd_device *device;
905 struct list_head list; /* on device->pending_bitmap_io */
935 struct drbd_device *device;
945 static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
947 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
974 #define for_each_peer_device(peer_device, device) \
975 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
977 #define for_each_peer_device_rcu(peer_device, device) \
978 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
980 #define for_each_peer_device_safe(peer_device, tmp, device) \
981 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
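A minimal usage sketch for the iterator macros above; count_peer_devices() is a hypothetical helper, and the caller is assumed to hold whatever lock (or RCU read section) protects device->peer_devices:

    /* Count the peer devices attached to a device with the plain
     * (non-RCU) iterator. */
    static int count_peer_devices(struct drbd_device *device)
    {
    	struct drbd_peer_device *peer_device;
    	int count = 0;

    	for_each_peer_device(peer_device, device)
    		count++;
    	return count;
    }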
983 static inline unsigned int device_to_minor(struct drbd_device *device)
985 return device->minor;
999 extern void drbd_init_set_defaults(struct drbd_device *device);
1046 extern int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device);
1050 extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1051 extern void drbd_device_cleanup(struct drbd_device *device);
1052 extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1053 extern void drbd_queue_unplug(struct drbd_device *device);
1056 extern void drbd_md_write(struct drbd_device *device, void *buffer);
1057 extern void drbd_md_sync(struct drbd_device *device);
1058 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1059 extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1060 extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1061 extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1062 extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1063 extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1064 extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1065 extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1066 extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
1068 extern void drbd_md_mark_dirty(struct drbd_device *device);
1069 extern void drbd_queue_bitmap_io(struct drbd_device *device,
1074 extern int drbd_bitmap_io(struct drbd_device *device,
1078 extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1082 extern int drbd_bmio_set_n_write(struct drbd_device *device,
1084 extern int drbd_bmio_clear_n_write(struct drbd_device *device,
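The io_fn/done pair taken by drbd_queue_bitmap_io() matches the callbacks of the bm_io_work fragment near line 548. A hedged call sketch: the why/flags/peer_device arguments are assumptions filled in for the truncated prototype, attach_done() is a hypothetical completion callback, and BM_LOCKED_MASK stands in for whatever bm_flag value the caller needs:

    /* Ask the worker to run drbd_bmio_set_n_write() with the bitmap
     * locked, then call attach_done(device, rv) with its result. */
    drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, &attach_done,
    		     "set_n_write from attach", BM_LOCKED_MASK,
    		     peer_device);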
1109 * end of the device, so that the [4k superblock] will be 4k aligned.
1276 extern int drbd_bm_init(struct drbd_device *device);
1277 extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1278 extern void drbd_bm_cleanup(struct drbd_device *device);
1279 extern void drbd_bm_set_all(struct drbd_device *device);
1280 extern void drbd_bm_clear_all(struct drbd_device *device);
1283 struct drbd_device *device, unsigned long s, unsigned long e);
1285 struct drbd_device *device, unsigned long s, unsigned long e);
1287 struct drbd_device *device, const unsigned long s, const unsigned long e);
1290 extern void _drbd_bm_set_bits(struct drbd_device *device,
1292 extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1293 extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1294 extern int drbd_bm_read(struct drbd_device *device,
1296 extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1297 extern int drbd_bm_write(struct drbd_device *device,
1299 extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1300 extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1301 extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1302 extern int drbd_bm_write_all(struct drbd_device *device,
1304 extern int drbd_bm_write_copy_pages(struct drbd_device *device,
1306 extern size_t drbd_bm_words(struct drbd_device *device);
1307 extern unsigned long drbd_bm_bits(struct drbd_device *device);
1308 extern sector_t drbd_bm_capacity(struct drbd_device *device);
1311 extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1313 extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1314 extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1315 extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1316 extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1318 extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1321 extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1324 extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1325 extern void drbd_bm_unlock(struct drbd_device *device);
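The bitmap API above is bracketed by drbd_bm_lock()/drbd_bm_unlock(). A short usage sketch built only from functions in this listing; BM_LOCKED_MASK is an assumed bm_flag value:

    /* The "why" string identifies the lock holder if someone else
     * contends for the bitmap. */
    drbd_bm_lock(device, "example query", BM_LOCKED_MASK);
    dirty = drbd_bm_total_weight(device);	/* bits still out of sync */
    drbd_bm_unlock(device);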
1373 extern void drbd_delete_device(struct drbd_device *device);
1391 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1399 extern void drbd_suspend_io(struct drbd_device *device);
1400 extern void drbd_resume_io(struct drbd_device *device);
1415 extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1417 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1423 extern int drbd_khelper(struct drbd_device *device, char *cmd);
1431 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1432 void drbd_resync_after_changed(struct drbd_device *device);
1433 extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1434 extern void resume_next_sg(struct drbd_device *device);
1435 extern void suspend_other_sg(struct drbd_device *device);
1438 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1439 extern void drbd_md_put_buffer(struct drbd_device *device);
1440 extern int drbd_md_sync_page_io(struct drbd_device *device,
1444 extern void wait_until_done_or_force_detached(struct drbd_device *device,
1450 struct drbd_device *device = peer_device->device;
1452 if (device->ov_last_oos_size) {
1454 (unsigned long long)device->ov_last_oos_start,
1455 (unsigned long)device->ov_last_oos_size);
1457 device->ov_last_oos_size = 0;
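Lines 1450-1457 belong to the online-verify reporting helper (ov_out_of_sync_print() upstream). A hedged reconstruction around the verbatim lines above; the format string is an assumption:

    static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
    {
    	struct drbd_device *device = peer_device->device;

    	if (device->ov_last_oos_size) {
    		/* Report the run of out-of-sync sectors found by verify. */
    		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
    			 (unsigned long long)device->ov_last_oos_start,
    			 (unsigned long)device->ov_last_oos_size);
    	}
    	device->ov_last_oos_size = 0;
    }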
1485 extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1491 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1505 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1506 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1509 /* sets the number of 512 byte sectors of our virtual device */
1510 void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1515 static inline void drbd_submit_bio_noacct(struct drbd_device *device,
1520 drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
1526 if (drbd_insert_fault(device, fault_type))
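A hedged reconstruction of the wrapper these three lines come from: reject a bio with no backing bdev, let fault injection fail it, otherwise pass it down. bio_io_error(), bio_endio() and submit_bio_noacct() are standard block-layer calls; the exact body may differ from your tree:

    static inline void drbd_submit_bio_noacct(struct drbd_device *device,
    					  int fault_type, struct bio *bio)
    {
    	if (!bio->bi_bdev) {
    		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
    		bio->bi_status = BLK_STS_IOERR;
    		bio_endio(bio);
    		return;
    	}

    	if (drbd_insert_fault(device, fault_type))
    		bio_io_error(bio);	/* simulated failure */
    	else
    		submit_bio_noacct(bio);	/* hand off to the lower device */
    }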
1540 extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1541 extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1542 extern void drbd_al_begin_io_commit(struct drbd_device *device);
1543 extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1544 extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1545 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1546 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1547 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1549 extern void drbd_rs_cancel_all(struct drbd_device *device);
1550 extern int drbd_rs_del_all(struct drbd_device *device);
1564 extern void drbd_al_shrink(struct drbd_device *device);
1582 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1633 static inline union drbd_state drbd_read_state(struct drbd_device *device)
1635 struct drbd_resource *resource = device->resource;
1638 rv.i = device->state.i;
1654 static inline void __drbd_chk_io_error_(struct drbd_device *device,
1661 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1667 drbd_err(device, "Local IO failed in %s.\n", where);
1668 if (device->state.disk > D_INCONSISTENT)
1669 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1695 set_bit(WAS_IO_ERROR, &device->flags);
1697 set_bit(WAS_READ_ERROR, &device->flags);
1699 set_bit(FORCE_DETACH, &device->flags);
1700 if (device->state.disk > D_FAILED) {
1701 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1702 drbd_err(device,
1711 * @device: DRBD device.
1718 static inline void drbd_chk_io_error_(struct drbd_device *device,
1723 spin_lock_irqsave(&device->resource->req_lock, flags);
1724 __drbd_chk_io_error_(device, forcedetach, where);
1725 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1732 * @bdev: Meta data block device.
1751 * @bdev: Meta data block device.
1765 /* Returns the number of 512 byte sectors of the device */
1773 * @bdev: Meta data block device.
1794 /* clip at maximum size the meta device can support */
1808 * @bdev: Meta data block device.
1849 drbd_device_post_work(struct drbd_device *device, int work_bit)
1851 if (!test_and_set_bit(work_bit, &device->flags)) {
1853 first_peer_device(device)->connection;
1930 static inline void inc_ap_pending(struct drbd_device *device)
1932 atomic_inc(&device->ap_pending_cnt);
1935 #define dec_ap_pending(device) ((void)expect((device), __dec_ap_pending(device) >= 0))
1936 static inline int __dec_ap_pending(struct drbd_device *device)
1938 int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
1941 wake_up(&device->misc_wait);
1953 atomic_inc(&peer_device->device->rs_pending_cnt);
1960 return atomic_dec_return(&peer_device->device->rs_pending_cnt);
1972 static inline void inc_unacked(struct drbd_device *device)
1974 atomic_inc(&device->unacked_cnt);
1977 #define dec_unacked(device) ((void)expect(device, __dec_unacked(device) >= 0))
1978 static inline int __dec_unacked(struct drbd_device *device)
1980 return atomic_dec_return(&device->unacked_cnt);
1983 #define sub_unacked(device, n) ((void)expect(device, __sub_unacked(device) >= 0))
1984 static inline int __sub_unacked(struct drbd_device *device, int n)
1986 return atomic_sub_return(n, &device->unacked_cnt);
2008 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
2009 * @_device: DRBD device.
2010 * @_min_state: Minimum device state required for success.
2012 * You have to call put_ldev() when finished working with device->ldev.
2019 static inline void put_ldev(struct drbd_device *device)
2021 enum drbd_disk_state disk_state = device->state.disk;
2026 int i = atomic_dec_return(&device->local_cnt);
2032 D_ASSERT(device, i >= 0);
2036 drbd_device_post_work(device, DESTROY_DISK);
2039 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2040 drbd_device_post_work(device, GO_DISKLESS);
2041 wake_up(&device->misc_wait);
2046 static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2051 if (device->state.disk == D_DISKLESS)
2054 atomic_inc(&device->local_cnt);
2055 io_allowed = (device->state.disk >= mins);
2057 put_ldev(device);
2061 extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
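The kernel-doc at 2008-2012 spells out the contract: every successful get must be paired with put_ldev(), which may schedule DESTROY_DISK or GO_DISKLESS work once the count drops. A minimal usage sketch, assuming the conventional get_ldev_if_state() wrapper around _get_ldev_if_state() and using D_UP_TO_DATE as the required disk state:

    if (get_ldev_if_state(device, D_UP_TO_DATE)) {
    	/* device->ldev is guaranteed to stay valid in here. */
    	do_something_with(device->ldev);	/* hypothetical */
    	put_ldev(device);
    }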
2067 static inline int drbd_get_max_buffers(struct drbd_device *device)
2073 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2080 static inline int drbd_state_is_stable(struct drbd_device *device)
2082 union drbd_dev_state s = device->state;
2116 if (first_peer_device(device)->connection->agreed_pro_version < 96)
2150 static inline int drbd_suspended(struct drbd_device *device)
2152 struct drbd_resource *resource = device->resource;
2157 static inline bool may_inc_ap_bio(struct drbd_device *device)
2159 int mxb = drbd_get_max_buffers(device);
2161 if (drbd_suspended(device))
2163 if (atomic_read(&device->suspend_cnt))
2171 if (!drbd_state_is_stable(device))
2176 if (atomic_read(&device->ap_bio_cnt) > mxb)
2178 if (test_bit(BITMAP_IO, &device->flags))
2183 static inline bool inc_ap_bio_cond(struct drbd_device *device)
2187 spin_lock_irq(&device->resource->req_lock);
2188 rv = may_inc_ap_bio(device);
2190 atomic_inc(&device->ap_bio_cnt);
2191 spin_unlock_irq(&device->resource->req_lock);
2196 static inline void inc_ap_bio(struct drbd_device *device)
2199 * as long as the device is suspended
2206 wait_event(device->misc_wait, inc_ap_bio_cond(device));
2209 static inline void dec_ap_bio(struct drbd_device *device)
2211 int mxb = drbd_get_max_buffers(device);
2212 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2214 D_ASSERT(device, ap_bio >= 0);
2216 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2217 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2218 drbd_queue_work(&first_peer_device(device)->
2220 &device->bm_io_work.w);
2227 wake_up(&device->misc_wait);
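Lines 2157-2227 form a gated counter for application bios: inc_ap_bio() sleeps on misc_wait until may_inc_ap_bio() admits another request (not suspended, state stable, under the max_buffers limit, no bitmap I/O pending), and dec_ap_bio() wakes the waiters and kicks queued bitmap work. A pairing sketch; submit_one_request() is a placeholder:

    /* Admission control around one application bio; inc_ap_bio() may
     * block while the device is suspended or congested. */
    inc_ap_bio(device);
    submit_one_request(device, bio);	/* hypothetical submission step */
    /* ... later, when that request completes: */
    dec_ap_bio(device);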
2230 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2232 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2233 first_peer_device(device)->connection->agreed_pro_version != 100;
2236 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2238 int changed = device->ed_uuid != val;
2239 device->ed_uuid = val;
2243 static inline int drbd_queue_order_type(struct drbd_device *device)