Lines matching defs:peer_req — each entry below is the matching source line number followed by the line itself.

195 struct drbd_peer_request *peer_req, *tmp;
202 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
203 if (drbd_peer_req_has_active_page(peer_req))
205 list_move(&peer_req->w.list, to_be_freed);
212 struct drbd_peer_request *peer_req, *t;
217 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
218 drbd_free_net_peer_req(device, peer_req);
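
The two fragments above (source lines 195-218) form the net_ee reclaim: finished entries are collected onto a caller-provided list under a lock, then released outside it. Below is a minimal sketch reconstructed from only those lines; the function names and the req_lock spinlock are placeholders/assumptions, since the listing shows neither the enclosing function names nor the lock that guards device->net_ee.

    static void example_reclaim_finished_net_peer_reqs(struct drbd_device *device,
                                                       struct list_head *to_be_freed)
    {
            struct drbd_peer_request *peer_req, *tmp;

            /* entries are queued in send order, so the walk can stop at the
             * first one whose pages the transport still holds */
            list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
                    if (drbd_peer_req_has_active_page(peer_req))
                            break;
                    list_move(&peer_req->w.list, to_be_freed);
            }
    }

    static void example_reclaim_net_peer_reqs(struct drbd_device *device)
    {
            LIST_HEAD(reclaimed);
            struct drbd_peer_request *peer_req, *t;

            spin_lock_irq(&device->resource->req_lock);     /* assumed lock */
            example_reclaim_finished_net_peer_reqs(device, &reclaimed);
            spin_unlock_irq(&device->resource->req_lock);

            list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                    drbd_free_net_peer_req(device, peer_req);
    }
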
363 struct drbd_peer_request *peer_req;
370 peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
371 if (!peer_req) {
384 memset(peer_req, 0, sizeof(*peer_req));
385 INIT_LIST_HEAD(&peer_req->w.list);
386 drbd_clear_interval(&peer_req->i);
387 peer_req->i.size = request_size;
388 peer_req->i.sector = sector;
389 peer_req->submit_jif = jiffies;
390 peer_req->peer_device = peer_device;
391 peer_req->pages = page;
396 peer_req->block_id = id;
398 return peer_req;
401 mempool_free(peer_req, &drbd_ee_mempool);
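
Source lines 363-401 are the body of drbd_alloc_peer_req(); the call sites at lines 1933, 2899 and 5014 show its six-argument form. The reconstruction below is a sketch: the middle part that obtains payload pages is not in the listing, so the drbd_alloc_pages() call, its arguments, and the payload_size parameter name are assumptions (only drbd_free_pages() appears in the listing, at line 411).

    struct drbd_peer_request *
    drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
                        unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask)
    {
            struct drbd_peer_request *peer_req;
            struct page *page = NULL;

            peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
            if (!peer_req)
                    return NULL;

            if (payload_size) {
                    /* assumed helper: payload pages come from DRBD's page pool */
                    page = drbd_alloc_pages(peer_device,
                                            DIV_ROUND_UP(payload_size, PAGE_SIZE),
                                            gfpflags_allow_blocking(gfp_mask));
                    if (!page)
                            goto fail;
            }

            memset(peer_req, 0, sizeof(*peer_req));
            INIT_LIST_HEAD(&peer_req->w.list);
            drbd_clear_interval(&peer_req->i);
            peer_req->i.size = request_size;
            peer_req->i.sector = sector;
            peer_req->submit_jif = jiffies;
            peer_req->peer_device = peer_device;
            peer_req->pages = page;
            /* pending_bios and flags are zero after the memset */
            peer_req->block_id = id;
            return peer_req;

    fail:
            mempool_free(peer_req, &drbd_ee_mempool);
            return NULL;
    }
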
405 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
409 if (peer_req->flags & EE_HAS_DIGEST)
410 kfree(peer_req->digest);
411 drbd_free_pages(device, peer_req->pages, is_net);
412 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
413 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
414 if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
415 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
416 drbd_al_complete_io(device, &peer_req->i);
418 mempool_free(peer_req, &drbd_ee_mempool);
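
Lines 405-418 give nearly the whole of __drbd_free_peer_req(); the reconstruction below just closes the control flow and adds comments. The is_net argument appears to select the page-pool accounting that drbd_free_pages() credits, and drbd_free_peer_req()/drbd_free_net_peer_req() elsewhere in the listing are taken to be thin wrappers passing is_net as 0 and 1 respectively (an assumption; the wrappers themselves are not shown).

    void __drbd_free_peer_req(struct drbd_device *device,
                              struct drbd_peer_request *peer_req, int is_net)
    {
            if (peer_req->flags & EE_HAS_DIGEST)
                    kfree(peer_req->digest);

            drbd_free_pages(device, peer_req->pages, is_net);

            /* by now every bio has completed and the request has been removed
             * from the write_requests interval tree */
            D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
            D_ASSERT(device, drbd_interval_empty(&peer_req->i));

            /* the activity-log reference should already have been dropped; if
             * not, drop it here rather than leak the AL extent */
            if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
                    peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
                    drbd_al_complete_io(device, &peer_req->i);
            }

            mempool_free(peer_req, &drbd_ee_mempool);
    }
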
424 struct drbd_peer_request *peer_req, *t;
432 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
433 __drbd_free_peer_req(device, peer_req, is_net);
446 struct drbd_peer_request *peer_req, *t;
454 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
455 drbd_free_net_peer_req(device, peer_req);
461 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
465 err2 = peer_req->w.cb(&peer_req->w, !!err);
468 drbd_free_peer_req(device, peer_req);
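
Lines 424-433 free a whole list at once; lines 446-468 are the "finish" pass that runs each completed request's callback before freeing it. The sketch below reconstructs that pass under stated assumptions: the enclosing function name is a placeholder, the work list is assumed to be spliced from device->done_ee (where line 2534 queues completed writes), and the lock name and error accumulation are assumed.

    static int example_finish_peer_reqs(struct drbd_device *device)
    {
            LIST_HEAD(work_list);
            LIST_HEAD(reclaimed);
            struct drbd_peer_request *peer_req, *t;
            int err = 0;

            spin_lock_irq(&device->resource->req_lock);     /* assumed lock */
            example_reclaim_finished_net_peer_reqs(device, &reclaimed);
            list_splice_init(&device->done_ee, &work_list); /* assumed source */
            spin_unlock_irq(&device->resource->req_lock);

            list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                    drbd_free_net_peer_req(device, peer_req);

            list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                    int err2;

                    /* the second argument acts as the callback's "cancel"
                     * flag: non-zero once an earlier entry already failed */
                    err2 = peer_req->w.cb(&peer_req->w, !!err);
                    if (err2 && !err)
                            err = err2;
                    drbd_free_peer_req(device, peer_req);
            }
            return err;
    }
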
1593 static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1600 peer_req->flags |= EE_ZEROOUT;
1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1603 peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
1604 peer_req->flags |= EE_WAS_ERROR;
1605 drbd_endio_write_sec_final(peer_req);
1609 struct drbd_peer_request *peer_req)
1612 sector_t s = peer_req->i.sector;
1613 sector_t nr = peer_req->i.size >> 9;
1614 if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
1615 peer_req->flags |= EE_WAS_ERROR;
1616 drbd_endio_write_sec_final(peer_req);
1623 * @peer_req: peer request
1638 struct drbd_peer_request *peer_req,
1644 struct page *page = peer_req->pages;
1645 sector_t sector = peer_req->i.sector;
1646 unsigned data_size = peer_req->i.size;
1657 if (peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) {
1660 conn_wait_active_ee_empty(peer_req->peer_device->connection);
1663 peer_req->submit_jif = jiffies;
1664 peer_req->flags |= EE_SUBMITTED;
1668 if (list_empty(&peer_req->w.list)) {
1670 list_add_tail(&peer_req->w.list, &device->active_ee);
1674 if (peer_req->flags & (EE_TRIM|EE_ZEROOUT))
1675 drbd_issue_peer_discard_or_zero_out(device, peer_req);
1677 drbd_issue_peer_wsame(device, peer_req);
1695 /* > peer_req->i.sector, unless this is the first bio */
1699 bio->bi_private = peer_req;
1717 atomic_set(&peer_req->pending_bios, n_bios);
1719 peer_req->submit_jif = jiffies;
1720 peer_req->flags |= EE_SUBMITTED;
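
Lines 1638-1720 are drbd_submit_peer_request(). Trim, zero-out and write-same requests carry no payload bios, so they take a shortcut (lines 1657-1677): wait until the connection's active_ee has drained, mark the request submitted, link it onto device->active_ee, and hand it to the helper matching its flags. Ordinary data requests instead get one or more bios built over peer_req->pages, with bio->bi_private pointing back at the request and the bio count recorded in peer_req->pending_bios (lines 1695-1720). The shortcut is sketched below as a standalone helper; the helper name and the req_lock are assumptions.

    static int example_submit_special_peer_request(struct drbd_device *device,
                                                   struct drbd_peer_request *peer_req)
    {
            if (!(peer_req->flags & (EE_TRIM | EE_WRITE_SAME | EE_ZEROOUT)))
                    return -EINVAL; /* normal data takes the bio path instead */

            /* let all in-flight peer writes of this connection finish first */
            conn_wait_active_ee_empty(peer_req->peer_device->connection);

            peer_req->submit_jif = jiffies;
            peer_req->flags |= EE_SUBMITTED;

            spin_lock_irq(&device->resource->req_lock);     /* assumed lock */
            if (list_empty(&peer_req->w.list))
                    list_add_tail(&peer_req->w.list, &device->active_ee);
            spin_unlock_irq(&device->resource->req_lock);

            if (peer_req->flags & (EE_TRIM | EE_ZEROOUT))
                    drbd_issue_peer_discard_or_zero_out(device, peer_req);
            else
                    drbd_issue_peer_wsame(device, peer_req);

            return 0;
    }
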
1740 struct drbd_peer_request *peer_req)
1742 struct drbd_interval *i = &peer_req->i;
1864 struct drbd_peer_request *peer_req;
1933 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1934 if (!peer_req)
1937 peer_req->flags |= EE_WRITE;
1939 peer_req->flags |= EE_TRIM;
1940 return peer_req;
1943 peer_req->flags |= EE_ZEROOUT;
1944 return peer_req;
1947 peer_req->flags |= EE_WRITE_SAME;
1951 page = peer_req->pages;
1962 drbd_free_peer_req(device, peer_req);
1969 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1973 drbd_free_peer_req(device, peer_req);
1978 return peer_req;
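
Lines 1864-1978 are read_in_block(), which turns an incoming write payload into a peer request: trim and zero-out packets return early with no pages (lines 1937-1944), write-same sets EE_WRITE_SAME, and after the payload has been received the optional integrity digest is verified. That final check is sketched below; dig_in, dig_vv and digest_size stand in for the receive buffers the listing does not show.

    static struct drbd_peer_request *
    example_verify_peer_digest(struct drbd_peer_device *peer_device,
                               struct drbd_peer_request *peer_req,
                               const void *dig_in, void *dig_vv,
                               unsigned int digest_size, unsigned int data_size)
    {
            struct drbd_device *device = peer_device->device;

            if (!peer_device->connection->peer_integrity_tfm)
                    return peer_req;        /* no integrity checking configured */

            /* recompute the digest over what actually landed in the pages and
             * compare it with the digest that arrived on the wire */
            drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm,
                              peer_req, dig_vv, data_size);
            if (memcmp(dig_in, dig_vv, digest_size)) {
                    /* corrupted in transit: drop the request rather than
                     * write bad data */
                    drbd_free_peer_req(device, peer_req);
                    return NULL;
            }
            return peer_req;
    }
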
2063 struct drbd_peer_request *peer_req =
2065 struct drbd_peer_device *peer_device = peer_req->peer_device;
2067 sector_t sector = peer_req->i.sector;
2070 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2072 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2073 drbd_set_in_sync(device, sector, peer_req->i.size);
2074 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
2077 drbd_rs_failed_io(device, sector, peer_req->i.size);
2079 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2090 struct drbd_peer_request *peer_req;
2092 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
2093 if (!peer_req)
2102 peer_req->w.cb = e_end_resync_block;
2103 peer_req->submit_jif = jiffies;
2106 list_add_tail(&peer_req->w.list, &device->sync_ee);
2110 if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
2117 list_del(&peer_req->w.list);
2120 drbd_free_peer_req(device, peer_req);
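
Lines 2090-2120 receive a resync data block: read_in_block() builds the request, the callback is set to e_end_resync_block, and the request is put on device->sync_ee and submitted as REQ_OP_WRITE. Lines 2063-2079 are presumably e_end_resync_block itself, given the w.cb assignments at lines 2102 and 5021; a reconstruction is sketched below (counters and other bookkeeping the listing does not show are omitted).

    static int e_end_resync_block(struct drbd_work *w, int unused)
    {
            struct drbd_peer_request *peer_req =
                    container_of(w, struct drbd_peer_request, w);
            struct drbd_peer_device *peer_device = peer_req->peer_device;
            struct drbd_device *device = peer_device->device;
            sector_t sector = peer_req->i.sector;
            int err;

            /* resync requests are never inserted into the interval tree */
            D_ASSERT(device, drbd_interval_empty(&peer_req->i));

            if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                    drbd_set_in_sync(device, sector, peer_req->i.size);
                    err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
            } else {
                    /* account the range as failed resync I/O and tell the
                     * peer the block is still out of sync */
                    drbd_rs_failed_io(device, sector, peer_req->i.size);
                    err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
            }
            return err;
    }
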
2237 struct drbd_peer_request *peer_req =
2239 struct drbd_peer_device *peer_device = peer_req->peer_device;
2241 sector_t sector = peer_req->i.sector;
2244 if (peer_req->flags & EE_SEND_WRITE_ACK) {
2245 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2248 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
2250 err = drbd_send_ack(peer_device, pcmd, peer_req);
2252 drbd_set_in_sync(device, sector, peer_req->i.size);
2254 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2263 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
2265 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2266 drbd_remove_epoch_entry_interval(device, peer_req);
2267 if (peer_req->flags & EE_RESTART_REQUESTS)
2268 restart_conflicting_writes(device, sector, peer_req->i.size);
2271 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2273 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
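
Lines 2237-2273 are e_end_block(), the completion callback installed at line 2629 for application writes. The ack decision for the EE_SEND_WRITE_ACK (protocol C) branch is sketched below; the exact resync-state test is not in the listing, so it is passed in as an assumed resync_active flag, P_WRITE_ACK is taken as the non-resync counterpart, and the interval-tree / epoch cleanup that follows (lines 2263-2273) is left out.

    static int example_protocol_c_ack(struct drbd_peer_device *peer_device,
                                      struct drbd_peer_request *peer_req,
                                      bool resync_active /* assumed predicate */)
    {
            struct drbd_device *device = peer_device->device;
            sector_t sector = peer_req->i.sector;
            int err;

            if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                    /* if a resync is running and this block may be marked in
                     * sync, use the stronger P_RS_WRITE_ACK */
                    enum drbd_packet pcmd =
                            (resync_active && (peer_req->flags & EE_MAY_SET_IN_SYNC)) ?
                            P_RS_WRITE_ACK : P_WRITE_ACK;
                    err = drbd_send_ack(peer_device, pcmd, peer_req);
                    if (pcmd == P_RS_WRITE_ACK)
                            drbd_set_in_sync(device, sector, peer_req->i.size);
            } else {
                    /* the local write failed: a negative ack makes the peer
                     * keep the block out of sync rather than trust our copy */
                    err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
            }
            return err;
    }
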
2280 struct drbd_peer_request *peer_req =
2282 struct drbd_peer_device *peer_device = peer_req->peer_device;
2285 err = drbd_send_ack(peer_device, ack, peer_req);
2298 struct drbd_peer_request *peer_req =
2300 struct drbd_connection *connection = peer_req->peer_device->connection;
2343 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2350 if (overlaps(peer_req->i.sector, peer_req->i.size,
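
overlapping_resync_write() (line 2343) walks the in-flight resync writes and tests each against the incoming application write with overlaps(), which compares two ranges given as a start sector plus a size in bytes. A restatement of that predicate, assuming the usual byte-to-sector conversion by >> 9 seen elsewhere in the listing (e.g. line 1603):

    /* true iff [s1, s1 + l1 bytes) and [s2, s2 + l2 bytes) share any sector */
    static inline bool example_overlaps(sector_t s1, unsigned int l1,
                                        sector_t s2, unsigned int l2)
    {
            return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
    }
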
2478 struct drbd_peer_request *peer_req)
2480 struct drbd_connection *connection = peer_req->peer_device->connection;
2482 sector_t sector = peer_req->i.sector;
2483 const unsigned int size = peer_req->i.size;
2492 drbd_insert_interval(&device->write_requests, &peer_req->i);
2496 if (i == &peer_req->i)
2532 peer_req->w.cb = superseded ? e_send_superseded :
2534 list_add_tail(&peer_req->w.list, &device->done_ee);
2535 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
2574 peer_req->flags |= EE_RESTART_REQUESTS;
2581 drbd_remove_epoch_entry_interval(device, peer_req);
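
handle_write_conflicts() (lines 2478-2581) first inserts the peer write's interval into device->write_requests (line 2492) and then inspects every overlapping interval, skipping the request's own entry (line 2496). Depending on the outcome the request is either discarded as superseded or the conflicting local requests are marked for restart via EE_RESTART_REQUESTS (line 2574), which e_end_block() later acts on (lines 2267-2268). The superseded path queues an ack instead of submitting the write; a sketch of that hand-off follows. e_send_retry_write for the non-superseded branch is an assumption, since line 2532 is truncated in the listing.

    static void example_queue_conflict_ack(struct drbd_connection *connection,
                                           struct drbd_device *device,
                                           struct drbd_peer_request *peer_req,
                                           bool superseded)
    {
            /* do not submit the write; let the ack sender tell the peer how
             * the conflict was resolved */
            peer_req->w.cb = superseded ? e_send_superseded : e_send_retry_write;
            list_add_tail(&peer_req->w.list, &device->done_ee);

            /* the ack sender work eventually runs peer_req->w.cb and frees
             * the request along with the rest of done_ee */
            queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
    }
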
2592 struct drbd_peer_request *peer_req;
2623 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2624 if (!peer_req) {
2629 peer_req->w.cb = e_end_block;
2630 peer_req->submit_jif = jiffies;
2631 peer_req->flags |= EE_APPLICATION;
2637 D_ASSERT(peer_device, peer_req->i.size > 0);
2639 D_ASSERT(peer_device, peer_req->pages == NULL);
2643 peer_req->flags |= EE_ZEROOUT;
2645 D_ASSERT(peer_device, peer_req->i.size > 0);
2647 D_ASSERT(peer_device, peer_req->pages == NULL);
2650 peer_req->flags |= EE_TRIM;
2651 } else if (peer_req->pages == NULL) {
2652 D_ASSERT(device, peer_req->i.size == 0);
2657 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2660 peer_req->epoch = connection->current_epoch;
2661 atomic_inc(&peer_req->epoch->epoch_size);
2662 atomic_inc(&peer_req->epoch->active);
2681 peer_req->flags |= EE_SEND_WRITE_ACK;
2690 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2696 peer_req->flags |= EE_IN_INTERVAL_TREE;
2701 err = handle_write_conflicts(device, peer_req);
2718 if ((peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) == 0)
2719 list_add_tail(&peer_req->w.list, &device->active_ee);
2723 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2727 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2728 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2729 drbd_al_begin_io(device, &peer_req->i);
2730 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2733 err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2741 list_del(&peer_req->w.list);
2742 drbd_remove_epoch_entry_interval(device, peer_req);
2744 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2745 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
2746 drbd_al_complete_io(device, &peer_req->i);
2750 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
2752 drbd_free_peer_req(device, peer_req);
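
Lines 2592-2752 are the application-write receive path: read_in_block() builds the request (line 2623), e_end_block is installed as its callback, the request joins the current epoch (lines 2660-2662), the ack policy is chosen per protocol (lines 2681-2690), conflicts are handled, and the request is linked onto active_ee and submitted. If drbd_submit_peer_request() fails (line 2733), everything done so far must be rolled back; that unwind (lines 2741-2752) is sketched below, with the locking around the list and interval removal elided.

    static void example_unwind_failed_peer_write(struct drbd_connection *connection,
                                                 struct drbd_device *device,
                                                 struct drbd_peer_request *peer_req)
    {
            /* take it back off active_ee and out of the interval tree */
            list_del(&peer_req->w.list);
            drbd_remove_epoch_entry_interval(device, peer_req);

            /* drop the activity-log reference taken before submission */
            if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
                    peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
                    drbd_al_complete_io(device, &peer_req->i);
            }

            /* release the epoch reference and free the request */
            drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
            drbd_free_peer_req(device, peer_req);
    }
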
2842 struct drbd_peer_request *peer_req;
2899 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2901 if (!peer_req) {
2908 peer_req->w.cb = w_e_end_data_req;
2911 peer_req->flags |= EE_APPLICATION;
2919 peer_req->flags |= EE_RS_THIN_REQ;
2922 peer_req->w.cb = w_e_end_rsdata_req;
2938 peer_req->digest = di;
2939 peer_req->flags |= EE_HAS_DIGEST;
2946 peer_req->w.cb = w_e_end_csum_rs_req;
2954 peer_req->w.cb = w_e_end_ov_reply;
2978 peer_req->w.cb = w_e_end_ov_req;
3014 list_add_tail(&peer_req->w.list, &device->read_ee);
3031 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
3040 list_del(&peer_req->w.list);
3045 drbd_free_peer_req(device, peer_req);
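
Lines 2842-3045 are the read-request receive path: one allocation and submission routine serves normal reads, resync reads, checksum-based resync and online verify, and only the completion callback differs. The dispatch is sketched below; the packet constant names and the exact case-to-callback mapping are assumptions reconstructed around the callback assignments the listing does show, and the digest attachment for the checksum/verify-reply cases (lines 2938-2939) is omitted.

    static void example_pick_read_callback(struct drbd_peer_request *peer_req,
                                           enum drbd_packet cmd)
    {
            switch (cmd) {
            case P_DATA_REQUEST:
                    /* the peer wants our current data for this block */
                    peer_req->w.cb = w_e_end_data_req;
                    break;
            case P_RS_THIN_REQ:
                    /* thin-provisioning aware resync read */
                    peer_req->flags |= EE_RS_THIN_REQ;
                    /* fall through */
            case P_RS_DATA_REQUEST:
                    peer_req->w.cb = w_e_end_rsdata_req;
                    break;
            case P_CSUM_RS_REQUEST:
                    /* checksum-based resync: send data only if digests differ */
                    peer_req->w.cb = w_e_end_csum_rs_req;
                    break;
            case P_OV_REPLY:
                    peer_req->w.cb = w_e_end_ov_reply;
                    break;
            case P_OV_REQUEST:
                    peer_req->w.cb = w_e_end_ov_req;
                    break;
            default:
                    break;
            }
    }
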
5011 struct drbd_peer_request *peer_req;
5014 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
5016 if (!peer_req) {
5021 peer_req->w.cb = e_end_resync_block;
5022 peer_req->submit_jif = jiffies;
5023 peer_req->flags |= EE_TRIM;
5026 list_add_tail(&peer_req->w.list, &device->sync_ee);
5030 err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
5034 list_del(&peer_req->w.list);
5037 drbd_free_peer_req(device, peer_req);
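
Lines 5011-5037 look like the handler for a peer-reported unallocated resync block: instead of receiving payload, a page-less peer request is queued on sync_ee and completed through the same e_end_resync_block callback as a normal resync write. A sketch follows; the function name, the REQ_OP_DISCARD choice for the unnamed op variable at line 5030, and the omitted locking around sync_ee are all assumptions.

    static int example_handle_rs_deallocated(struct drbd_peer_device *peer_device,
                                             sector_t sector, unsigned int size)
    {
            struct drbd_device *device = peer_device->device;
            struct drbd_peer_request *peer_req;
            int err;

            /* no payload: request_size = size, payload_size = 0 */
            peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
                                           size, 0, GFP_NOIO);
            if (!peer_req)
                    return -ENOMEM;

            peer_req->w.cb = e_end_resync_block;
            peer_req->submit_jif = jiffies;
            peer_req->flags |= EE_TRIM;

            list_add_tail(&peer_req->w.list, &device->sync_ee);

            err = drbd_submit_peer_request(device, peer_req, REQ_OP_DISCARD, 0,
                                           DRBD_FAULT_RS_WR);
            if (err) {
                    list_del(&peer_req->w.list);
                    drbd_free_peer_req(device, peer_req);
            }
            return err;
    }
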