Lines Matching defs:peer_req
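
(All file line numbers below are positions in the searched source; judging by the function and packet names, this is the DRBD receiver path in the Linux kernel, apparently drivers/block/drbd/drbd_receiver.c.)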

196 struct drbd_peer_request *peer_req, *tmp;
203 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
204 if (drbd_peer_req_has_active_page(peer_req))
206 list_move(&peer_req->w.list, to_be_freed);
213 struct drbd_peer_request *peer_req, *t;
218 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
219 drbd_free_net_peer_req(device, peer_req);
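
Lines 196-219 are the net_ee reclaim idiom: entries whose pages are still referenced are skipped, the rest are moved to a private list under the lock and freed only after the lock is dropped. A minimal sketch of that pattern; struct req, has_active_page and reclaim_net_reqs() are illustrative names, not drbd's:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct req {
        struct list_head list;
        bool has_active_page;   /* stand-in for drbd_peer_req_has_active_page() */
    };

    static void reclaim_net_reqs(struct list_head *net_ee, spinlock_t *lock)
    {
        LIST_HEAD(to_be_freed);
        struct req *r, *tmp;

        spin_lock_irq(lock);
        list_for_each_entry_safe(r, tmp, net_ee, list) {
            if (r->has_active_page)
                continue;                       /* still in use, leave it */
            list_move(&r->list, &to_be_freed);  /* like line 206 */
        }
        spin_unlock_irq(lock);

        /* free outside the lock, as lines 218-219 do */
        list_for_each_entry_safe(r, tmp, &to_be_freed, list)
            kfree(r);
    }

Moving the entries out first keeps the lock hold time short and lets the free path sleep or take other locks safely.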
364 struct drbd_peer_request *peer_req;
371 peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
372 if (!peer_req) {
385 memset(peer_req, 0, sizeof(*peer_req));
386 INIT_LIST_HEAD(&peer_req->w.list);
387 drbd_clear_interval(&peer_req->i);
388 peer_req->i.size = request_size;
389 peer_req->i.sector = sector;
390 peer_req->submit_jif = jiffies;
391 peer_req->peer_device = peer_device;
392 peer_req->pages = page;
397 peer_req->block_id = id;
399 return peer_req;
402 mempool_free(peer_req, &drbd_ee_mempool);
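
Lines 364-402 are the allocator: a mempool allocation with __GFP_HIGHMEM masked out (the struct is addressed directly from kernel context), followed by field-by-field initialization. A hedged sketch with a simplified request type; the real struct and pool live in drbd:

    #include <linux/list.h>
    #include <linux/mempool.h>
    #include <linux/string.h>
    #include <linux/jiffies.h>

    struct peer_req {                   /* simplified stand-in */
        struct list_head list;
        sector_t sector;
        unsigned int size;
        unsigned long submit_jif;
        struct page *pages;
        u64 block_id;
    };

    static struct peer_req *alloc_peer_req(mempool_t *pool, u64 id,
                                           sector_t sector, unsigned int size,
                                           struct page *pages, gfp_t gfp)
    {
        struct peer_req *req = mempool_alloc(pool, gfp & ~__GFP_HIGHMEM);

        if (!req)
            return NULL;

        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        req->sector = sector;
        req->size = size;
        req->submit_jif = jiffies;
        req->pages = pages;
        req->block_id = id;
        return req;
    }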
406 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
410 if (peer_req->flags & EE_HAS_DIGEST)
411 kfree(peer_req->digest);
412 drbd_free_pages(device, peer_req->pages, is_net);
413 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
414 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
415 if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
416 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
417 drbd_al_complete_io(device, &peer_req->i);
419 mempool_free(peer_req, &drbd_ee_mempool);
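
The matching free path (406-419): the digest is released only when a flag records that one was attached, assertions catch requests freed with bios still pending or with a live interval, and a leaked EE_CALL_AL_COMPLETE_IO reference is cleaned up defensively before mempool_free(). Sketch, assuming the struct above also carries flags, a digest pointer and an atomic pending_bios counter (REQ_HAS_DIGEST is an illustrative flag name):

    #include <linux/slab.h>

    static void free_peer_req(mempool_t *pool, struct peer_req *req)
    {
        if (req->flags & REQ_HAS_DIGEST)    /* like EE_HAS_DIGEST */
            kfree(req->digest);
        /* freeing with bios still in flight would be a bug */
        WARN_ON(atomic_read(&req->pending_bios) != 0);
        mempool_free(req, pool);
    }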
425 struct drbd_peer_request *peer_req, *t;
433 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
434 __drbd_free_peer_req(device, peer_req, is_net);
447 struct drbd_peer_request *peer_req, *t;
455 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
456 drbd_free_net_peer_req(device, peer_req);
462 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
466 err2 = peer_req->w.cb(&peer_req->w, !!err);
469 drbd_free_peer_req(device, peer_req);
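
Lines 447-469 drain done_ee: net requests are reclaimed first, then each finished request's completion callback (w.cb) runs before the request is freed, with the first error folded into the return value. Sketched with the simplified type, assuming it gained an int (*cb)(struct peer_req *, int) member:

    static int finish_peer_reqs(mempool_t *pool, struct list_head *work_list)
    {
        struct peer_req *req, *t;
        int err = 0, err2;

        list_for_each_entry_safe(req, t, work_list, list) {
            /* the callback may send an ack; run it before freeing */
            err2 = req->cb(req, !!err);
            if (err2 && !err)
                err = err2;             /* keep the first error */
            free_peer_req(pool, req);
        }
        return err;
    }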
1593 static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1600 peer_req->flags |= EE_ZEROOUT;
1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1603 peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
1604 peer_req->flags |= EE_WAS_ERROR;
1605 drbd_endio_write_sec_final(peer_req);
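
drbd_issue_peer_discard_or_zero_out() (1593-1605) falls back from a trim to an explicit zero-out when discard cannot guarantee zeroed data, and records failure in the request flags (EE_WAS_ERROR) instead of returning it, since completion still has to run. Underneath sit the block-layer helpers, roughly as below on recent kernels (the exact signatures changed across versions, so treat them as an assumption):

    #include <linux/blkdev.h>

    static int issue_trim_or_zero(struct block_device *bdev, sector_t sector,
                                  unsigned int bytes, bool zero_out)
    {
        /* bytes >> 9 converts a byte count to 512-byte sectors,
         * as in "peer_req->i.size >> 9" at line 1603 */
        if (zero_out)
            return blkdev_issue_zeroout(bdev, sector, bytes >> 9,
                                        GFP_NOIO, 0);
        return blkdev_issue_discard(bdev, sector, bytes >> 9, GFP_NOIO);
    }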
1608 static int peer_request_fault_type(struct drbd_peer_request *peer_req)
1610 if (peer_req_op(peer_req) == REQ_OP_READ) {
1611 return peer_req->flags & EE_APPLICATION ?
1614 return peer_req->flags & EE_APPLICATION ?
1621 * @peer_req: peer request
1634 int drbd_submit_peer_request(struct drbd_peer_request *peer_req)
1636 struct drbd_device *device = peer_req->peer_device->device;
1639 struct page *page = peer_req->pages;
1640 sector_t sector = peer_req->i.sector;
1641 unsigned int data_size = peer_req->i.size;
1651 if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
1654 conn_wait_active_ee_empty(peer_req->peer_device->connection);
1657 peer_req->submit_jif = jiffies;
1658 peer_req->flags |= EE_SUBMITTED;
1662 if (list_empty(&peer_req->w.list)) {
1664 list_add_tail(&peer_req->w.list, &device->active_ee);
1668 drbd_issue_peer_discard_or_zero_out(device, peer_req);
1686 if (!(peer_req_op(peer_req) == REQ_OP_WRITE ||
1687 peer_req_op(peer_req) == REQ_OP_READ)) {
1688 drbd_err(device, "Invalid bio op received: 0x%x\n", peer_req->opf);
1692 bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO);
1693 /* > peer_req->i.sector, unless this is the first bio */
1695 bio->bi_private = peer_req;
1713 atomic_set(&peer_req->pending_bios, n_bios);
1715 peer_req->submit_jif = jiffies;
1716 peer_req->flags |= EE_SUBMITTED;
1722 drbd_submit_bio_noacct(device, peer_request_fault_type(peer_req), bio);
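
drbd_submit_peer_request() (1634-1722) turns a peer request into bios: trim/zero-out requests take the helper above, anything but READ/WRITE is rejected (1686-1688), and pending_bios is set to the bio count before submission so the endio handler can detect the last completion. A single-bio sketch of that shape, assuming a single-page payload; peer_req_endio() is a hypothetical completion handler:

    #include <linux/bio.h>

    static void peer_req_endio(struct bio *bio);    /* hypothetical */

    static int submit_peer_req(struct block_device *bdev,
                               struct peer_req *req, blk_opf_t opf)
    {
        struct bio *bio = bio_alloc(bdev, 1, opf, GFP_NOIO);

        bio->bi_iter.bi_sector = req->sector;
        bio->bi_private = req;              /* like line 1695 */
        bio->bi_end_io = peer_req_endio;
        if (!bio_add_page(bio, req->pages, req->size, 0)) {
            bio_put(bio);
            return -EIO;
        }

        /* set before submission: endio decrements it */
        atomic_set(&req->pending_bios, 1);
        req->submit_jif = jiffies;
        submit_bio_noacct(bio);
        return 0;
    }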
1728 struct drbd_peer_request *peer_req)
1730 struct drbd_interval *i = &peer_req->i;
1852 struct drbd_peer_request *peer_req;
1908 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1909 if (!peer_req)
1912 peer_req->flags |= EE_WRITE;
1914 peer_req->flags |= EE_TRIM;
1915 return peer_req;
1918 peer_req->flags |= EE_ZEROOUT;
1919 return peer_req;
1924 page = peer_req->pages;
1935 drbd_free_peer_req(device, peer_req);
1942 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1946 drbd_free_peer_req(device, peer_req);
1951 return peer_req;
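
read_in_block() (1852-1951) allocates the request, returns early for trim/zero-out packets that carry no payload (1914-1919), and otherwise fills the pages from the socket and recomputes the integrity digest; a mismatch frees the request and fails the receive. The verify-then-reject tail, sketched with a hypothetical compute_digest() standing in for drbd_csum_ee_size():

    #include <crypto/hash.h>
    #include <linux/string.h>

    static struct peer_req *verify_payload(struct crypto_shash *tfm,
                                           mempool_t *pool, struct peer_req *req,
                                           const void *dig_in, void *dig_vv,
                                           unsigned int digest_size)
    {
        if (tfm) {
            compute_digest(tfm, req, dig_vv);       /* hypothetical helper */
            if (memcmp(dig_in, dig_vv, digest_size)) {
                /* corrupted on the wire: drop it, fail the receive */
                free_peer_req(pool, req);
                return NULL;
            }
        }
        return req;
    }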
2036 struct drbd_peer_request *peer_req =
2038 struct drbd_peer_device *peer_device = peer_req->peer_device;
2040 sector_t sector = peer_req->i.sector;
2043 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2045 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2046 drbd_set_in_sync(peer_device, sector, peer_req->i.size);
2047 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
2050 drbd_rs_failed_io(peer_device, sector, peer_req->i.size);
2052 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2063 struct drbd_peer_request *peer_req;
2065 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
2066 if (!peer_req)
2075 peer_req->w.cb = e_end_resync_block;
2076 peer_req->opf = REQ_OP_WRITE;
2077 peer_req->submit_jif = jiffies;
2080 list_add_tail(&peer_req->w.list, &device->sync_ee);
2084 if (drbd_submit_peer_request(peer_req) == 0)
2090 list_del(&peer_req->w.list);
2093 drbd_free_peer_req(device, peer_req);
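
recv_resync_read() (2063-2093) shows the submit-or-roll-back pattern that repeats throughout this file: queue the request on a device list under the lock, submit, and on failure unlink it again before freeing. Sketch, reusing the helpers above; the lock and list parameters stand in for drbd's device state:

    static int queue_and_submit(struct block_device *bdev, mempool_t *pool,
                                spinlock_t *lock, struct list_head *sync_ee,
                                struct peer_req *req)
    {
        spin_lock_irq(lock);
        list_add_tail(&req->list, sync_ee);     /* like line 2080 */
        spin_unlock_irq(lock);

        if (submit_peer_req(bdev, req, REQ_OP_WRITE) == 0)
            return 0;

        /* submission failed: undo the queueing before freeing */
        spin_lock_irq(lock);
        list_del(&req->list);
        spin_unlock_irq(lock);
        free_peer_req(pool, req);
        return -EIO;
    }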
2207 struct drbd_peer_request *peer_req =
2209 struct drbd_peer_device *peer_device = peer_req->peer_device;
2211 sector_t sector = peer_req->i.sector;
2214 if (peer_req->flags & EE_SEND_WRITE_ACK) {
2215 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2218 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
2220 err = drbd_send_ack(peer_device, pcmd, peer_req);
2222 drbd_set_in_sync(peer_device, sector, peer_req->i.size);
2224 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2233 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
2235 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2236 drbd_remove_epoch_entry_interval(device, peer_req);
2237 if (peer_req->flags & EE_RESTART_REQUESTS)
2238 restart_conflicting_writes(device, sector, peer_req->i.size);
2241 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2243 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
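
e_end_block() (2207-2243) decides which ack goes back for a completed write: with EE_SEND_WRITE_ACK (protocol C) a successful write acks positively, P_RS_WRITE_ACK additionally marks the range in sync locally, and any error becomes P_NEG_ACK so the peer marks the block out of sync. Condensed restatement using drbd's real names; the real code also gates P_RS_WRITE_ACK on the current resync state:

    if (!(peer_req->flags & EE_WAS_ERROR)) {
        enum drbd_packet cmd = (peer_req->flags & EE_MAY_SET_IN_SYNC) ?
                               P_RS_WRITE_ACK : P_WRITE_ACK;
        err = drbd_send_ack(peer_device, cmd, peer_req);
        if (cmd == P_RS_WRITE_ACK)
            drbd_set_in_sync(peer_device, sector, peer_req->i.size);
    } else {
        err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
    }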
2250 struct drbd_peer_request *peer_req =
2252 struct drbd_peer_device *peer_device = peer_req->peer_device;
2255 err = drbd_send_ack(peer_device, ack, peer_req);
2268 struct drbd_peer_request *peer_req =
2270 struct drbd_connection *connection = peer_req->peer_device->connection;
2313 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2320 if (overlaps(peer_req->i.sector, peer_req->i.size,
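
overlapping_resync_write() (2313-2320) scans sync_ee for an in-flight resync write intersecting the incoming request. The overlap test is plain half-open interval arithmetic; in drbd, sizes are bytes and sectors are 512 bytes, hence the >> 9. A restatement of the check as a function:

    /* true iff [s1, s1 + l1) and [s2, s2 + l2) intersect; l1, l2 in bytes */
    static bool overlaps(sector_t s1, unsigned int l1,
                         sector_t s2, unsigned int l2)
    {
        return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
    }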
2446 struct drbd_peer_request *peer_req)
2448 struct drbd_connection *connection = peer_req->peer_device->connection;
2450 sector_t sector = peer_req->i.sector;
2451 const unsigned int size = peer_req->i.size;
2460 drbd_insert_interval(&device->write_requests, &peer_req->i);
2464 if (i == &peer_req->i)
2500 peer_req->w.cb = superseded ? e_send_superseded :
2502 list_add_tail(&peer_req->w.list, &device->done_ee);
2503 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
2542 peer_req->flags |= EE_RESTART_REQUESTS;
2549 drbd_remove_epoch_entry_interval(device, peer_req);
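
handle_write_conflicts() (2446-2549) inserts the request's interval into the per-device write_requests tree, then walks every overlapping interval, skipping its own freshly inserted node (2464); conflicts are resolved by superseding, waiting, or flagging requests for restart (2500-2542), and the failure path removes the interval again (2549). Skeleton of the walk, assuming drbd_interval.h's drbd_for_each_overlap() helper (the exact iteration macro is an assumption):

    struct drbd_interval *i;

    drbd_insert_interval(&device->write_requests, &peer_req->i);
    drbd_for_each_overlap(i, &device->write_requests, sector, size) {
        if (i == &peer_req->i)
            continue;           /* our own node, not a conflict */
        /* genuine overlap: supersede, wait, or mark for restart */
    }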
2560 struct drbd_peer_request *peer_req;
2590 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2591 if (!peer_req) {
2596 peer_req->w.cb = e_end_block;
2597 peer_req->submit_jif = jiffies;
2598 peer_req->flags |= EE_APPLICATION;
2601 peer_req->opf = wire_flags_to_bio(connection, dp_flags);
2603 D_ASSERT(peer_device, peer_req->i.size > 0);
2604 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_DISCARD);
2605 D_ASSERT(peer_device, peer_req->pages == NULL);
2609 peer_req->flags |= EE_ZEROOUT;
2611 D_ASSERT(peer_device, peer_req->i.size > 0);
2612 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_WRITE_ZEROES);
2613 D_ASSERT(peer_device, peer_req->pages == NULL);
2616 peer_req->flags |= EE_TRIM;
2617 } else if (peer_req->pages == NULL) {
2618 D_ASSERT(device, peer_req->i.size == 0);
2623 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2626 peer_req->epoch = connection->current_epoch;
2627 atomic_inc(&peer_req->epoch->epoch_size);
2628 atomic_inc(&peer_req->epoch->active);
2647 peer_req->flags |= EE_SEND_WRITE_ACK;
2656 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2662 peer_req->flags |= EE_IN_INTERVAL_TREE;
2667 err = handle_write_conflicts(device, peer_req);
2684 if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
2685 list_add_tail(&peer_req->w.list, &device->active_ee);
2689 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2693 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
2694 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2695 drbd_al_begin_io(device, &peer_req->i);
2696 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2699 err = drbd_submit_peer_request(peer_req);
2706 list_del(&peer_req->w.list);
2707 drbd_remove_epoch_entry_interval(device, peer_req);
2709 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2710 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
2711 drbd_al_complete_io(device, &peer_req->i);
2715 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
2717 drbd_free_peer_req(device, peer_req);
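
receive_Data() (2560-2717) is the consumer of most of the machinery above. Two details worth pulling out: the request takes two epoch references on entry, epoch_size for barrier accounting and active for in-flight writes, and the failure path must drop them again or the epoch never closes. Paired excerpts from the listing:

    /* entry (2626-2628): join the current epoch */
    peer_req->epoch = connection->current_epoch;
    atomic_inc(&peer_req->epoch->epoch_size);
    atomic_inc(&peer_req->epoch->active);

    /* failure path (2715): drop the references, possibly
     * retiring the epoch */
    drbd_may_finish_epoch(connection, peer_req->epoch,
                          EV_PUT | EV_CLEANUP);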
2808 struct drbd_peer_request *peer_req;
2864 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2866 if (!peer_req) {
2870 peer_req->opf = REQ_OP_READ;
2874 peer_req->w.cb = w_e_end_data_req;
2876 peer_req->flags |= EE_APPLICATION;
2884 peer_req->flags |= EE_RS_THIN_REQ;
2887 peer_req->w.cb = w_e_end_rsdata_req;
2901 peer_req->digest = di;
2902 peer_req->flags |= EE_HAS_DIGEST;
2909 peer_req->w.cb = w_e_end_csum_rs_req;
2917 peer_req->w.cb = w_e_end_ov_reply;
2941 peer_req->w.cb = w_e_end_ov_req;
2976 list_add_tail(&peer_req->w.list, &device->read_ee);
2993 if (drbd_submit_peer_request(peer_req) == 0)
3001 list_del(&peer_req->w.list);
3006 drbd_free_peer_req(device, peer_req);
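
receive_DataRequest() (2808-3006) serves every read-type packet through one path; what varies is the completion callback installed on the request, plus per-type flags (EE_APPLICATION for application reads, EE_RS_THIN_REQ for thin resync, EE_HAS_DIGEST when a checksum travels with the request). Condensed dispatch, using the callback names from the listing; the switch structure is an assumption about the surrounding code:

    peer_req->opf = REQ_OP_READ;
    switch (pi->cmd) {
    case P_DATA_REQUEST:                /* application read */
        peer_req->w.cb = w_e_end_data_req;
        peer_req->flags |= EE_APPLICATION;
        break;
    case P_RS_DATA_REQUEST:             /* resync read */
        peer_req->w.cb = w_e_end_rsdata_req;
        break;
    case P_CSUM_RS_REQUEST:             /* checksum-based resync */
        peer_req->w.cb = w_e_end_csum_rs_req;
        break;
    case P_OV_REPLY:                    /* online-verify reply */
        peer_req->w.cb = w_e_end_ov_reply;
        break;
    case P_OV_REQUEST:                  /* online-verify request */
        peer_req->w.cb = w_e_end_ov_req;
        break;
    }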
4964 struct drbd_peer_request *peer_req;
4966 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
4968 if (!peer_req) {
4973 peer_req->w.cb = e_end_resync_block;
4974 peer_req->opf = REQ_OP_DISCARD;
4975 peer_req->submit_jif = jiffies;
4976 peer_req->flags |= EE_TRIM;
4979 list_add_tail(&peer_req->w.list, &device->sync_ee);
4983 err = drbd_submit_peer_request(peer_req);
4987 list_del(&peer_req->w.list);
4990 drbd_free_peer_req(device, peer_req);