/* one reference to be put by __drbd_make_request */
atomic_set(&req->completion_ref, 1); /* one kref as long as completion_ref > 0 */
kref_init(&req->kref); return req;
}
/* If called from mod_rq_state (expected normal case) or * drbd_send_and_submit (the less likely normal path), this holds the * req_lock, and req->tl_requests will typicaly be on ->transfer_log, * though it may be still empty (never added to the transfer log). * * If called from do_retry(), we do NOT hold the req_lock, but we are * still allowed to unconditionally list_del(&req->tl_requests),
* because it will be on a local on-stack list only. */
list_del_init(&req->tl_requests);
/* finally remove the request from the conflict detection
* respective block_id verification interval tree. */ if (!drbd_interval_empty(&req->i)) { struct rb_root *root;
/* if it was a write, we may have to set the corresponding * bit(s) out-of-sync first. If it had a local part, we need to
* release the reference to the activity log. */ if (s & RQ_WRITE) { /* Set out-of-sync unless both OK flags are set * (local only or remote failed). * Other places where we set out-of-sync:
* READ with local io-error */
/* There is a special case: * we may notice late that IO was suspended, * and postpone, or schedule for retry, a write, * before it even was submitted or sent. * In that case we do not want to touch the bitmap at all.
*/ struct drbd_peer_device *peer_device = first_peer_device(device); if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) { if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
drbd_set_in_sync(peer_device, req->i.sector, req->i.size);
}
/* one might be tempted to move the drbd_al_complete_io * to the local io completion callback drbd_request_endio. * but, if this was a mirror write, we may only * drbd_al_complete_io after this is RQ_NET_DONE, * otherwise the extent could be dropped from the al * before it has actually been written on the peer. * if we crash before our peer knows about the request, * but after the extent has been dropped from the al, * we would forget to resync the corresponding extent.
*/ if (s & RQ_IN_ACT_LOG) { if (get_ldev_if_state(device, D_FAILED)) {
drbd_al_complete_io(device, &req->i);
put_ldev(device);
} elseif (drbd_ratelimit()) {
drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), " "but my Disk seems to have failed :(\n",
(unsignedlonglong) req->i.sector, req->i.size);
}
}
}
/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	/* NOTE(review): the tail of this function was truncated in the
	 * damaged file; restored from upstream drbd: reset the per-epoch
	 * write count, advance the epoch number, and kick the senders so
	 * they emit the P_BARRIER.  Confirm against project history. */
	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}
/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	/* fix: "constunsigned" (corrupted token) -> "const unsigned" */
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	bio_end_io_acct(req->master_bio, req->start_jif);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}
/* still holds resource->req_lock */ staticvoid drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{ struct drbd_device *device = req->device;
D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
if (!put) return;
if (!atomic_sub_and_test(put, &req->completion_ref)) return;
drbd_req_complete(req, m);
/* local completion may still come in later,
* we need to keep the req object around. */ if (req->rq_state & RQ_LOCAL_ABORTED) return;
if (req->rq_state & RQ_POSTPONED) { /* don't destroy the req object just yet,
* but queue it for retry */
drbd_restart_request(req); return;
}
/* I'd like this to be the only place that manipulates
* req->completion_ref and req->kref. */ staticvoid mod_rq_state(struct drbd_request *req, struct bio_and_error *m, int clear, int set)
{ struct drbd_device *device = req->device; struct drbd_peer_device *peer_device = first_peer_device(device); unsigned s = req->rq_state; int c_put = 0;
if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
set |= RQ_COMPLETION_SUSP;
/* apply */
req->rq_state &= ~clear;
req->rq_state |= set;
/* no change? */ if (req->rq_state == s) return;
/* intent: get references */
kref_get(&req->kref);
if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
atomic_inc(&req->completion_ref);
if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) { if (s & RQ_NET_SENT)
atomic_sub(req->i.size >> 9, &device->ap_in_flight); if (s & RQ_EXP_BARR_ACK)
kref_put(&req->kref, drbd_req_destroy);
req->net_done_jif = jiffies;
/* in ahead/behind mode, or just in case, * before we finally destroy this request,
* the caching pointers must not reference it anymore */
advance_conn_req_next(peer_device, req);
advance_conn_req_ack_pending(peer_device, req);
advance_conn_req_not_net_done(peer_device, req);
}
/* potentially complete and destroy */
/* If we made progress, retry conflicting peer requests, if any. */ if (req->i.waiting)
wake_up(&device->misc_wait);
drbd_req_put_completion_ref(req, m, c_put);
kref_put(&req->kref, drbd_req_destroy);
}
/* Helper for HANDED_OVER_TO_NETWORK. * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)? * Is it also still "PENDING"? * --> If so, clear PENDING and set NET_OK below. * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
* (and we must not set RQ_NET_OK) */ staticinlinebool is_pending_write_protocol_A(struct drbd_request *req)
{ return (req->rq_state &
(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
== (RQ_WRITE|RQ_NET_PENDING);
}
/* obviously this could be coded as many single functions * instead of one huge switch, * or by putting the code directly in the respective locations * (as it has been before). * * but having it this way * enforces that it is all in this one place, where it is easier to audit, * it makes it obvious that whatever "event" "happens" to a request should * happen "atomically" within the req_lock, * and it enforces that we have to think in a very structured manner * about the "events" that may happen to a request during its life time ... * * * peer_device == NULL means local disk
*/ int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct drbd_peer_device *peer_device, struct bio_and_error *m)
{ struct drbd_device *const device = req->device; struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; struct net_conf *nc; int p, rv = 0;
/* does not happen... * initialization done in drbd_req_new case CREATED: break;
*/
case TO_BE_SENT: /* via network */ /* reached via __drbd_make_request
* and from w_read_retry_remote */
D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
p = nc->wire_protocol;
rcu_read_unlock();
req->rq_state |=
p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
mod_rq_state(req, m, 0, RQ_NET_PENDING); break;
case TO_BE_SUBMITTED: /* locally */ /* reached via __drbd_make_request */
D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); break;
case COMPLETED_OK: if (req->rq_state & RQ_WRITE)
device->writ_cnt += req->i.size >> 9; else
device->read_cnt += req->i.size >> 9;
mod_rq_state(req, m, RQ_LOCAL_PENDING,
RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); break;
case ABORT_DISK_IO:
mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); break;
case WRITE_COMPLETED_WITH_ERROR:
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break;
case READ_COMPLETED_WITH_ERROR:
drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
fallthrough; case READ_AHEAD_COMPLETED_WITH_ERROR: /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break;
case DISCARD_COMPLETED_NOTSUPP: case DISCARD_COMPLETED_WITH_ERROR: /* I'd rather not detach from local disk just because it
* failed a REQ_OP_DISCARD. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break;
case QUEUE_FOR_NET_READ: /* READ, and * no local disk, * or target area marked as invalid,
* or just got an io-error. */ /* from __drbd_make_request
* or from bio_endio during read io-error recovery */
/* So we can verify the handle in the answer packet. * Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
D_ASSERT(device, drbd_interval_empty(&req->i));
drbd_insert_interval(&device->read_requests, &req->i);
case QUEUE_FOR_NET_WRITE: /* assert something? */ /* from __drbd_make_request only */
/* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
D_ASSERT(device, drbd_interval_empty(&req->i));
drbd_insert_interval(&device->write_requests, &req->i);
/* NOTE * In case the req ended up on the transfer log before being * queued on the worker, it could lead to this request being * missed during cleanup after connection loss. * So we have to do both operations here, * within the same lock that protects the transfer log. * * _req_add_to_epoch(req); this has to be after the * _maybe_start_new_epoch(req); which happened in * __drbd_make_request, because we now may set the bit * again ourselves to close the current epoch. *
* Add req to the (now) current epoch (barrier). */
/* otherwise we may lose an unplug, which may cause some remote * io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
set_bit(UNPLUG_REMOTE, &device->flags);
/* queue work item to send data */
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
drbd_queue_work(&connection->sender_work,
&req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
p = nc->max_epoch_size;
rcu_read_unlock(); if (connection->current_tle_writes >= p)
start_new_tl_epoch(connection);
break;
case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync;
drbd_queue_work(&connection->sender_work,
&req->w); break;
case READ_RETRY_REMOTE_CANCELED: case SEND_CANCELED: case SEND_FAILED: /* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
mod_rq_state(req, m, RQ_NET_QUEUED, 0); break;
case HANDED_OVER_TO_NETWORK: /* assert something? */ if (is_pending_write_protocol_A(req)) /* this is what is dangerous about protocol A:
* pretend it was successfully written on the peer. */
mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
RQ_NET_SENT|RQ_NET_OK); else
mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); /* It is still not yet RQ_NET_DONE until the * corresponding epoch barrier got acked as well,
* so we know what to dirty on connection loss. */ break;
case OOS_HANDED_TO_NETWORK: /* Was not set PENDING, no longer QUEUED, so is now DONE
* as far as this connection is concerned. */
mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); break;
case CONNECTION_LOST_WHILE_PENDING: /* transfer log cleanup after connection loss */
mod_rq_state(req, m,
RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
RQ_NET_DONE); break;
case CONFLICT_RESOLVED: /* for superseded conflicting writes of multiple primaries, * there is no need to keep anything in the tl, potential * node crashes are covered by the activity log. * * If this request had been marked as RQ_POSTPONED before, * it will actually not be completed, but "restarted",
* resubmitted from the retry worker context. */
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); break;
case WRITE_ACKED_BY_PEER_AND_SIS:
req->rq_state |= RQ_NET_SIS;
fallthrough; case WRITE_ACKED_BY_PEER: /* Normal operation protocol C: successfully written on peer. * During resync, even in protocol != C, * we requested an explicit write ack anyways. * Which means we cannot even assert anything here. * Nothing more to do here. * We want to keep the tl in place for all protocols, to cater
* for volatile write-back caches on lower level devices. */ goto ack_common; case RECV_ACKED_BY_PEER:
D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); /* protocol B; pretends to be successfully written on peer. * see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
ack_common:
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); break;
case POSTPONE_WRITE:
D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); /* If this node has already detected the write conflict, the * worker will be waiting on misc_wait. Wake it up once this * request has completed locally.
*/
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_POSTPONED; if (req->i.waiting)
wake_up(&device->misc_wait); /* Do not clear RQ_NET_PENDING. This request will make further * progress via restart_conflicting_writes() or
* fail_postponed_requests(). Hopefully. */ break;
case NEG_ACKED:
mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); break;
case FAIL_FROZEN_DISK_IO: if (!(req->rq_state & RQ_LOCAL_COMPLETED)) break;
mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); break;
case RESTART_FROZEN_DISK_IO: if (!(req->rq_state & RQ_LOCAL_COMPLETED)) break;
mod_rq_state(req, m,
RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
RQ_LOCAL_PENDING);
rv = MR_READ; if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
drbd_queue_work(&connection->sender_work,
&req->w); break;
case RESEND: /* Simply complete (local only) READs. */ if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); break;
}
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK before the connection loss (B&C only); only P_BARRIER_ACK (or the local completion?) was missing when we suspended. Throwing them out of the TL here by pretending we got a BARRIER_ACK.
During connection handshake, we ensure that the peer was not rebooted. */ if (!(req->rq_state & RQ_NET_OK)) { /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
* in that case we must not set RQ_NET_PENDING. */
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); if (req->w.cb) { /* w.cb expected to be w_send_dblock, or w_send_read_req */
drbd_queue_work(&connection->sender_work,
&req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */ break;
}
fallthrough; /* to BARRIER_ACKED */
case BARRIER_ACKED: /* barrier ack for READ requests does not make sense */ if (!(req->rq_state & RQ_WRITE)) break;
if (req->rq_state & RQ_NET_PENDING) { /* barrier came in before all requests were acked. * this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
} /* Allowed to complete requests, even while suspended. * As this is called for all requests within a matching epoch, * we need to filter, and only set RQ_NET_DONE for those that
* have actually been on the wire. */
mod_rq_state(req, m, RQ_COMPLETION_SUSP,
(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); break;
case DATA_RECEIVED:
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); break;
case QUEUE_AS_DRBD_BARRIER:
start_new_tl_epoch(connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); break;
}
return rv;
}
/* we may do a local read if: * - we are consistent (of course), * - or we are generally inconsistent, * BUT we are still/already IN SYNC for this area. * since size may be bigger than BM_BLOCK_SIZE, * we may need to check several bits.
*/ staticbool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{ unsignedlong sbnr, ebnr;
sector_t esector, nr_sectors;
switch (rbm) { case RB_CONGESTED_REMOTE: returnfalse; case RB_LEAST_PENDING: return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt); case RB_32K_STRIPING: /* stripe_shift = 15 */ case RB_64K_STRIPING: case RB_128K_STRIPING: case RB_256K_STRIPING: case RB_512K_STRIPING: case RB_1M_STRIPING: /* stripe_shift = 20 */
stripe_shift = (rbm - RB_32K_STRIPING + 15); return (sector >> (stripe_shift - 9)) & 1; case RB_ROUND_ROBIN: return test_and_change_bit(READ_BALANCE_RR, &device->flags); case RB_PREFER_REMOTE: returntrue; case RB_PREFER_LOCAL: default: returnfalse;
}
}
/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
/* fix: "staticvoid" (corrupted token) -> "static void".
 * Caller holds resource->req_lock; the lock is dropped around schedule(). */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
			/* Ignore, if already completed to upper layers. */
			if (i->completed)
				continue;
			/* Handle the first found overlap.  After the schedule
			 * we have to restart the tree walk. */
			break;
		}
		if (!i)	/* if any */
			break;

		/* Indicate to wake up device->misc_wait on progress.  */
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}
if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD) return; /* nothing to do ... */
/* If I don't even have good local storage, we can not reasonably try * to pull ahead of the peer. We also need the local reference to make * sure device->act_log is there.
*/ if (!get_ldev_if_state(device, D_UP_TO_DATE)) return;
/* If this returns false, and req->private_bio is still set, * this should be submitted locally. * * If it returns false, but req->private_bio is not set, * we do not have access to good data :( * * Otherwise, this destroys req->private_bio, if any, * and returns true.
*/ staticbool do_remote_read(struct drbd_request *req)
{ struct drbd_device *device = req->device; enum drbd_read_balancing rbm;
if (req->private_bio) { if (!drbd_may_do_local_read(device,
req->i.sector, req->i.size)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(device);
}
}
if (device->state.pdsk != D_UP_TO_DATE) returnfalse;
if (req->private_bio == NULL) returntrue;
/* TODO: improve read balancing decisions, take into account drbd
* protocol, pending requests etc. */
if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(device);
} returntrue;
}
returnfalse;
}
bool drbd_should_do_remote(union drbd_dev_state s)
{
return s.pdsk == D_UP_TO_DATE ||
(s.pdsk >= D_INCONSISTENT &&
s.conn >= C_WF_BITMAP_T &&
s.conn < C_AHEAD); /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
states. */
}
staticbool drbd_should_send_out_of_sync(union drbd_dev_state s)
{ return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
since we enter state C_AHEAD only if proto >= 96 */
}
/* returns number of connections (== 1, for drbd 8.4) * expected to actually write this data,
* which does NOT include those that we are L_AHEAD for. */ staticint drbd_process_write_request(struct drbd_request *req)
{ struct drbd_device *device = req->device; struct drbd_peer_device *peer_device = first_peer_device(device); int remote, send_oos;
/* Need to replicate writes. Unless it is an empty flush, * which is better mapped to a DRBD P_BARRIER packet, * also for drbd wire protocol compatibility reasons. * If this was a flush, just start a new epoch. * Unless the current epoch was empty anyways, or we are not currently
* replicating, in which case there is no point. */ if (unlikely(req->i.size == 0)) { /* The only size==0 bios we expect are empty flushes. */
D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH); if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER, peer_device); return remote;
}
if (bio_op(bio) != REQ_OP_READ)
type = DRBD_FAULT_DT_WR; elseif (bio->bi_opf & REQ_RAHEAD)
type = DRBD_FAULT_DT_RA; else
type = DRBD_FAULT_DT_RD;
/* State may have changed since we grabbed our reference on the * ->ldev member. Double check, and short-circuit to endio. * In case the last activity log transaction failed to get on * stable storage, and this is a WRITE, we may not even submit
* this bio. */ if (get_ldev(device)) /* if (drbd_insert_fault(device, type)) bio_io_error(bio); else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT | ((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM)); else if (bio_op(bio) == REQ_OP_DISCARD) drbd_process_discard_or_zeroes_req(req, EE_TRIM); else submit_bio_noacct(bio); put_ldev(device); } else bio_io_error(bio); }
/* returns the new drbd_request pointer, if the caller is expected to * drbd_send_and_submit() it (to save latency), or NULL if we queued the * request on the submitter thread. * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/ staticstruct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio)
{ constint rw = bio_data_dir(bio); struct drbd_request *req;
/* allocate outside of all locks; */
req = drbd_req_new(device, bio); if (!req) {
dec_ap_biodevice); /* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
drbd_errdevice"could notkmalloc req\n"
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
bio_endio(bio{ return ERR_PTR(-ENOMEM);
}
/* Update disk stats */
req = mempool_alloc(drbd_request_mempool GFP_NOIO);
/* Require at least one path to current data. * We don't want to allow writes on C_STANDALONE D_INCONSISTENT: * We would not allow to read what was written, * we would not have bumped the data generation uuids, * we would cause data divergence for all the wrong reasons. * * If we don't see at least one D_UP_TO_DATE, we will fail this request, * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO, * and queues for retry later.
*/ staticbool may_do_writes(structjava.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 1
{ constunion drbd_dev_state s = device->state; returns.isk == _UP_TO_DATE | s.pdsk= D_UP_TO_DATE
}
/* Per-task block-plug callback state for DRBD. */
struct drbd_plug_cb {
	struct blk_plug_cb cb;
	struct drbd_request *most_recent_req;
	/* do we need more? */
};
staticvoid drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
{ struct drbd_plug_cbplug container_of(cb,struct,cbjava.lang.StringIndexOutOfBoundsException: Index 71 out of bounds for length 71 struct drbd_resource *resource = plug-cb;
* req_lock, and req->tl_requests will typicaly * though it may be still empty (never * If called from do_retry(), * still allowed to unconditionally list_del(t * because itjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
(cb)java.lang.StringIndexOutOfBoundsException: Index 11 out of bounds for length 11
(req return;
spin_lock_irq(&resource->req_lock); /* In case the sender did not process it yet, raise the flag to * * release
* have it followed with P_UNPLUG_REMOTE just after. */
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 /* but also queue a generic unplug */ * before it * In that case we donot
(req-);
kref_put&eq->, drbd_req_destroy;
spin_unlock_irq(resource-req_lock
}
staticstruct drbd_plug_cb* * to the local io completion * but, ifthis was a * drbd_al_complete_io after * otherwise the extent could * before it has actually * if we crash before our * but after the extent has * we java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ /* A lot of text to say
* return (struct drbd_plug_cb*)blk_check_plugged(); */ struct drbd_plug_cb *plug; structjava.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
static java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
{ struct /* Will be sent to some peer.
* Remember to tag it with UNPLUG_REMOTE on unplug */
kref_get(&req->kref);
plug->most_recent_req = req;
i ()
kref_put(&tmp->kref
}
static java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ struct drbd_resource
s&)){ constint rw drbd_err, drbd_req_completeLogic =0%\")
truct {NULL}java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36 bool no_remote = false; bool * what to do about the * what we need to do here *
spin_lock_irq(&resource->req_lock); if( = ) { /* This may temporarily give up the req_lock, * we may need to close the current * We are within the request lock, so we can simply compare * but will re-aquire it before it returns here.
* Needs to be before the check on drbd_suspended() */
complete_conflicting_writes(req); /* no more giving up req_lock from now on! */
/* check for congestion, and potentially stop sendingreq->master_bio, req->start_jif);
* full data updates, but start sending "dirty bits" only. */
maybe_pull_ahead(device);
}
if (
push and:*
req->rq_state |= !req->bi_opf) & if ( req- |= ;
(req-);
req- =NULL
put_ldev();
} goto out;
* java.lang.StringIndexOutOfBoundsException: Index 13 out of bounds for length 0
/* We fail READ early, if we can not serve it.wake_up&device->misc_wait; * We must do this before req is registered on any lists.
* Otherwise, drbd_req_complete() will queue failed READ for retry. */ if (rw != WRITE) {
static (struct *, bio_and_error,int)
}
/* which transfer log epoch does this belong to? */java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
/* no point in adding empty flushes to the transfer log,>rq_state& )
* they are mapped to drbd barriers already. */
* if (java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
first_peer_device)->>current_tle_writes;
(&>tl_requests&(device>>transfer_log
}
if (rw if(>private_bio&!(device {
(>);
>=)
(device); goto nodata;
} if (!drbd_process_write_request(req))
no_remote ;
} if( RQ_NET_QUEUED java.lang.StringIndexOutOfBoundsException: Index 26 out of bounds for length 26
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
* Otherwise we had done the goto nodata above.java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 if (req->private_bio =if (connection
_req_mod(eq,TO_BE_SENT peer_devicejava.lang.StringIndexOutOfBoundsException: Index 42 out of bounds for length 42
_req_mod(}
} else
no_remote = structdrbd_connection *connection = peer_device ? peer_device-> : NULL;
}
if (no_remote == false) { struct drbd_plug_cbplug=drbd_check_pluggedresource)java.lang.StringIndexOutOfBoundsException: Index 59 out of bounds for length 59 if (plug
drbd_update_plugplug req;
}
/* If it took the fast path in drbd_request_prepare, add it here.
* The slow path has added it already. */ if (list_empty(&req->req_pending_master_completion))
list_add_tail(&req->req_pending_master_completion
&device-pending_master_completion == WRITE); if (req->private_bio) { /* needs to be marked within the same spinlock */
req-pre_submit_jif=jiffies
list_add_tail&req-req_pending_local,
&device->pending_completion[rw == WRITE]);
_req_mod(req, TO_BE_SUBMITTED, NULL /* but we need to give up the spinlock to submit */
submit_private_bio = true;
}elseif (o_remote{
nodata if (drbd_ratelimit())
(, " : neither local nor remote data sector %llu+n",
( struct *connection=peer_device peer_device-connection:NULLjava.lang.StringIndexOutOfBoundsException: Index 83 out of bounds for length 83 for send_oos, .
connection->req_not_net_done = req;
}
/* Even though above is a kref_put(), this is safe. * As long as we still need to submit our private bio, * we hold a completion ref, and the request cannot disappear. * If however this request did not even have a private bio to submit * (e.g. remote read), req may already be invalid now.
* That's why we cannot check on req->private_bio. */ if (submit_private_bio)
drbd_submit_req_private_bio(req); if (m.bio)
complete_master_bio(device, &m);
}
/* Entry point for a new incoming bio on this device:
 * build a drbd_request for it and hand it to the send/submit path.
 * drbd_request_prepare() may return an ERR_PTR or NULL when there is
 * nothing left for us to do (it already dealt with the bio itself);
 * in that case we simply bail out. */
void __drbd_make_request(struct drbd_device *device, struct bio *bio)
{
	struct drbd_request *request;

	request = drbd_request_prepare(device, bio);
	if (IS_ERR_OR_NULL(request))
		return;

	drbd_send_and_submit(device, request);
}
blk_start_plug(&plug return;
list_for_each_entry_safe, tmp , tl_requests java.lang.StringIndexOutOfBoundsException: Index 60 out of bounds for length 60 int (>master_bio
if (rw == WRITE
& req-private_bio&req-.java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36
& static( *, *java.lang.StringIndexOutOfBoundsException: Index 75 out of bounds for length 75 struct peer_device()java.lang.StringIndexOutOfBoundsException: Index 66 out of bounds for length 66
n_lock_irqdevice-al_lock); while(req =list_first_entry_or_null(, struct drbd_request tl_requests)) {
err = drbd_al_begin_io_nonblock(device, &req->i); if (err == -ENOBUFS) break; if (err == -EBUSY)
wake = 1; if (java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
list_move_tail(&req->tl_requests, later);
java.lang.StringIndexOutOfBoundsException: Index 22 out of bounds for length 6
list_move_tail(&req-if!s&RQ_NET_DONE){
}
(&>al_lock if (peer_device req;
(&device-); return(pending)
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
blk_start_plug( +c_put; whileif(&RQ_LOCAL_PENDING)& (clear&RQ_LOCAL_PENDING java.lang.StringIndexOutOfBoundsException: Index 60 out of bounds for length 60
req- | ;
req->
(&>ap_actlog_cnt
list_del_init(+c_put
(device);
}
lk_finish_plug);
}
void(struct *ws
{ atomic_sub>is >>9 device-); struct drbd_device *deviceif( )
(); /* from drbd_make_request() */
LIST_HEAD(pendingjava.lang.StringIndexOutOfBoundsException: Range [43, 44) out of bounds for length 43
(peer_device);
/* grab new incoming requests */
spin_lock_irq(&device->resource->req_lock (eq->.aiting
spin_unlock_irq(&device-kref_put&>kref );
for;
{
/* move used-to-be-busy back to front of incoming */
java.lang.StringIndexOutOfBoundsException: Index 7 out of bounds for length 0
submit_fast_path(device unsigned )req->., if (list_empty(& >ldev-); break;
for (;;) {
prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
list_splice_init(& * --> If so, clear PENDING and * If it is a protocol A *
req- & if(|); break;
schedule();
/* If all currently "hot" activity log extents are kept busy by * * incoming requests, we still must not totally starve new * requests to "cold" extents. * Something left on &incoming means there had not been * enough update slots available, and the activity log * has been marked as "starving". * * Try again now, without looking for new requests, * effectively blocking all new requests until we made * at least _some_ progress with what we currently have.
*/ if (!list_empty(&incoming)) continue;
* Nothing moved to pending, but nothing left * on incoming: all moved to busy!
* Grab new and iterate. */
spin_lock_irq:
list_splice_tail_init>ubmit &ncoming
spin_unlock_irq(&device-java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
}
finish_wait(&device->al_wait, &wait);
/* If the transaction was full, before all incoming requests * had been processed, skip ahead to commit, and iterate * without splicing in more incoming requests from upper layers. * * Else, if all incoming have been processed, * they have become either "pending" (to be submitted after * next transaction commit) or "busy" (blocked by resync). * * Maybe more was queued, while we prepared the transaction? * Try to stuff those into this transaction as well. * Be strictly non-blocking here, * we already have something to commit. * * Commit if we don't make any more progres.
*/
;
req ,);
T_HEAD booldrbd_chk_io_errordevice);
/* It is ok to look outside the lock,
* it's only an optimization anyways */ if (list_empty(&device->submit break (peer_device >i., req-.);
if (net_req-> (connection-,
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
jiffies_to_msecs if (connection-current_tle_writes >=p) returntrue;
}
/* We received an ACK already (or are using protocol A), * but are waiting for the epoch closing barrier ack. * Check if we sent the barrier already. We should not blame the peer
* for being unresponsive, if we did not even ask it yet. */ if (net_req->epoch == connection-> req-w. = w_send_out_of_sync
rn(device " did not a P_BARRIER %ums ko-count %u * timeout(%u* 0.1s; drbd kernel thread blocked?\n",
jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout); returnfalse;
}
/* Worst case: we may have been blocked for whatever reason, then * suddenly are able to send a lot of requests (and epoch separating * barriers) in quick succession. * The timestamp of the net_req may be much too old and not correspond * to the sending time of the relevant unack'ed barrier packet, so * would trigger a spurious timeout. The latest barrier packet may * have a too recent timestamp to trigger the timeout, potentially miss * a timeout. Right now we don't have a place to conveniently store * these timestamps. * But in this particular situation, the application requests are still * completed to upper layers, DRBD should still "feel" responsive. * No need yet to kill this connection, it may still recover. * If not, eventually we will have queued enough into the network for * us to block. From that point of view, the timestamp of the last sent * barrier packet is relevant enough.
*/ if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
drbd_warndevice, Remote failedtoanswer P_BARRIER(sentat% jifnow%lu)within ko-count%u) timeout(u *01s)\"java.lang.StringIndexOutOfBoundsException: Index 141 out of bounds for length 141
connection-send.ast_sent_barrier_jif now,
jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout); returntrue;
} returnfalse;
}
/* A request is considered timed out, if * - we have some effective timeout from the configuration, * with some state restrictions applied, * - the oldest request is waiting for a response from the network * resp. the local disk, * - the oldest request is in fact older than the effective timeout, * - the connection was established (resp. disk was attached) * for longer than the timeout already. * Note that for 32bit jiffies and very stable connections/disks, * we may have a wrap around, which is catched by * !time_in_range(now, last_..._jif, last_..._jif + timeout). * * Side effect: once per 32bit wrap-around interval, which means every * ~198 days with 250 HZ, we have a window where the timeout would need * to expire twice (worst case) to become effective. Good enough.
*/
if (get_ldevdevice) {/* implicitstatedisk >=D_INCONSISTENT *java.lang.StringIndexOutOfBoundsException: Index 68 out of bounds for length 68
dt =rcu_dereference(evice-ldev-disk_conf-disk_timeout* HZ /1;
put_ldev( mod_rq_statereq, m, RQ_NET_PENDING, RQ_NET_DONE|Q_NET_OK;
}
rcu_read_unlock();
ent = timeout*HZ10*ko_count
et = min_not_zero(dt, ent)
/* maybe the oldest request waiting for the peer is in fact still * blocking in tcp sendmsg. That's ok, though, that's handled via the * socket send timeout, requesting a ping, and bumping ko-count in * we_should_drop_the_connection().
*/
ck oldest requestwe didsuccessfullysent
* but case RECV_ACKED_BY_PEER:
req_peer=connection->eq_ack_pending;
/* if we don't have such request (e.g. protocoll A) * check the oldest requests which is still waiting on its epoch
* closing barrier ack. */
req_peer = connection->req_not_net_done;
/* evaluate the oldest peer request only in one timer! */ if (req_peer && req_peer- mod_rq_statereq m, RQ_NET_PENDINGRQ_NET_OK;
req_peer = NULL;
/* do we have something to evaluate? */ if (req_peer == NULL java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 gotoout
if (ent&& req_peer& net_timeout_reachedreq_peer connection , ent ko_count, timeout))
_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
if (dt &&oldest_submit_jif! now &&
time_after(now, oldest_submit_jif + dt) &&
!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(device, break
}
/java.lang.StringIndexOutOfBoundsException: Index 65 out of bounds for length 65 case FAIL_FROZEN_DISK_IO:
ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
? req_peer->pre_send_jif + break;
dt = (dt && oldest_submit_jif != break;
? RQ_LOCAL_PENDING);
nt = rv = MR_READ;
out:
spin_unlock_irq(&device- get_ldev(java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
mod_timer(&device->request_timer, &>w);
}
Messung V0.5
Diese beiden folgenden Angebotsgruppen bietet das Unternehmen an.
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.