/********************************************************************** * Whenever you change the file format, remember to bump the version. *
**********************************************************************/
#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
#define RQ_HDR_3 "\tsent\tacked\tdone"
seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, i) { struct drbd_md_io tmp; /* In theory this is racy, * in the sense that there could have been a * drbd_md_put_buffer(); drbd_md_get_buffer();
* between accessing these members here. */
tmp = device->md_io; if (atomic_read(&tmp.in_use)) {
seq_printf(m, "%u\t%u\t%d\t",
device->minor, device->vnr,
jiffies_to_msecs(now - tmp.start_jif)); if (time_before(tmp.submit_jif, tmp.start_jif))
seq_puts(m, "-\t"); else
seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
seq_printf(m, "%s\n", tmp.current_use);
}
}
rcu_read_unlock();
}
seq_puts(m, "minor\tvnr\tage\t#waiting\n");
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, i) { unsignedlong jif; struct drbd_request *req; int n = atomic_read(&device->ap_actlog_cnt); if (n) {
spin_lock_irq(&device->resource->req_lock);
req = list_first_entry_or_null(&device->pending_master_completion[1], struct drbd_request, req_pending_master_completion); /* if the oldest request does not wait for the activity log
* it is not interesting for us here */ if (req && !(req->rq_state & RQ_IN_ACT_LOG))
jif = req->start_jif; else
req = NULL;
spin_unlock_irq(&device->resource->req_lock);
} if (n) {
seq_printf(m, "%u\t%u\t", device->minor, device->vnr); if (req)
seq_printf(m, "%u\t", jiffies_to_msecs(now - jif)); else
seq_puts(m, "-\t");
seq_printf(m, "%u\n", n);
}
}
rcu_read_unlock();
}
/* This is meant to summarize timing issues, to be able to tell * local disk problems from network problems. * Skip requests, if we have shown an even older request with
* similar aspects already. */ if (req->master_bio == NULL)
tmp |= 1; if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
tmp |= 2; if (s & RQ_NET_MASK) { if (!(s & RQ_NET_SENT))
tmp |= 4; if (s & RQ_NET_PENDING)
tmp |= 8; if (!(s & RQ_NET_DONE))
tmp |= 16;
} if ((tmp & show_state) == tmp) continue;
show_state |= tmp;
seq_printf(m, "%u\t", count);
seq_print_minor_vnr_req(m, req, now); if (show_state == 0x1f) break;
}
spin_unlock_irq(&resource->req_lock);
}
/* TODO: transfer_log and friends should be moved to resource */
/*
 * Single-shot seq_file show callback for the per-resource
 * "in_flight_summary" debugfs entry.  Dumps pending meta-data IO and
 * socket buffer statistics for the (currently single) peer connection.
 * Returns 0 on success, -ESTALE if the connection is already gone.
 */
static int in_flight_summary_show(struct seq_file *m, void *pos)
{
	struct drbd_resource *resource = m->private;
	struct drbd_connection *connection;
	unsigned long jif = jiffies;

	connection = first_connection(resource);
	/* This does not happen, actually.
	 * But be robust and prepare for future code changes. */
	if (!connection || !kref_get_unless_zero(&connection->kref))
		return -ESTALE;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, "meta data IO\n");
	seq_print_resource_pending_meta_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "socket buffer stats\n");
	/* for each connection ... once we have more than one */
	rcu_read_lock();
	if (connection->data.socket) {
		/* open coded SIOCINQ, the "relevant" part */
		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
		int answ = tp->rcv_nxt - tp->copied_seq;

		seq_printf(m, "unread receive buffer: %u Byte\n", answ);
		/* open coded SIOCOUTQ, the "relevant" part */
		answ = tp->write_seq - tp->snd_una;
		seq_printf(m, "unacked send buffer: %u Byte\n", answ);
	}
	rcu_read_unlock();
	seq_putc(m, '\n');

	/* report how long generating this output took, if measurable */
	jif = jiffies - jif;
	if (jif)
		seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}
/* make sure at *open* time that the respective object won't go away. */
/*
 * Like single_open(), but first takes a reference on @kref so the
 * object backing the debugfs file cannot be destroyed while the file
 * is open.  Returns 0 on success, -ESTALE if the dentry has already
 * been unlinked (debugfs_remove() raced with us), or the error from
 * single_open() (in which case the reference is dropped again via
 * @release).
 */
static int drbd_single_open(struct file *file,
		int (*show)(struct seq_file *, void *),
		void *data, struct kref *kref,
		void (*release)(struct kref *))
{
	struct dentry *parent;
	int ret = -ESTALE;

	/* Are we still linked,
	 * or has debugfs_remove() already been called? */
	parent = file->f_path.dentry->d_parent;
	/* serialize with d_delete() */
	inode_lock(d_inode(parent));
	/* Make sure the object is still alive */
	if (simple_positive(file->f_path.dentry)
	&& kref_get_unless_zero(kref))
		ret = 0;
	inode_unlock(d_inode(parent));
	if (!ret) {
		ret = single_open(file, show, data);
		if (ret)
			kref_put(kref, release);
	}
	return ret;
}
/*
 * Remove all debugfs entries of @resource, deepest entries first so
 * children are gone before their parent directory is removed.
 */
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
	/* it is ok to call debugfs_remove(NULL) */
	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
	drbd_debugfs_remove(&resource->debugfs_res_connections);
	drbd_debugfs_remove(&resource->debugfs_res_volumes);
	drbd_debugfs_remove(&resource->debugfs_res);
}
staticvoid seq_print_one_timing_detail(struct seq_file *m, conststruct drbd_thread_timing_details *tdp, unsignedlong now)
{ struct drbd_thread_timing_details td; /* No locking...
* use temporary assignment to get at consistent data. */ do {
td = *tdp;
} while (td.cb_nr != tdp->cb_nr); if (!td.cb_addr) return;
seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
td.cb_nr,
jiffies_to_msecs(now - td.start_jif),
td.caller_fn, td.line,
td.cb_addr);
}
seq_printf(m, "%s\n", title); /* If not much is going on, this will result in natural ordering. * If it is very busy, we will possibly skip events, or even see wrap * arounds, which could only be avoided with locking.
*/
start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST; for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
seq_print_one_timing_detail(m, tdp+i, now); for (i = 0; i < start_idx; i++)
seq_print_one_timing_detail(m, tdp+i, now);
}
/* Once we enable mutliple peers, * these connections will have descriptive names.
* For now, it is just the one connection to the (only) "peer". */
dentry = debugfs_create_dir("peer", conns_dir);
connection->debugfs_conn = dentry;
/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
/*
 * Remove the module-global debugfs hierarchy, children before the
 * root directory.  debugfs_remove(NULL) is a no-op, so entries that
 * were never created are harmless.
 */
void drbd_debugfs_cleanup(void)
{
	drbd_debugfs_remove(&drbd_debugfs_resources);
	drbd_debugfs_remove(&drbd_debugfs_minors);
	drbd_debugfs_remove(&drbd_debugfs_version);
	drbd_debugfs_remove(&drbd_debugfs_root);
}
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.3Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.