/* Because some systems will have one, many, or no
 *   - partitions,
 *   - slots per shelf,
 *   - or shelves,
 * we need some flexibility in the way the minor numbers
 * are allocated. So they are dynamic.
*/ #define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS)
spin_lock_irqsave(&used_minors_lock, flags);
n = find_first_zero_bit(used_minors, N_DEVS); if (n < N_DEVS)
set_bit(n, used_minors); else
error = -1;
spin_unlock_irqrestore(&used_minors_lock, flags);
*sysminor = n * AOE_PARTITIONS; return error;
}
/*
 * minor_get_static - claim a system minor number using the legacy
 * static mapping (aoemaj * NPERSHELF + aoemin).
 * @sysminor: out-parameter receiving the first system minor for the device
 * @aoemaj: AoE shelf (major) address
 * @aoemin: AoE slot (minor) address
 *
 * Marks the computed slot in the used_minors bitmap under
 * used_minors_lock.  Returns 0 on success, -1 if the slot is out of
 * range, the computed number exceeds N_DEVS, or the minor is already
 * claimed by an existing device.
 */
static int
minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
{
	ulong flags;
	ulong n;
	int error = 0;
	enum {
		/* for backwards compatibility when !aoe_dyndevs,
		 * a static number of supported slots per shelf */
		NPERSHELF = 16,
	};

	if (aoemin >= NPERSHELF) {
		pr_err("aoe: %s %d slots per shelf\n",
			"static minor device numbers support only",
			NPERSHELF);
		error = -1;
		goto out;
	}

	n = aoemaj * NPERSHELF + aoemin;
	if (n >= N_DEVS) {
		pr_err("aoe: %s with e%ld.%d\n",
			"cannot use static minor device numbers",
			aoemaj, aoemin);
		error = -1;
		goto out;
	}

	spin_lock_irqsave(&used_minors_lock, flags);
	if (test_bit(n, used_minors)) {
		pr_err("aoe: %s %lu\n",
			"existing device already has static minor number",
			n);
		error = -1;
	} else
		set_bit(n, used_minors);
	spin_unlock_irqrestore(&used_minors_lock, flags);
	*sysminor = n * AOE_PARTITIONS;
out:
	return error;
}
/*
 * minor_get - allocate a system minor number for a new device,
 * dispatching to the dynamic or static allocator depending on the
 * aoe_dyndevs module setting.  Returns 0 on success, negative on
 * failure; *sysminor receives the allocated minor.
 */
static int
minor_get(ulong *sysminor, ulong aoemaj, int aoemin)
{
	if (aoe_dyndevs)
		return minor_get_dyn(sysminor);
	else
		return minor_get_static(sysminor, aoemaj, aoemin);
}
/*
 * Users who grab a pointer to the device with aoedev_by_aoeaddr
 * automatically get a reference count and must be responsible
 * for performing an aoedev_put. With the addition of async
 * kthread processing I'm no longer confident that we can
 * guarantee consistency in the face of device flushes.
 *
 * For the time being, we only bother to add extra references for
 * frames sitting on the iocq. When the kthreads finish processing
 * these frames, they will aoedev_put the device.
*/
/* clean out active and to-be-retransmitted buffers */ for (i = 0; i < NFACTIVE; i++) {
head = &d->factive[i];
list_for_each_safe(pos, nx, head)
downdev_frame(pos);
}
head = &d->rexmitq;
list_for_each_safe(pos, nx, head)
downdev_frame(pos);
/* reset window dressings */
tt = d->targets;
te = tt + d->ntargets; for (; tt < te && (t = *tt); tt++) {
aoecmd_wreset(t);
t->nout = 0;
}
/* clean out the in-process request (if any) */
aoe_failip(d);
/* clean out any queued block requests */
list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
list_del_init(&rq->queuelist);
blk_mq_start_request(rq);
blk_mq_end_request(rq, BLK_STS_IOERR);
}
/* fast fail all pending I/O */ if (d->blkq) { /* UP is cleared, freeze+quiesce to insure all are errored */ unsignedint memflags = blk_mq_freeze_queue(d->blkq);
/* return whether the user asked for this particular
 * device to be flushed
*/ staticint
user_req(char *s, size_t slen, struct aoedev *d)
{ constchar *p;
size_t lim;
if (!d->gd) return 0;
p = kbasename(d->gd->disk_name);
lim = sizeof(d->gd->disk_name);
lim -= p - d->gd->disk_name; if (slen < lim)
lim = slen;
timer_delete_sync(&d->timer); if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
}
t = d->targets;
e = t + d->ntargets; for (; t < e && *t; t++)
freetgt(d, *t);
/* This has been confirmed to occur once with Tms=3*1000 due to the
 * driver changing link and not processing its transmit ring. The
 * problem is hard enough to solve by returning an error that I'm
 * still punting on "solving" this.
*/ staticvoid
skbfree(struct sk_buff *skb)
{ enum { Sms = 250, Tms = 30 * 1000}; int i = Tms / Sms;
/*
 * aoedev_by_aoeaddr - find, or optionally allocate, the aoedev for an
 * AoE shelf/slot address.
 * @maj: AoE shelf (major) address
 * @min: AoE slot (minor) address
 * @do_alloc: when nonzero, allocate a new device if none exists
 *
 * On success the returned device's reference count has been bumped;
 * the caller must balance with aoedev_put().  Returns NULL when the
 * matching device is being torn down (DEVFL_TKILL), when the device
 * is absent and @do_alloc is zero, or when minor allocation or memory
 * allocation fails.  Runs entirely under devlist_lock.
 */
struct aoedev *
aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
{
	struct aoedev *d;
	int i;
	ulong flags;
	ulong sysminor = 0;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d = devlist; d; d = d->next)
		if (d->aoemajor == maj && d->aoeminor == min) {
			spin_lock(&d->lock);
			if (d->flags & DEVFL_TKILL) {
				/* device is going away; do not hand it out */
				spin_unlock(&d->lock);
				d = NULL;
				goto out;
			}
			d->ref++;
			spin_unlock(&d->lock);
			break;
		}
	if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
		goto out;
	/* kzalloc is the idiomatic form of kcalloc(1, size, ...) */
	d = kzalloc(sizeof(*d), GFP_ATOMIC);
	if (!d)
		goto out;
	d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
	if (!d->targets) {
		kfree(d);
		d = NULL;
		goto out;
	}
	d->ntargets = NTARGETS;
	INIT_WORK(&d->work, aoecmd_sleepwork);
	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->rq_list);
	skb_queue_head_init(&d->skbpool);
	timer_setup(&d->timer, dummy_timer, 0);
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
	d->tgt = d->targets;
	d->ref = 1;
	for (i = 0; i < NFACTIVE; i++)
		INIT_LIST_HEAD(&d->factive[i]);
	INIT_LIST_HEAD(&d->rexmitq);
	d->sysminor = sysminor;
	d->aoemajor = maj;
	d->aoeminor = min;
	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->next = devlist;
	devlist = d;
out:
	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.