/*
 * NOTE(review): fragment — the enclosing function's header is missing from
 * this extract; this looks like the queue-overrun path of evdev's
 * event-posting code. Confirm against drivers/input/evdev.c.
 */
if (unlikely(client->head == client->tail)) { /* * This effectively "drops" all unconsumed events, leaving * EV_SYN/SYN_DROPPED plus the newest event in the queue.
 */
/* assumes client->bufsize is a power of two — the AND wraps the ring index; verify */
client->tail = (client->head - 2) & (client->bufsize - 1);
/*
 * NOTE(review): this span splices two separate fragments together: the
 * start of evdev_grab() and, from the "if (evdev->exist ..." line on, what
 * appears to be the tail of a device-close helper. Restore both functions
 * from the original drivers/input/evdev.c before compiling.
 */
/* * Grabs an event device (along with underlying input device). * This function is called with evdev->mutex taken.
 */ staticint evdev_grab(struct evdev *evdev, struct evdev_client *client)
{ int error;
/* only one client may hold the grab at a time */
if (evdev->grab) return -EBUSY;
error = input_grab_device(&evdev->handle); if (error) return error;
/* NOTE(review): the lines below do not belong to evdev_grab() */
if (evdev->exist && !--evdev->open)
input_close_device(&evdev->handle);
mutex_unlock(&evdev->mutex);
}
/* * Wake up users waiting for IO so they can disconnect from * dead device.
 */ staticvoid evdev_hangup(struct evdev *evdev)
{ struct evdev_client *client;
/*
 * NOTE(review): truncated — the body of evdev_hangup() (walking the client
 * list and waking waiters) is missing from this extract.
 */
/* * Limit amount of data we inject into the input subsystem so that * we do not hold evdev->mutex for too long. 4096 bytes corresponds * to 170 input events.
 */
/*
 * NOTE(review): fragment of the evdev write path — the function header and
 * the surrounding code are missing from this extract.
 */
count = min(count, 4096);
/* a non-zero write must supply at least one complete input event */
if (count != 0 && count < input_event_size()) return -EINVAL;
/* interruptible lock: lets a signal abort a blocked writer */
retval = mutex_lock_interruptible(&evdev->mutex); if (retval) return retval;
#ifdef __BIG_ENDIAN
/*
 * Copy a kernel bitmap (array of unsigned long) out to user space,
 * truncating to at most @maxlen bytes. Big-endian variant.
 *
 * In compat mode each native long holds two compat_long_t halves in the
 * "wrong" memory order for a 32-bit reader, so adjacent compat words are
 * swapped on the way out: i + 1 - ((i % 2) << 1) maps even i -> i + 1 and
 * odd i -> i - 1.
 *
 * Returns the number of bytes the full bitmap would occupy (which may be
 * larger than what was actually copied), or -EFAULT on a faulting copy.
 */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;

	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}

	return len;
}
staticint bits_from_user(unsignedlong *bits, unsignedint maxbit, unsignedint maxlen, constvoid __user *p, int compat)
{ int len, i;
if (compat) { if (maxlen % sizeof(compat_long_t)) return -EINVAL;
len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t); if (len > maxlen)
len = maxlen;
for (i = 0; i < len / sizeof(compat_long_t); i++) if (copy_from_user((compat_long_t *) bits +
i + 1 - ((i % 2) << 1),
(compat_long_t __user *) p + i, sizeof(compat_long_t))) return -EFAULT; if (i % 2)
*((compat_long_t *) bits + i - 1) = 0;
} else { if (maxlen % sizeof(long)) return -EINVAL;
len = BITS_TO_LONGS(maxbit) * sizeof(long); if (len > maxlen)
len = maxlen;
if (copy_from_user(bits, p, len)) return -EFAULT;
}
return len;
}
#else
/*
 * NOTE(review): native-endian variant of bits_to_user(); truncated — the
 * copy_to_user() call, the return statement and the closing brace are
 * missing from this extract.
 */
staticint bits_to_user(unsignedlong *bits, unsignedint maxbit, unsignedint maxlen, void __user *p, int compat)
{ int len = compat ?
BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t) :
BITS_TO_LONGS(maxbit) * sizeof(long);
/*
 * NOTE(review): garbled splice — the switch below is the start of
 * handle_eviocgbit() (mapping an event type to its capability bitmap and
 * highest code), but the lines from "if (copy_from_user(&ke, ..." onward
 * appear to come from a keycode-setting ioctl handler built around
 * input_set_keycode(); the two halves do not form one coherent function.
 */
staticint handle_eviocgbit(struct input_dev *dev, unsignedint type, unsignedint size, void __user *p, int compat_mode)
{ unsignedlong *bits; int len;
/* select the bitmap and max event code for the requested type */
switch (type) {
case 0: bits = dev->evbit; len = EV_MAX; break; case EV_KEY: bits = dev->keybit; len = KEY_MAX; break; case EV_REL: bits = dev->relbit; len = REL_MAX; break; case EV_ABS: bits = dev->absbit; len = ABS_MAX; break; case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break; case EV_LED: bits = dev->ledbit; len = LED_MAX; break; case EV_SND: bits = dev->sndbit; len = SND_MAX; break; case EV_FF: bits = dev->ffbit; len = FF_MAX; break; case EV_SW: bits = dev->swbit; len = SW_MAX; break; default: return -EINVAL;
}
/* NOTE(review): from here on the code belongs to a different handler */
if (copy_from_user(&ke, p, sizeof(ke))) return -EFAULT;
if (ke.len > sizeof(ke.scancode)) return -EINVAL;
return input_set_keycode(dev, &ke);
}
/* * If we transfer state to the user, we should flush all pending events * of the same type from the client's queue. Otherwise, they might end up * with duplicate events, which can screw up client's state tracking. * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED * event so user-space will notice missing events. * * LOCKING: * We need to take event_lock before buffer_lock to avoid dead-locks. But we * need the even_lock only to guarantee consistent state. We can safely release * it while flushing the queue. This allows input-core to handle filters while * we flush the queue.
 */ staticint evdev_handle_get_val(struct evdev_client *client, struct input_dev *dev, unsignedint type, unsignedlong *bits, unsignedint maxbit, unsignedint maxlen, void __user *p, int compat)
{ int ret; unsignedlong *mem;
mem = bitmap_alloc(maxbit, GFP_KERNEL); if (!mem) return -ENOMEM;
/*
 * NOTE(review): truncated — the locking, the copy of @bits into @mem and
 * the queue-flush logic described in the comment above are missing from
 * this extract; as shown, "mem" is handed to bits_to_user() without ever
 * being filled from the device state.
 */
ret = bits_to_user(mem, maxbit, maxlen, p, compat); if (ret < 0)
/* tell user space that events were dropped so it can re-sync state */
evdev_queue_syn_dropped(client);
bitmap_free(mem);
return ret;
}
/*
 * Report the current value of one multi-touch axis for every MT slot.
 * The first __u32 of the user buffer selects the ABS_MT_* code; the
 * remaining space receives one __s32 value per slot.
 */
staticint evdev_handle_mt_request(struct input_dev *dev, unsignedint size, int __user *ip)
{ conststruct input_mt *mt = dev->mt; unsignedint code; int max_slots; int i;
if (get_user(code, &ip[0])) return -EFAULT; if (!mt || !input_is_mt_value(code)) return -EINVAL;
/* how many __s32 values fit after the leading __u32 code word */
max_slots = (size - sizeof(__u32)) / sizeof(__s32); for (i = 0; i < mt->num_slots && i < max_slots; i++) { int value = input_mt_get_value(&mt->slots[i], code); if (put_user(value, &ip[1 + i])) return -EFAULT;
}
/*
 * NOTE(review): truncated — the function's return statement and closing
 * brace are missing from this extract.
 */
/* must be called with evdev-mutex held */ staticint evdev_set_mask(struct evdev_client *client, unsignedint type, constvoid __user *codes,
u32 codes_size, int compat)
{ unsignedlong flags, *mask, *oldmask;
size_t cnt; int error;
/* we allow unknown types and 'codes_size > size' for forward-compat */
cnt = evdev_get_mask_cnt(type); if (!cnt) return 0;
mask = bitmap_zalloc(cnt, GFP_KERNEL); if (!mask) return -ENOMEM;
/*
 * NOTE(review): truncated — copying the mask in from user space, swapping
 * it into client->evmasks[type] and freeing the old mask are missing from
 * this extract; as shown, the freshly allocated "mask" would leak.
 */
/* must be called with evdev-mutex held */ staticint evdev_get_mask(struct evdev_client *client, unsignedint type, void __user *codes,
u32 codes_size, int compat)
{ unsignedlong *mask;
size_t cnt, size, xfer_size; int i; int error;
/* we allow unknown types and 'codes_size > size' for forward-compat */
cnt = evdev_get_mask_cnt(type);
size = sizeof(unsignedlong) * BITS_TO_LONGS(cnt);
/* never transfer more than the user buffer can hold */
xfer_size = min_t(size_t, codes_size, size);
if (cnt > 0) {
mask = client->evmasks[type]; if (mask) {
error = bits_to_user(mask, cnt - 1,
xfer_size, codes, compat); if (error < 0) return error;
} else { /* fake mask with all bits set */ for (i = 0; i < xfer_size; i++) if (put_user(0xffU, (u8 __user *)codes + i)) return -EFAULT;
}
}
/* zero the tail of the user buffer beyond what we transferred */
if (xfer_size < codes_size) if (clear_user(codes + xfer_size, codes_size - xfer_size)) return -EFAULT;
/*
 * NOTE(review): truncated — the function's "return 0;" and closing brace
 * are missing from this extract.
 */
/*
 * NOTE(review): interior fragment of the evdev ioctl dispatcher — the
 * function header, local declarations and several case bodies are missing
 * from this extract, and the remaining pieces appear spliced out of order.
 */
/* First we check for fixed-length commands */ switch (cmd) {
case EVIOCGVERSION: return put_user(EV_VERSION, ip);
case EVIOCGID: if (copy_to_user(p, &dev->id, sizeof(struct input_id))) return -EFAULT; return 0;
case EVIOCGREP: if (!test_bit(EV_REP, dev->evbit)) return -ENOSYS; if (put_user(dev->rep[REP_DELAY], ip)) return -EFAULT; if (put_user(dev->rep[REP_PERIOD], ip + 1)) return -EFAULT; return 0;
/* NOTE(review): the EVIOCSREP case below is cut off after reading u and v */
case EVIOCSREP: if (!test_bit(EV_REP, dev->evbit)) return -ENOSYS; if (get_user(u, ip)) return -EFAULT; if (get_user(v, ip + 1)) return -EFAULT;
/* EVIOCGABS(axis): copy the axis parameters out to user space */
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
if (!dev->absinfo) return -EINVAL;
t = _IOC_NR(cmd) & ABS_MAX;
/* take a snapshot so a partial copy can't expose a torn struct */
abs = dev->absinfo[t];
if (copy_to_user(p, &abs, min_t(size_t,
size, sizeof(struct input_absinfo)))) return -EFAULT;
return 0;
}
}
if (_IOC_DIR(cmd) == _IOC_WRITE) {
/* EVIOCSABS(axis): update the axis parameters from user space */
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
if (!dev->absinfo) return -EINVAL;
t = _IOC_NR(cmd) & ABS_MAX;
if (copy_from_user(&abs, p, min_t(size_t,
size, sizeof(struct input_absinfo)))) return -EFAULT;
/* short (older) struct layouts carry no resolution field */
if (size < sizeof(struct input_absinfo))
abs.resolution = 0;
/* We can't change number of reserved MT slots */ if (t == ABS_MT_SLOT) return -EINVAL;
/* * Take event lock to ensure that we are not * changing device parameters in the middle * of event.
 */
spin_lock_irq(&dev->event_lock);
dev->absinfo[t] = abs;
spin_unlock_irq(&dev->event_lock);
/*
 * NOTE(review): truncated — the "return 0;" and closing braces of this
 * branch are missing from this extract.
 */
/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Already posted
 * blocking reads will stay, however new ones will fail.
 *
 * Takes evdev->mutex so the flag flips atomically with respect to
 * other paths that check it under the same mutex.
 */
static void evdev_mark_dead(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	evdev->exist = false;
	mutex_unlock(&evdev->mutex);
}
/*
 * NOTE(review): fragment of the evdev device-registration path — the
 * enclosing function is missing from this extract.
 */
dev_no = minor; /* Normalize device number if it falls into legacy range */ if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
/* legacy minors start at EVDEV_MINOR_BASE; strip the offset */
dev_no -= EVDEV_MINOR_BASE;
dev_set_name(&evdev->dev, "event%d", dev_no);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.