/****************************************************************************** * gntdev.c * * Device for accessing (in user-space) pages that have been granted by other * domains. * * Copyright (c) 2006-2007, D G Murray. * (c) 2009 Gerd Hoffmann <kraxel@redhat.com> * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Upper bound on the number of grants a single mapping request may cover. */
static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by one mapping request");
/* True in PV mode, false otherwise */
static int use_ptemod;

static void unmap_grant_pages(struct gntdev_grant_map *map,
			      int offset, int pages);
/* * Check if this mapping is requested to be backed * by a DMA buffer.
*/ if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) { struct gnttab_dma_alloc_args args;
add->frames = kvcalloc(count, sizeof(add->frames[0]),
GFP_KERNEL); if (!add->frames) goto err;
/* Remember the device, so we can free DMA memory. */
add->dma_dev = priv->dma_dev;
if (map->pages && !use_ptemod) { /* * Increment the reference count. This ensures that the * subsequent call to unmap_grant_pages() will not wind up * re-entering itself. It *can* wind up calling * gntdev_put_map() recursively, but such calls will be with a * reference count greater than 1, so they will return before * this code is reached. The recursion depth is thus limited to * 1. Do NOT use refcount_inc() here, as it will detect that * the reference count is zero and WARN().
*/
refcount_set(&map->users, 1);
/* * Unmap the grants. This may or may not be asynchronous, so it * is possible that the reference count is 1 on return, but it * could also be greater than 1.
*/
unmap_grant_pages(map, 0, map->count);
/* Check if the memory now needs to be freed */ if (!refcount_dec_and_test(&map->users)) return;
/* * All pages have been returned to the hypervisor, so free the * map.
*/
}
if (use_ptemod && map->notifier_init)
mmu_interval_notifier_remove(&map->notifier);
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
}
gntdev_free_map(map);
}
/*
 * Issue the grant-table map operations for @map.
 * NOTE(review): this definition is truncated in the visible chunk — the body
 * below ends mid-loop where the extraction jumped to unrelated code.
 */
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
	size_t alloced = 0;
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));

			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, INVALID_GRANT_HANDLE);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with find_grant_ptes.
		 * Note that GNTMAP_device_map isn't needed here: The
		 * dev_bus_addr output field gets consumed only from ->map_ops,
		 * and by not requesting it when mapping we also avoid needing
		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
		 * reference to the page in the hypervisor).
		 */
		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
				     GNTMAP_host_map;

		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));

			BUG_ON(PageHighMem(map->pages[i]));
/* * Decrease the live-grant counter. This must happen after the loop to * prevent premature reuse of the grants by gnttab_mmap().
*/
live_grants = atomic_sub_return(successful_unmaps, &map->live_grants); if (WARN_ON(live_grants < 0))
pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
__func__, live_grants, successful_unmaps);
/* Release reference taken by __unmap_grant_pages */
gntdev_put_map(NULL, map);
}
/*
 * NOTE(review): truncated definition — the visible body ends where the
 * extraction jumped to another function.
 */
static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
				int pages)
{
	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);

		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
/* It is possible the requested range will have a "hole" where we * already unmapped some of the grants. Only unmap valid ranges.
*/ while (pages) { while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0; while (range < pages) { if (map->being_removed[offset + range]) break;
map->being_removed[offset + range] = true;
range++;
} if (range)
__unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
}
/* * If the VMA is split or otherwise changed the notifier is not * updated, but we don't want to process VA's outside the modified * VMA. FIXME: It would be much more understandable to just prevent * modifying the VMA in the first place.
*/ if (map_start >= range->end || map_end <= range->start) returntrue;
/*
 * Handle IOCTL_GNTDEV_SET_UNMAP_NOTIFY: copy the request from user space,
 * validate the action flags, and take a reference on the event channel if
 * event notification was requested.
 * NOTE(review): truncated definition — the remainder of the body is not
 * visible in this chunk.
 */
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct gntdev_grant_map *map;
	int rc;
	int out_flags;
	evtchn_port_t out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/*
	 * We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}
/* * For each completed op, update the status if the op failed * and all previous ops for the segment were successful.
*/ for (i = 0; i < batch->nr_ops; i++) {
s16 status = batch->ops[i].status;
s16 old_status;
if (status == GNTST_okay) continue;
if (__get_user(old_status, batch->status[i])) return -EFAULT;
if (old_status != GNTST_okay) continue;
if (__put_user(status, batch->status[i])) return -EFAULT;
}
/* * Disallow local -> local copies since there is only space in * batch->pages for one page per-op and this would be a very * expensive memcpy().
*/ if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref))) return -EINVAL;
/* Can't cross page if source/dest is a grant ref. */ if (seg->flags & GNTCOPY_source_gref) { if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE) return -EINVAL;
} if (seg->flags & GNTCOPY_dest_gref) { if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE) return -EINVAL;
}
if (put_user(GNTST_okay, status)) return -EFAULT;
while (copied < seg->len) { struct gnttab_copy *op; void __user *virt;
size_t len, off; unsignedlong gfn; int ret;
if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
ret = gntdev_copy(batch); if (ret < 0) return ret;
}
if (use_ptemod) { /* * gntdev takes the address of the PTE in find_grant_ptes() and * passes it to the hypervisor in gntdev_map_grant_pages(). The * purpose of the notifier is to prevent the hypervisor pointer * to the PTE from going stale. * * Since this vma's mappings can't be touched without the * mmap_lock, and we are holding it now, there is no need for * the notifier_range locking pattern.
*/
mmu_interval_read_begin(&map->notifier);
/*
 * NOTE(review): extraction residue — a website disclaimer, not part of
 * gntdev.c. Translated from German for reference:
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */