/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

	/* Free the four directly-held map pages in low_prot[]. */
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}

	/*
	 * Walk the protptrs[] pointer pages; free every map page each one
	 * holds, then the pointer page itself.  addr tracks the address
	 * covered so the inner loop can stop once spt->maxaddr is reached.
	 */
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}
/* * We don't try too hard, we just mark all the vma in that range * VM_NOHUGEPAGE and split them.
*/
for_each_vma_range(vmi, vma, addr + len) {
vm_flags_set(vma, VM_NOHUGEPAGE);
walk_page_vma(vma, &subpage_walk_ops, NULL);
}
} #else staticvoid subpage_mark_vma_nohuge(struct mm_struct *mm, unsignedlong addr, unsignedlong len)
{ return;
} #endif
/* * Copy in a subpage protection map for an address range. * The map has 2 bits per 4k subpage, so 32 bits per 64k page. * Each 2-bit field is 0 to allow any access, 1 to prevent writes, * 2 or 3 to prevent all accesses. * Note that the normal page protections also apply; the subpage * protection mechanism is an additional constraint, so putting 0 * in a 2-bit field won't allow writes to a page that is otherwise * write-protected.
 */
/*
 * NOTE(review): this extraction is damaged and must be restored from the
 * original source before it can compile: "unsigned long" has lost its
 * internal space ("unsignedlong") throughout, statements are fused onto
 * single lines, and the code between the spt allocation and
 * hpte_flush_range() below is missing entirely — the loop that walks the
 * range and computes nw/limit is gone, leaving an orphaned closing brace.
 * The bytes below are preserved exactly as found.
 */
SYSCALL_DEFINE3(subpage_prot, unsignedlong, addr, unsignedlong, len, u32 __user *, map)
{ struct mm_struct *mm = current->mm; struct subpage_prot_table *spt;
u32 **spm, *spp; unsignedlong i;
size_t nw; unsignedlong next, limit; int err;
/* Subpage protection is not provided when the radix MMU is enabled. */
if (radix_enabled()) return -ENOENT;
/* Check parameters */ if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
addr >= mm->task_size || len >= mm->task_size ||
addr + len > mm->task_size) return -EINVAL;
if (is_hugepage_only_range(mm, addr, len)) return -EINVAL;
if (!map) { /* Clear out the protection map for the address range */
subpage_prot_clear(addr, len); return 0;
}
/* One u32 map word per 64k page in the range; verify userspace buffer. */
if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32))) return -EFAULT;
mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context); if (!spt) { /* * Allocate subpage prot table if not already done. * Do this with mmap_lock held
 */
spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL); if (!spt) {
err = -ENOMEM; goto out;
}
mm->context.hash_context->spt = spt;
}
/*
 * NOTE(review): missing code here — nw and limit are used below but are
 * never computed in the visible text, and the "}" two lines down closes
 * a loop whose opening is not visible.
 */
/* now flush any existing HPTEs for the range */
hpte_flush_range(mm, addr, nw);
} if (limit > spt->maxaddr)
spt->maxaddr = limit;
err = 0;
out:
/* Single unlock point for both the success and error paths. */
mmap_write_unlock(mm); return err;
}