/* * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't * need to split, we'll change the protections on the whole P4D.
*/ if (next - vaddr >= P4D_SIZE &&
vaddr <= (vaddr & P4D_MASK) && end >= next) continue;
if (p4d_leaf(p4dp_get(p4dp))) { struct page *pud_page; unsignedlong pfn = _p4d_pfn(p4dp_get(p4dp));
pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
pud_t *pudp_new; int i;
pud_page = alloc_page(GFP_KERNEL); if (!pud_page) return -ENOMEM;
/* * Fill the pud level with leaf puds that have the same * protections as the leaf p4d.
*/
pudp_new = (pud_t *)page_address(pud_page); for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
set_pud(pudp_new,
pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
/* * Make sure the pud filling is not reordered with the * p4d store which could result in seeing a partially * filled pud level.
*/
smp_wmb();
do {
next = pgd_addr_end(vaddr, end); /* We never use PGD mappings for the linear mapping */
ret = __split_linear_mapping_p4d(pgdp, vaddr, next); if (ret) return ret;
} while (pgdp++, vaddr = next, vaddr != end);
#ifdef CONFIG_64BIT /* * We are about to change the permissions of a kernel mapping, we must * apply the same changes to its linear mapping alias, which may imply * splitting a huge mapping.
*/
if (is_vmalloc_or_module_addr((void *)start)) { struct vm_struct *area = NULL; int i, page_start;
ret = split_linear_mapping(lm_start, lm_end); if (ret) goto unlock;
ret = walk_kernel_page_table_range(lm_start, lm_end,
&pageattr_ops, NULL, &masks); if (ret) goto unlock;
}
ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL,
&masks);
unlock:
mmap_write_unlock(&init_mm);
/* * We can't use flush_tlb_kernel_range() here as we may have split a * hugepage that is larger than that, so let's flush everything.
*/
flush_tlb_all(); #else
ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL,
&masks);
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end); #endif
return ret;
}
int set_memory_rw_nx(unsignedlong addr, int numpages)
{ return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
__pgprot(_PAGE_EXEC));
}
int set_memory_ro(unsignedlong addr, int numpages)
{ return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
__pgprot(_PAGE_WRITE));
}
int set_memory_rw(unsignedlong addr, int numpages)
{ return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
__pgprot(0));
}
int set_memory_x(unsignedlong addr, int numpages)
{ return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}
int set_memory_nx(unsignedlong addr, int numpages)
{ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.