/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}
/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
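
/*
 * Write @count bytes into the ring buffer: grow the size counter (it
 * saturates at buffer_size), advance and wrap the start pointer, copy
 * the data in one or two chunks depending on whether it wraps, then
 * refresh the header ECC. If the record is larger than the buffer,
 * only its tail is kept.
 */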
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}
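
/*
 * Same as persistent_ram_write(), but @s points into user space: the
 * copies go through persistent_ram_update_user(), and a faulting copy
 * propagates its error code instead of the byte count.
 */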
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}
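
/*
 * Map the physical buffer into the kernel with vmap(), using the
 * cacheability requested by @memtype. This path is used when the
 * region is backed by struct pages.
 */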
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	switch (memtype) {
	case MEM_TYPE_NORMAL:
		prot = PAGE_KERNEL;
		break;
	case MEM_TYPE_NONCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case MEM_TYPE_WCOMBINE:
		prot = pgprot_writecombine(PAGE_KERNEL);
		break;
	default:
		pr_err("invalid mem_type=%d\n", memtype);
		return NULL;
	}

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;

		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}

	/*
	 * VM_IOREMAP used here to bypass this region during vread()
	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}
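
/*
 * Reserve the region and ioremap() it instead; this path is taken when
 * the physical range is not part of the kernel's memory map and has no
 * struct pages behind it.
 */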
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity,
	 * there is no need to handle anything special like we do in the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}
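
/*
 * For context, a minimal sketch (illustrative, not verbatim from this
 * file) of how the two helpers above are typically chosen between when
 * a zone's buffer is mapped: take the vmap() path when the first PFN of
 * the range is valid (struct page backing exists), otherwise fall back
 * to the ioremap() path. Identifiers mirror the ramoops code, but the
 * body is a sketch, not the authoritative implementation.
 */
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	/* pick the mapping strategy based on struct page availability */
	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	/* the header lives at the start of the mapping, data follows it */
	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}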