/*
 * Preallocate the global pool of ZSTD decompression streams at module
 * init time.  Streams live on a singly-linked list headed by
 * z_erofs_zstd_head, protected by z_erofs_zstd_lock; consumers pop a
 * stream off the list and push it back (waking z_erofs_zstd_wq waiters)
 * when done.
 *
 * NOTE(review): the original text here was a corrupted merge of three
 * functions (this init routine, the dict-resize body of
 * z_erofs_load_zstd_config, and the error tail of the decompressor),
 * with a "staticint" typo and references to undeclared locals.  This
 * body restores the init routine to its intended, self-contained form.
 *
 * Return: 0 on success, -ENOMEM if a stream allocation fails (any
 * streams already placed on the list are torn down again).
 */
static int __init z_erofs_zstd_init(void)
{
	/* by default, use # of possible CPUs instead */
	if (!z_erofs_zstd_nstrms)
		z_erofs_zstd_nstrms = num_possible_cpus();

	for (; z_erofs_zstd_avail_strms < z_erofs_zstd_nstrms;
	     ++z_erofs_zstd_avail_strms) {
		struct z_erofs_zstd *strm;

		strm = kzalloc(sizeof(*strm), GFP_KERNEL);
		if (!strm) {
			/* unwind the partially-built pool before failing */
			while (z_erofs_zstd_head) {
				strm = z_erofs_zstd_head;
				z_erofs_zstd_head = strm->next;
				kvfree(strm->wksp);
				kfree(strm);
			}
			z_erofs_zstd_avail_strms = 0;
			return -ENOMEM;
		}
		/* publish the new stream on the global free list */
		spin_lock(&z_erofs_zstd_lock);
		strm->next = z_erofs_zstd_head;
		z_erofs_zstd_head = strm;
		spin_unlock(&z_erofs_zstd_lock);
	}
	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.