// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);
/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC	XA_MARK_1
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	/* Verify that the altmap is freed */
	WARN_ON(mem->altmap);
	kfree(mem);
}
/* Max block size to be set by memory_block_advise_max_size */
static unsigned long memory_block_advised_size;
static bool memory_block_advised_size_queried;
/**
 * memory_block_advise_max_size() - advise memory hotplug on the max suggested
 *				    block size, usually for alignment.
 * @size: suggestion for maximum block size. must be aligned on power of 2.
 *
 * Early boot software (pre-allocator init) may advise archs on the max block
 * size. This value can only decrease after initialization, as the intent is
 * to identify the largest supported alignment for all sources.
 *
 * Use of this value is arch-defined, as is min/max block size.
 *
 * Return: 0 on success
 *	   -EINVAL if size is 0 or not pow2 aligned
 *	   -EBUSY if value has already been probed
 */
int __init memory_block_advise_max_size(unsigned long size)
{
	if (!size || !is_power_of_2(size))
		return -EINVAL;

	if (memory_block_advised_size_queried)
		return -EBUSY;

	if (memory_block_advised_size)
		memory_block_advised_size = min(memory_block_advised_size, size);
	else
		memory_block_advised_size = size;

	return 0;
}
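
/*
 * Example (illustrative sketch, not part of the kernel sources): early boot
 * code that knows about a platform alignment constraint could advise the
 * maximum block size before the architecture probes it. The function name
 * and the SZ_1G value below are assumptions for illustration only.
 *
 *	static int __init example_platform_early_init(void)
 *	{
 *		// Suggest that memory blocks larger than 1 GiB are not useful here.
 *		return memory_block_advise_max_size(SZ_1G);
 *	}
 */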
/**
 * memory_block_advised_max_size() - query advised max hotplug block size.
 *
 * After the first call, the value can never change. Callers looking for the
 * actual block size should use memory_block_size_bytes. This interface is
 * intended for use by arch-init when initializing the hotplug block size.
 *
 * Return: advised size in bytes, or 0 if never set.
 */
unsigned long memory_block_advised_max_size(void)
{
	memory_block_advised_size_queried = true;
	return memory_block_advised_size;
}
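
/*
 * Example (illustrative sketch, not part of the kernel sources): architecture
 * setup code could clamp its detected block size to the advised value. The
 * helper name probe_block_size() is hypothetical; only the call order matters:
 * query once, after which the advised value can no longer change.
 *
 *	unsigned long advised = memory_block_advised_max_size();
 *	unsigned long block_size = probe_block_size();
 *
 *	if (advised)
 *		block_size = min(block_size, advised);
 */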
	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}
/*
 * Must acquire mem_hotplug_lock in write mode.
 */
static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	struct memory_notify arg;
	struct zone *zone;
	int ret;

	if (memblk_nr_poison(mem))
		return -EHWPOISON;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
				  start_pfn, nr_pages);
	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g vmemmaps
	 * belong to the same zone as the memory they backed.
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;
arg.altmap_start_pfn = start_pfn;
arg.altmap_nr_pages = nr_vmemmap_pages;
arg.start_pfn = start_pfn + nr_vmemmap_pages;
arg.nr_pages = nr_pages - nr_vmemmap_pages;
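
	/*
	 * Worked example (illustrative, assuming a 128 MiB block, 4 KiB base
	 * pages and a 64-byte struct page): the block covers 32768 pages, and
	 * a memmap placed on the hotplugged range itself needs 32768 * 64
	 * bytes = 2 MiB = 512 pages. nr_vmemmap_pages would then be 512, so
	 * notifiers see arg.start_pfn = start_pfn + 512 and
	 * arg.nr_pages = 32256, while the first 512 pages are reported via
	 * the arg.altmap_* fields.
	 */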
mem_hotplug_begin();
ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto out_notifier;
if (nr_vmemmap_pages) {
ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
						zone, mem->altmap->inaccessible);
		if (ret)
			goto out;
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, mem->group);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		goto out;
}
	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
	mem_hotplug_begin();
	if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn),
						  mem->group, nr_vmemmap_pages);
		goto out;
}
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;
	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, mem->start_section_nr, action, action);
ret = -EINVAL;
}
return ret;
}
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
ret = memory_block_action(mem, to_state);
mem->state = ret ? from_state_req : to_state;
return ret;
}
/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
mem->online_type = MMOP_OFFLINE;
	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
ret = -EINVAL; /* should never happen */
}
unlock_device_hotplug();
	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;
return count;
}
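
/*
 * Example (illustrative, based on the documented sysfs ABI): user space
 * drives the state transitions handled above by writing to the per-block
 * "state" attribute, e.g.:
 *
 *	echo offline > /sys/devices/system/memory/memory42/state
 *	echo online_movable > /sys/devices/system/memory/memory42/state
 *
 * "memory42" is an arbitrary example block; valid values are online,
 * online_kernel, online_movable and offline.
 */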
/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * archs never exposed != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      struct memory_group *group,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;
static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct memory_group *group = mem->group;
	struct zone *default_zone;
	int nid = mem->nid;
	int len;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes otherwise the page_zone is not reliable
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * If !mem->zone, the memory block spans multiple zones and
		 * cannot get offlined.
		 */
		return sysfs_emit(buf, "%s\n",
mem->zone ? mem->zone->name : "none");
}
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;
nid = memory_add_physaddr_to_nid(phys_addr);
ret = __add_memory(nid, phys_addr,
MIN_MEMORY_BLOCK_SIZE * sections_per_block,
MHP_NONE);
	if (ret)
		goto out;

ret = count;
out:
	unlock_device_hotplug();
	return ret;
}
static DEVICE_ATTR_WO(probe);
#endif
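
/*
 * Example (illustrative, based on the documented probe interface): on
 * architectures that enable CONFIG_ARCH_MEMORY_PROBE, user space can request
 * hot-add of a block-aligned physical range by writing its start address:
 *
 *	echo 0x100000000 > /sys/devices/system/memory/probe
 *
 * The address 0x100000000 is an arbitrary, block-aligned example value.
 */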
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}
/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, MF_SW_SIMULATED);
	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret ? ret : count;
}
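
/*
 * Example (illustrative, based on the documented error-injection ABI): both
 * attributes take a physical address, which is converted to a PFN above:
 *
 *	echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *	echo 0x200000000 > /sys/devices/system/memory/hard_offline_page
 *
 * The address is an arbitrary example; CAP_SYS_ADMIN is required.
 */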
/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}
/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}
/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(unsigned long section_nr)
{
	unsigned long block_id = memory_block_id(section_nr);

	return find_memory_block_by_id(block_id);
}
	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret)
device_unregister(&memory->dev);
return ret;
}
static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
						     int nid)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *zone, *matching_zone = NULL;
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;
	/*
	 * This logic only works for early memory, when the applicable zones
	 * already span the memory block. We don't expect overlapping zones on
	 * a single node for early memory. So if we're told that some PFNs
	 * of a node fall into this memory block, we can assume that all node
	 * zones that intersect with the memory block are actually applicable.
	 * No need to look at the memmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (!matching_zone) {
			matching_zone = zone;
			continue;
		}
		/* Spans multiple zones ... */
		matching_zone = NULL;
		break;
	}
	return matching_zone;
}
#ifdef CONFIG_NUMA
/**
 * memory_block_add_nid() - Indicate that system RAM falling into this memory
 *			    block device (partially) belongs to the given node.
 * @mem: The memory block device.
 * @nid: The node id.
 * @context: The memory initialization context.
 *
 * Indicate that system RAM falling into this memory block (partially) belongs
 * to the given node. If the context indicates ("early") that we are adding the
 * node during node device subsystem initialization, this will also properly
 * set/adjust mem->zone based on the zone ranges of the given node.
 */
void memory_block_add_nid(struct memory_block *mem, int nid,
			  enum meminit_context context)
{
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		/*
		 * For early memory we have to determine the zone when setting
		 * the node id and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
		 * dealing with a single zone. So if we're setting the node id
		 * the first time, determine if there is a single zone. If we're
		 * setting the node id a second time to a different node,
		 * invalidate the single detected zone.
		 */
		if (mem->nid == NUMA_NO_NODE)
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
mem->zone = NULL;
}
	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node. If we span multiple nodes (not applicable
	 * to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and consequently unplug.
	 */
mem->nid = nid;
}
#endif
static int add_memory_block(unsigned long block_id, unsigned long state,
			    struct vmem_altmap *altmap,
			    struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
#ifndef CONFIG_NUMA
	if (state == MEM_ONLINE)
		/*
		 * MEM_ONLINE at this point implies early memory. With NUMA,
		 * we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
		 * manually when memory onlining/offlining succeeds.
		 */
		mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
#endif /* CONFIG_NUMA */
	ret = __add_memory_block(mem);
	if (ret)
		return ret;

if (group) {
mem->group = group;
list_add(&mem->group_next, &group->memory_blocks);
}
if (memory->group) {
list_del(&memory->group_next);
memory->group = NULL;
}
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
}
/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				struct vmem_altmap *altmap,
				struct memory_group *group)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;
if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = add_hotplug_memory_block(block_id, altmap, group);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
remove_memory_block(mem);
}
	}
	return ret;
}
/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
unregister_memory_block_under_nodes(mem);
remove_memory_block(mem);
}
}
/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, block_id, nr;
/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
panic("Memory block size not suitable: 0x%lx\n", block_sz);
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
	/*
	 * Create entries for memory sections that were found during boot
	 * and have been initialized. Use @block_id to track the last
	 * handled block and initialize it to an invalid value (ULONG_MAX)
	 * to bypass the block ID matching check for the first present
	 * block so that it can be covered.
	 */
block_id = ULONG_MAX;
	for_each_present_section_nr(0, nr) {
		if (block_id != ULONG_MAX && memory_block_id(nr) == block_id)
			continue;

block_id = memory_block_id(nr);
		ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
		if (ret) {
panic("%s() failed to add memory block: %d\n",
__func__, ret);
}
}
}
/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;
for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
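
/*
 * Example (illustrative sketch, not part of the kernel sources): a caller
 * holding the device_hotplug_lock could count the present memory blocks
 * overlapping a range. count_block() and the variable names are hypothetical.
 *
 *	static int count_block(struct memory_block *mem, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr_blocks = 0;
 *
 *	walk_memory_blocks(start, size, &nr_blocks, count_block);
 */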
/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
.func = func,
.arg = arg,
};
/*
 * This is an internal helper to unify allocation and initialization of
 * memory groups. Note that the passed memory group will be copied to a
 * dynamically allocated memory group. After this call, the passed
 * memory group should no longer be used.
 */
static int memory_group_register(struct memory_group group)
{
	struct memory_group *new_group;
	uint32_t mgid;
	int ret;
/**
 * memory_group_register_static() - Register a static memory group.
 * @nid: The node id.
 * @max_pages: The maximum number of pages we'll have in this static memory
 *	       group.
 *
 * Register a new static memory group and return the memory group id.
 * All memory in the group belongs to a single unit, such as a DIMM. All
 * memory belonging to a static memory group is added in one go to be removed
 * in one go -- it's static.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 * returns the new memory group id.
 */
int memory_group_register_static(int nid, unsigned long max_pages)
{
	struct memory_group group = {
.nid = nid,
.s = {
.max_pages = max_pages,
},
};
	if (!max_pages)
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);
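
/*
 * Example (illustrative sketch, not part of the kernel sources): a memory
 * device driver could group all blocks of one DIMM-like device before adding
 * its memory, so they can later be handled as one unit. nid, start and
 * dimm_size are placeholders and error handling is trimmed; the
 * MHP_NID_IS_MGID flag tells add_memory_driver_managed() to treat its first
 * argument as a memory group id rather than a node id.
 *
 *	int mgid = memory_group_register_static(nid, PFN_UP(dimm_size));
 *
 *	if (mgid < 0)
 *		return mgid;
 *	ret = add_memory_driver_managed(mgid, start, dimm_size,
 *					"System RAM (example)", MHP_NID_IS_MGID);
 */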
/**
 * memory_group_register_dynamic() - Register a dynamic memory group.
 * @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this
 *		dynamic memory group.
 *
 * Register a new dynamic memory group and return the memory group id.
 * Memory within a dynamic memory group is added/removed dynamically
 * in unit_pages.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if unit_pages is invalid (0, not a
 * power of two, smaller than a single memory block). Otherwise, returns the
 * new memory group id.
 */
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
	struct memory_group group = {
.nid = nid,
.is_dynamic = true,
.d = {
.unit_pages = unit_pages,
},
};
/**
 * memory_group_unregister() - Unregister a memory group.
 * @mgid: the memory group id
 *
 * Unregister a memory group. If any memory block still belongs to this
 * memory group, unregistering will fail.
 *
 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
 * memory blocks still belong to this memory group and returns 0 if
 * unregistering succeeded.
 */
int memory_group_unregister(int mgid)
{
	struct memory_group *group;

	if (mgid < 0)
		return -EINVAL;

	group = xa_load(&memory_groups, mgid);
	if (!group)
		return -EINVAL;
	if (!list_empty(&group->memory_blocks))
		return -EBUSY;
xa_erase(&memory_groups, mgid);
	kfree(group);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);
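
/*
 * Example (illustrative sketch, not part of the kernel sources): once a
 * driver has offlined and removed all memory it previously added to a group,
 * it drops the group again. mgid is the id returned by one of the register
 * functions above; the removal step itself is only hinted at.
 *
 *	// remove the previously added memory first, then:
 *	memory_group_unregister(mgid);
 */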
/*
 * This is an internal helper only to be used in core memory hotplug code to
 * lookup a memory group. We don't care about locking, as we don't expect a
 * memory group to get unregistered while adding memory to it -- because
 * the group and the memory are managed by the same driver.
 */
struct memory_group *memory_group_find_by_id(int mgid)
{
	return xa_load(&memory_groups, mgid);
}
/*
 * This is an internal helper only to be used in core memory hotplug code to
 * walk all dynamic memory groups excluding a given memory group, either
 * belonging to a specific node, or belonging to any node.
 */
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
			       struct memory_group *excluded, void *arg)
{
	struct memory_group *group;
	unsigned long index;
	int ret = 0;
xa_for_each_marked(&memory_groups, index, group,
			   MEMORY_GROUP_MARK_DYNAMIC) {
		if (group == excluded)
			continue;
#ifdef CONFIG_NUMA
		if (nid != NUMA_NO_NODE && group->nid != nid)
			continue;
#endif /* CONFIG_NUMA */
		ret = func(group, arg);
		if (ret)
			break;
	}
	return ret;
}