// SPDX-License-Identifier: GPL-2.0
/*
 * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
 * the linux kernel to help device drivers mirror a process address space in
 * the device. This allows the device to use the same address space which
 * makes communication and data exchange a lot easier.
 *
 * This framework's sole purpose is to exercise various code paths inside
 * the kernel to make sure that HMM performs as expected and to flush out any
 * bugs.
 */

/*
 * This is a private UAPI to the kernel test module so it isn't exported
 * in the usual include/uapi/... directory.
 */
#include <lib/test_hmm_uapi.h>
#include <mm/gup_test.h>
/* * Initialize buffer in system memory but leave the first two pages * zero (pte_none and pfn_zero).
*/
i = 2 * self->page_size / sizeof(*ptr); for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Set buffer permission to read-only. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
/* Populate the CPU page table with a special zero page. */
val = *(int *)(buffer->ptr + self->page_size);
ASSERT_EQ(val, 0);
/* Simulate a device reading system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
ptr = buffer->mirror; for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], 0); for (; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/* * Read private anonymous memory which has been protected with * mprotect() PROT_NONE.
*/
TEST_F(hmm, anon_read_prot)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret;
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Initialize mirror buffer so we can verify it isn't written. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
/* Protect buffer from reading. */
ret = mprotect(buffer->ptr, size, PROT_NONE);
ASSERT_EQ(ret, 0);
/* Simulate a device reading system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, -EFAULT);
/* Allow CPU to read the buffer so we can check it. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0); for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
/* Simulate a device reading a zero page of memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 1);
ASSERT_EQ(buffer->faults, 1);
/* Initialize data that the device will write to buffer->ptr. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, -EPERM);
/* Check what the device wrote. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], 0);
/* Now allow writing and see that the zero page is replaced. */
ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
ASSERT_EQ(ret, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/* * Check that a device writing an anonymous private mapping * will copy-on-write if a child process inherits the mapping.
*/
TEST_F(hmm, anon_write_child)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr;
pid_t pid; int child_fd; int ret;
/* Initialize buffer->ptr so we can tell if it is written. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Initialize data that the device will write to buffer->ptr. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
pid = fork(); if (pid == -1)
ASSERT_EQ(pid, 0); if (pid != 0) {
waitpid(pid, &ret, 0);
ASSERT_EQ(WIFEXITED(ret), 1);
/* Check that the parent's buffer did not change. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i); return;
}
/* Check that we see the parent's values. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i); for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
/* The child process needs its own mirror to its own mm. */
child_fd = hmm_open(0);
ASSERT_GE(child_fd, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
close(child_fd); exit(0);
}
/* * Check that a device writing an anonymous shared mapping * will not copy-on-write if a child process inherits the mapping.
*/
TEST_F(hmm, anon_write_child_shared)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr;
pid_t pid; int child_fd; int ret;
/* Initialize buffer->ptr so we can tell if it is written. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Initialize data that the device will write to buffer->ptr. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
pid = fork(); if (pid == -1)
ASSERT_EQ(pid, 0); if (pid != 0) {
waitpid(pid, &ret, 0);
ASSERT_EQ(WIFEXITED(ret), 1);
/* Check that the parent's buffer did change. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i); return;
}
/* Check that we see the parent's values. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i); for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
/* The child process needs its own mirror to its own mm. */
child_fd = hmm_open(0);
ASSERT_GE(child_fd, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
/* Initialize data that the device will write to buffer->ptr. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
buffer->ptr = old_ptr;
hmm_buffer_free(buffer);
}
/* * Read numeric data from raw and tagged kernel status files. Used to read * /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
*/ staticlong file_read_ulong(char *file, constchar *tag)
{ int fd; char buf[2048]; int len; char *p, *q; long val;
fd = open(file, O_RDONLY); if (fd < 0) { /* Error opening the file */ return -1;
}
len = read(fd, buf, sizeof(buf));
close(fd); if (len < 0) { /* Error in reading the file */ return -1;
} if (len == sizeof(buf)) { /* Error file is too large */ return -1;
}
buf[len] = '\0';
/* Search for a tag if provided */ if (tag) {
p = strstr(buf, tag); if (!p) return -1; /* looks like the line we want isn't there */
p += strlen(tag);
} else
p = buf;
val = strtol(p, &q, 0); if (*q != ' ') { /* Error parsing the file */ return -1;
}
default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:"); if (default_hsize < 0 || default_hsize*1024 < default_hsize)
SKIP(return, "Huge page size could not be determined");
default_hsize = default_hsize*1024; /* KB to B */
/* Initialize data that the device will write to buffer->ptr. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Check that the device also wrote the file. */
len = pread(fd, buffer->mirror, size, 0);
ASSERT_EQ(len, size); for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/* * Migrate anonymous memory to device private memory.
*/
TEST_F(hmm, migrate)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret;
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/* * Migrate anonymous memory to device private memory and fault some of it back * to system memory, then try migrating the resulting mix of system and device * private memory to the device.
*/
TEST_F(hmm, migrate_fault)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret;
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Fault half the pages back to system memory and check them. */ for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
ASSERT_EQ(ptr[i], i);
/* Migrate memory to the device again. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
TEST_F(hmm, migrate_release)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret;
/* Now try to migrate pages 2-5 to device 1. */
buffer->ptr = p + 2 * self->page_size;
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 4);
/* Page 5 won't be migrated to device 0 because it's on device 1. */
buffer->ptr = p + 5 * self->page_size;
ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
ASSERT_EQ(ret, -ENOENT);
buffer->ptr = p;
buffer->ptr = p;
hmm_buffer_free(buffer);
}
/* * Migrate anonymous memory to device memory and back to system memory * multiple times. In case of private zone configuration, this is done * through fault pages accessed by CPU. In case of coherent zone configuration, * the pages from the device should be explicitly migrated back to system memory. * The reason is Coherent device zone has coherent access by CPU, therefore * it will not generate any page fault.
*/
TEST_F(hmm, migrate_multiple)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; unsignedlong c; int *ptr; int ret;
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Migrate back to system memory and check them. */ if (hmm_is_coherent_type(variant->device_number)) {
ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
}
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Delay for a bit and then unmap buffer while it is being read. */
hmm_nanosleep(hmm_random() % 32000);
munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
buffer->ptr = NULL;
return NULL;
}
/* * Try reading anonymous memory while it is being unmapped.
*/
TEST_F(hmm, anon_teardown)
{ unsignedlong npages; unsignedlong size; unsignedlong c; void *ret;
/* * Test memory snapshot without faulting in pages accessed by the device.
*/
TEST_F(hmm, mixedmap)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedchar *m; int ret;
/* Reserve a range of addresses. */
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
self->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Simulate a device snapshotting CPU pagetables. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device saw. */
m = buffer->mirror;
ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
hmm_buffer_free(buffer);
}
/* * Test memory snapshot without faulting in pages accessed by the device.
*/
TEST_F(hmm2, snapshot)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; int *ptr; unsignedchar *p; unsignedchar *m; int ret; int val;
/* * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that * should be mapped by a large page table entry.
*/
TEST_F(hmm, compound)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong default_hsize; int *ptr; unsignedchar *m; int ret; unsignedlong i;
/* Skip test if we can't allocate a hugetlbfs page. */
default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:"); if (default_hsize < 0 || default_hsize*1024 < default_hsize)
SKIP(return, "Huge page size could not be determined");
default_hsize = default_hsize*1024; /* KB to B */
/* * Test two devices reading the same memory (double mapped).
*/
TEST_F(hmm2, double_map)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret;
/* Reserve a range of addresses. */
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Make region read-only. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
/* Simulate device 0 reading system memory. */
ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Simulate device 1 reading system memory. */
ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Migrate pages to device 1 and try to read from device 0. */
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
fork();
/* Fault pages back to system memory and check them. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i]++, i);
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i+1);
hmm_buffer_free(buffer);
}
staticint gup_test_exec(int gup_fd, unsignedlong addr, int cmd, int npages, int size, int flags)
{ struct gup_test gup = {
.nr_pages_per_call = npages,
.addr = addr,
.gup_flags = FOLL_WRITE | flags,
.size = size,
};
if (ioctl(gup_fd, cmd, &gup)) {
perror("ioctl on error\n"); return errno;
}
return 0;
}
/* * Test get user device pages through gup_test. Setting PIN_LONGTERM flag. * This should trigger a migration back to system memory for both, private * and coherent type pages. * This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added * to your configuration before you run it.
*/
TEST_F(hmm, hmm_gup_test)
{ struct hmm_buffer *buffer; int gup_fd; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret; unsignedchar *m;
gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR); if (gup_fd == -1)
SKIP(return, "Skipping test, could not find gup_test driver");
/* Take snapshot to CPU pagetables */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
m = buffer->mirror; if (hmm_is_coherent_type(variant->device_number)) {
ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
} else {
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
}
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]); /* * Check again the content on the pages. Make sure there's no * corrupted data.
*/ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
close(gup_fd);
hmm_buffer_free(buffer);
}
/* * Test copy-on-write in device pages. * In case of writing to COW private page(s), a page fault will migrate pages * back to system memory first. Then, these pages will be duplicated. In case * of COW device coherent type, pages are duplicated directly from device * memory.
*/
TEST_F(hmm, hmm_cow_in_device)
{ struct hmm_buffer *buffer; unsignedlong npages; unsignedlong size; unsignedlong i; int *ptr; int ret; unsignedchar *m;
pid_t pid; int status;
/* Initialize buffer in system memory. */ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
pid = fork(); if (pid == -1)
ASSERT_EQ(pid, 0); if (!pid) { /* Child process waitd for SIGTERM from the parent. */ while (1) {
}
perror("Should not reach this\n"); exit(0);
} /* Parent process writes to COW pages(s) and gets a * new copy in system. In case of device private pages, * this write causes a migration to system mem first.
*/ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Take snapshot to CPU pagetables */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
m = buffer->mirror; for (i = 0; i < npages; i++)
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
/*
 * NOTE(review): the text below is website boilerplate (originally German)
 * that leaked into this file during extraction; it is not part of the
 * original source. English translation kept for the record:
 *
 * The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, correctness, nor quality
 * of the information provided is guaranteed.
 * Note: the color syntax highlighting and the measurement are still
 * experimental.
 */