for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory(sizes[i], mem_type, 0, true); if (check_allocated_memory(ptr, sizes[i], mem_type, true) != KSFT_PASS) return KSFT_FAIL;
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i]); /* Set some value in tagged memory */ for (j = 0; j < sizes[i]; j++)
ptr[j] = '1';
mte_wait_after_trig();
err = cur_mte_cxt.fault_valid; /* Check the buffer whether it is filled. */ for (j = 0; j < sizes[i] && !err; j++) { if (ptr[j] != '1')
err = true;
}
mte_free_memory((void *)ptr, sizes[i], mem_type, true);
if (err) break;
} if (!err) return KSFT_PASS; else return KSFT_FAIL;
}
/*
 * Write a tagged buffer byte-by-byte past its start (underflow) and verify
 * the MTE tag-check outcome matches the requested fault-reporting mode.
 *
 * @mem_type:        allocation backing type (e.g. USE_MMAP/USE_MPROTECT),
 *                   forwarded to the mte_* allocation helpers.
 * @mode:            MTE fault mode under test: MTE_NONE_ERR, MTE_ASYNC_ERR
 *                   or MTE_SYNC_ERR.
 * @underflow_range: number of untagged bytes allocated before the buffer
 *                   that the write loop is allowed to run into.
 *
 * Returns KSFT_PASS if every size in sizes[] behaves as expected for @mode,
 * KSFT_FAIL otherwise.
 */
static int check_buffer_underflow_by_byte(int mem_type, int mode,
					  int underflow_range)
{
	char *ptr;
	int i, j, item, last_index;
	bool err;
	char *und_ptr = NULL;

	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
	item = ARRAY_SIZE(sizes);
	for (i = 0; i < item; i++) {
		ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
							    underflow_range, 0);
		if (check_allocated_memory_range(ptr, sizes[i], mem_type,
						 underflow_range, 0) != KSFT_PASS)
			return KSFT_FAIL;

		/*
		 * Negative range: the fault context covers the underflow
		 * area below ptr, not the buffer itself.
		 */
		mte_initialize_current_context(mode, (uintptr_t)ptr, -underflow_range);
		last_index = 0;
		/* Set some value in tagged memory and make the buffer underflow */
		for (j = sizes[i] - 1; (j >= -underflow_range) &&
				       (!cur_mte_cxt.fault_valid); j--) {
			ptr[j] = '1';
			last_index = j;
		}
		mte_wait_after_trig();
		err = false;
		/* Check whether the buffer is filled */
		for (j = 0; j < sizes[i]; j++) {
			if (ptr[j] != '1') {
				err = true;
				ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%p\n",
					       j, ptr);
				break;
			}
		}
		if (err)
			goto check_buffer_underflow_by_byte_err;

		switch (mode) {
		case MTE_NONE_ERR:
			/* No tag checking: the loop must have run to the very end */
			if (cur_mte_cxt.fault_valid == true || last_index != -underflow_range) {
				err = true;
				break;
			}
			/* There were no fault so the underflow area should be filled */
			und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr - underflow_range);
			for (j = 0 ; j < underflow_range; j++) {
				if (und_ptr[j] != '1') {
					err = true;
					break;
				}
			}
			break;
		case MTE_ASYNC_ERR:
			/* Imprecise fault should occur otherwise return error */
			if (cur_mte_cxt.fault_valid == false) {
				err = true;
				break;
			}
			/*
			 * The imprecise fault is checked after the write to the
			 * buffer, so the underflow area before the fault should
			 * be filled.
			 */
			und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
			for (j = last_index ; j < 0 ; j++) {
				if (und_ptr[j] != '1') {
					err = true;
					break;
				}
			}
			break;
		case MTE_SYNC_ERR:
			/*
			 * Precise fault should occur on the first out-of-bounds
			 * byte (index -1), otherwise return error.
			 */
			if (!cur_mte_cxt.fault_valid || (last_index != (-1))) {
				err = true;
				break;
			}
			/* Underflow area should not be filled */
			und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
			if (und_ptr[-1] == '1')
				err = true;
			break;
		default:
			err = true;
			break;
		}
check_buffer_underflow_by_byte_err:
		mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type,
					  underflow_range, 0);
		if (err)
			break;
	}
	return (err ? KSFT_FAIL : KSFT_PASS);
}
staticint check_buffer_overflow_by_byte(int mem_type, int mode, int overflow_range)
{ char *ptr; int i, j, item, last_index; bool err;
size_t tagged_size, overflow_size; char *over_ptr = NULL;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
item = ARRAY_SIZE(sizes); for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
0, overflow_range); if (check_allocated_memory_range(ptr, sizes[i], mem_type,
0, overflow_range) != KSFT_PASS) return KSFT_FAIL;
/* Set some value in tagged memory and make the buffer underflow */ for (j = 0, last_index = 0 ; (j < (sizes[i] + overflow_range)) &&
(cur_mte_cxt.fault_valid == false); j++) {
ptr[j] = '1';
last_index = j;
}
mte_wait_after_trig();
err = false; /* Check whether the buffer is filled */ for (j = 0; j < sizes[i]; j++) { if (ptr[j] != '1') {
err = true;
ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%p\n",
j, ptr); break;
}
} if (err) goto check_buffer_overflow_by_byte_err;
/* Buffer by byte tests */
evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_SYNC_ERR), "Check buffer correctness by byte with sync err mode and mmap memory\n");
evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_ASYNC_ERR), "Check buffer correctness by byte with async err mode and mmap memory\n");
evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_SYNC_ERR), "Check buffer correctness by byte with sync err mode and mmap/mprotect memory\n");
evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_ASYNC_ERR), "Check buffer correctness by byte with async err mode and mmap/mprotect memory\n");
/* Check buffer underflow with underflow size as 16 */
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE), "Check buffer write underflow by byte with sync mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE), "Check buffer write underflow by byte with async mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE), "Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");
/* Check buffer underflow with underflow size as page size */
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, page_size), "Check buffer write underflow by byte with sync mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, page_size), "Check buffer write underflow by byte with async mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, page_size), "Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");
/* Check buffer overflow with overflow size as 16 */
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE), "Check buffer write overflow by byte with sync mode and mmap memory\n");
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE), "Check buffer write overflow by byte with async mode and mmap memory\n");
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE), "Check buffer write overflow by byte with tag fault ignore mode and mmap memory\n");
/* Buffer by block tests */
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_SYNC_ERR), "Check buffer write correctness by block with sync mode and mmap memory\n");
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_ASYNC_ERR), "Check buffer write correctness by block with async mode and mmap memory\n");
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_NONE_ERR), "Check buffer write correctness by block with tag fault ignore and mmap memory\n");
/* Initial tags are supposed to be 0 */
evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE), "Check initial tags with private mapping, sync error mode and mmap memory\n");
evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE), "Check initial tags with private mapping, sync error mode and mmap/mprotect memory\n");
evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED), "Check initial tags with shared mapping, sync error mode and mmap memory\n");
evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED), "Check initial tags with shared mapping, sync error mode and mmap/mprotect memory\n");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.