/*
 * NOTE(review): fragment — the enclosing TEST() body, the fork that yields
 * `pid`, and the loop that filled open_fds[] are outside this chunk, so the
 * braces below are unbalanced from this view.
 *
 * Child path: exercise CLOSE_RANGE_UNSHARE, which first gives the child a
 * private copy of the (previously shared) fd table and then closes the
 * requested range in that private copy only.
 */
if (pid == 0) {
/* Close open_fds[0]..open_fds[50] in an unshared table. */
ret = sys_close_range(open_fds[0], open_fds[50],
CLOSE_RANGE_UNSHARE); if (ret) exit(EXIT_FAILURE);
/* fds 0..50 must now be closed (F_GETFL fails)... */
for (i = 0; i <= 50; i++) if (fcntl(open_fds[i], F_GETFL) != -1) exit(EXIT_FAILURE);
/* ...while fds 51..100 must remain open. */
for (i = 51; i <= 100; i++) if (fcntl(open_fds[i], F_GETFL) == -1) exit(EXIT_FAILURE);
/* create a couple of gaps */
/*
 * NOTE(review): these close raw fd numbers 57, 78, ... — not
 * open_fds[57] etc. Upstream closes open_fds[N]; this looks like
 * extraction garbling — confirm against the original test source.
 */
close(57);
close(78);
close(81);
close(82);
close(84);
close(90);
/* Closing a range containing already-closed fds must still succeed. */
ret = sys_close_range(open_fds[51], open_fds[92],
CLOSE_RANGE_UNSHARE); if (ret) exit(EXIT_FAILURE);
/* Everything in 51..92 is closed; 93..100 untouched. */
for (i = 51; i <= 92; i++) if (fcntl(open_fds[i], F_GETFL) != -1) exit(EXIT_FAILURE);
for (i = 93; i <= 100; i++) if (fcntl(open_fds[i], F_GETFL) == -1) exit(EXIT_FAILURE);
/* test that the kernel caps and still closes all fds */
ret = sys_close_range(open_fds[93], open_fds[99],
CLOSE_RANGE_UNSHARE); if (ret) exit(EXIT_FAILURE);
for (i = 93; i <= 99; i++) if (fcntl(open_fds[i], F_GETFL) != -1) exit(EXIT_FAILURE);
/* The fd just past the range must survive... */
if (fcntl(open_fds[100], F_GETFL) == -1) exit(EXIT_FAILURE);
/* ...until we close a single-fd range covering it. */
ret = sys_close_range(open_fds[100], open_fds[100],
CLOSE_RANGE_UNSHARE); if (ret) exit(EXIT_FAILURE);
if (fcntl(open_fds[100], F_GETFL) != -1) exit(EXIT_FAILURE);
/*
 * Verify CLOSE_RANGE_CLOEXEC: instead of closing fds, mark the requested
 * range close-on-exec (FD_CLOEXEC), honoring ranges beyond RLIMIT_NOFILE.
 *
 * NOTE(review): fragment — the verification loops at the bottom open braces
 * whose bodies (the EXPECT checks on `flags`) and the closing of the TEST
 * body are missing from this chunk.
 */
TEST(close_range_cloexec)
{ int i, ret; int open_fds[101]; struct rlimit rlimit;
/* Open 101 fds on /dev/null; skip the test if it doesn't exist. */
for (i = 0; i < ARRAY_SIZE(open_fds); i++) { int fd;
fd = open("/dev/null", O_RDONLY);
ASSERT_GE(fd, 0) { if (errno == ENOENT)
SKIP(return, "Skipping test since /dev/null does not exist");
}
open_fds[i] = fd;
}
/* Probe for syscall and flag support before asserting anything. */
ret = sys_close_range(1000, 1000, CLOSE_RANGE_CLOEXEC); if (ret < 0) { if (errno == ENOSYS)
SKIP(return, "close_range() syscall not supported"); if (errno == EINVAL)
SKIP(return, "close_range() doesn't support CLOSE_RANGE_CLOEXEC");
}
/* Ensure the FD_CLOEXEC bit is set also with a resource limit in place. */
ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlimit));
rlimit.rlim_cur = 25;
ASSERT_EQ(0, setrlimit(RLIMIT_NOFILE, &rlimit));
/* Set close-on-exec for two ranges: [0-50] and [75-100]. */
ret = sys_close_range(open_fds[0], open_fds[50], CLOSE_RANGE_CLOEXEC);
ASSERT_EQ(0, ret);
ret = sys_close_range(open_fds[75], open_fds[100], CLOSE_RANGE_CLOEXEC);
ASSERT_EQ(0, ret);
/* NOTE(review): loop body (FD_CLOEXEC expectations) truncated here. */
for (i = 0; i <= 50; i++) { int flags = fcntl(open_fds[i], F_GETFD);
/* Test a common pattern. */
/* NOTE(review): loop body truncated here as well. */
ret = sys_close_range(3, UINT_MAX, CLOSE_RANGE_CLOEXEC); for (i = 0; i <= 100; i++) { int flags = fcntl(open_fds[i], F_GETFD);
/*
 * Same as close_range_cloexec, but with CLOSE_RANGE_UNSHARE OR'd in: the
 * FD_CLOEXEC marking must happen on a freshly unshared fd table.
 *
 * NOTE(review): fragment — the verification loop bodies and the end of the
 * TEST body are missing from this chunk.
 */
TEST(close_range_cloexec_unshare)
{ int i, ret; int open_fds[101]; struct rlimit rlimit;
/* Open 101 fds on /dev/null; skip the test if it doesn't exist. */
for (i = 0; i < ARRAY_SIZE(open_fds); i++) { int fd;
fd = open("/dev/null", O_RDONLY);
ASSERT_GE(fd, 0) { if (errno == ENOENT)
SKIP(return, "Skipping test since /dev/null does not exist");
}
open_fds[i] = fd;
}
/* Probe for syscall and flag support before asserting anything. */
ret = sys_close_range(1000, 1000, CLOSE_RANGE_CLOEXEC); if (ret < 0) { if (errno == ENOSYS)
SKIP(return, "close_range() syscall not supported"); if (errno == EINVAL)
SKIP(return, "close_range() doesn't support CLOSE_RANGE_CLOEXEC");
}
/* Ensure the FD_CLOEXEC bit is set also with a resource limit in place. */
ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlimit));
rlimit.rlim_cur = 25;
ASSERT_EQ(0, setrlimit(RLIMIT_NOFILE, &rlimit));
/* Set close-on-exec for two ranges: [0-50] and [75-100]. */
ret = sys_close_range(open_fds[0], open_fds[50],
CLOSE_RANGE_CLOEXEC | CLOSE_RANGE_UNSHARE);
ASSERT_EQ(0, ret);
ret = sys_close_range(open_fds[75], open_fds[100],
CLOSE_RANGE_CLOEXEC | CLOSE_RANGE_UNSHARE);
ASSERT_EQ(0, ret);
/* NOTE(review): loop body (FD_CLOEXEC expectations) truncated here. */
for (i = 0; i <= 50; i++) { int flags = fcntl(open_fds[i], F_GETFD);
/* Test a common pattern. */
/* NOTE(review): loop body truncated here as well. */
ret = sys_close_range(3, UINT_MAX,
CLOSE_RANGE_CLOEXEC | CLOSE_RANGE_UNSHARE); for (i = 0; i <= 100; i++) { int flags = fcntl(open_fds[i], F_GETFD);
/*
 * NOTE(review): fragment — the enclosing TEST(), the clone with CLONE_FILES,
 * the child's exit, and the parent's waitpid are all missing from this chunk.
 *
 * Child path: CLOSE_RANGE_CLOEXEC *without* CLOSE_RANGE_UNSHARE operates on
 * the fd table shared with the parent.
 */
if (pid == 0) {
ret = sys_close_range(3, ~0U, CLOSE_RANGE_CLOEXEC); if (ret) exit(EXIT_FAILURE);
/* * We now have a private file descriptor table and all * our open fds should still be open but made * close-on-exec.
 * NOTE(review): "private" looks garbled for this variant — without
 * CLOSE_RANGE_UNSHARE the table stays shared; the FD_CLOEXEC check
 * below is what the code actually verifies.
 */
flags = fcntl(fd1, F_GETFD);
EXPECT_GT(flags, -1);
EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
/*
 * Parent-side check (glue between child and parent is missing from this
 * chunk): because the fd table was shared with the child, the parent's
 * fds MUST now be close-on-exec — which is what the assertion below
 * checks. (The previous comment claimed the opposite and contradicted
 * the EXPECT_EQ; corrected.)
 */
flags = fcntl(fd1, F_GETFD);
EXPECT_GT(flags, -1);
EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
/* * Regression test for syzbot+96cfd2b22b3213646a93@syzkaller.appspotmail.com
 */
/*
 * NOTE(review): fragment — the clone3() call that produces `pid`, the
 * child's exit, the parent's waitpid, and the gap-creating fd setup
 * referenced by the big comment below are missing from this chunk; fd2,
 * fd3 and `status` are declared but their uses are not visible here.
 */
TEST(close_range_cloexec_unshare_syzbot)
{ int i, fd1, fd2, fd3, flags, ret, status;
pid_t pid; struct __clone_args args = {
.flags = CLONE_FILES,
.exit_signal = SIGCHLD,
};
/* * Create a huge gap in the fd table. When we now call * CLOSE_RANGE_UNSHARE with a shared fd table and and with ~0U as upper * bound the kernel will only copy up to fd1 file descriptors into the * new fd table. If the kernel is buggy and doesn't handle * CLOSE_RANGE_CLOEXEC correctly it will not have copied all file * descriptors and we will oops! * * On a buggy kernel this should immediately oops. But let's loop just * to be sure.
 */
fd1 = open("/dev/null", O_RDWR);
EXPECT_GT(fd1, 0);
if (pid == 0) {
/* Child: unshare the table, then mark everything >= 3 close-on-exec. */
ret = sys_close_range(3, ~0U, CLOSE_RANGE_UNSHARE |
CLOSE_RANGE_CLOEXEC); if (ret) exit(EXIT_FAILURE);
/* * We now have a private file descriptor table and all * our open fds should still be open but made * close-on-exec.
 */
flags = fcntl(fd1, F_GETFD);
EXPECT_GT(flags, -1);
EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
/* * We created a private file descriptor table before along with * requesting close-on-exec so the original fds must not be * close-on-exec.
 */
/* Parent-side check: unshared child table must not leak FD_CLOEXEC back. */
flags = fcntl(fd1, F_GETFD);
EXPECT_GT(flags, -1);
EXPECT_EQ(flags & FD_CLOEXEC, 0);
/*
 * NOTE(review): the lines below belong to a different test (fd bitmap
 * corruption regression) — its TEST() header and fork are not in this
 * chunk. Child truncates the table to 64 fds and checks that the
 * 64..127 bitmap range is reusable (dup(0) must land on fd 64).
 */
if (pid == 0) { /* unshare and truncate descriptor table down to 64 */ if (sys_close_range(64, ~0U, CLOSE_RANGE_UNSHARE)) exit(EXIT_FAILURE);
ASSERT_EQ(fcntl(64, F_GETFD), -1); /* ... and verify that the range 64..127 is not
stuck "fully used" according to secondary bitmap */
EXPECT_EQ(dup(0), 64) exit(EXIT_FAILURE); exit(EXIT_SUCCESS);
}
/*
 * NOTE(review): the following German web-page disclaimer is extraneous
 * extraction residue, not part of the test source; preserved (translated)
 * inside a comment so the file remains a valid C translation unit:
 * "The information on this web page was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */