From 56402acaf32c442fa6c4fe199e53c6658fcbe102 Mon Sep 17 00:00:00 2001
From: Ludovico de Nittis <ludovico.denittis@collabora.com>
Date: Thu, 25 Feb 2021 12:01:49 +0100
Subject: [PATCH] wrap: execute nvidia-modprobe before entering the container

If needed, try to run `nvidia-modprobe` to load the necessary
`nvidia_uvm.ko` kernel module.

Fixes: #59

Signed-off-by: Ludovico de Nittis <ludovico.denittis@collabora.com>
---
 pressure-vessel/wrap.c | 77 +++++++++++++++++------------------------
 1 file changed, 31 insertions(+), 46 deletions(-)

diff --git a/pressure-vessel/wrap.c b/pressure-vessel/wrap.c
index 51825c26e..bafd4e2f0 100644
--- a/pressure-vessel/wrap.c
+++ b/pressure-vessel/wrap.c
@@ -349,6 +349,34 @@ export_contents_of_run (FlatpakBwrap *bwrap,
   return TRUE;
 }
 
+/* Nvidia Vulkan ray-tracing requires loading the `nvidia_uvm.ko` kernel
+ * module, which is usually done in `libcuda.so.1` by running the setuid
+ * binary `nvidia-modprobe`. But when we are inside a container we don't bind
+ * `nvidia-modprobe` and, even if we did, its setuid bit would not be effective
+ * because we have `PR_SET_NO_NEW_PRIVS` and we don't have `CAP_SYS_MODULE` in
+ * our capability bounding set.
+ * For this reason, if the current system is using the proprietary Nvidia
+ * drivers and `nvidia_uvm.ko` has not already been loaded, we should execute
+ * `nvidia-modprobe` before entering the container environment. */
+static gboolean
+maybe_load_nvidia_modules (GError **error)
+{
+  const char *nvidia_modprobe_argv[] =
+    {
+      "nvidia-modprobe",
+      "-u",
+      NULL
+    };
+
+  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);
+
+  if (g_file_test ("/sys/module/nvidia/version", G_FILE_TEST_IS_REGULAR)
+      && !g_file_test ("/sys/module/nvidia_uvm", G_FILE_TEST_IS_DIR))
+    return pv_run_sync (nvidia_modprobe_argv, NULL, NULL, NULL, error);
+
+  return TRUE;
+}
+
 typedef enum
 {
   ENV_MOUNT_FLAGS_COLON_DELIMITED = (1 << 0),
@@ -2697,56 +2725,13 @@ main (int argc,
         }
     }
 
-  /* Nvidia Vulkan ray-tracing requires to load the `nvidia_uvm.ko` kernel
-   * module, and this is usually done in `libcuda.so.1` by running the setuid
-   * binary `nvidia-modprobe`. But when we are inside a container we don't bind
-   * `nvidia-modprobe` and, even if we did, it's setuid would not be effective
-   * because we have `PR_SET_NO_NEW_PRIVS` and we don't have `CAP_SYS_MODULE` in
-   * our capability bounding set.
-   * For this reason if the current system is using the proprietary Nvidia
-   * drivers and `nvidia_uvm.ko` has not been already loaded, we execute
-   * `nvidia-modprobe` before entering in the container environment. */
-  if (g_file_test ("/sys/module/nvidia/version", G_FILE_TEST_IS_REGULAR)
-      && !g_file_test ("/sys/module/nvidia_uvm", G_FILE_TEST_IS_DIR))
+  if (!is_flatpak_env)
     {
-      g_autofree gchar *child_stdout = NULL;
-      g_autofree gchar *child_stderr = NULL;
-      int wait_status;
-      const char *nvidia_modprobe_argv[] =
-        {
-          "nvidia-modprobe",
-          "-u",
-          NULL
-        };
-
-      g_debug ("Running nvidia-modprobe...");
-
-      /* We use LEAVE_DESCRIPTORS_OPEN to work around a deadlock in older GLib,
-       * see flatpak_close_fds_workaround */
-      if (!g_spawn_sync (NULL,  /* cwd */
-                         (gchar **) nvidia_modprobe_argv,
-                         NULL,  /* environ */
-                         (G_SPAWN_SEARCH_PATH |
-                          G_SPAWN_LEAVE_DESCRIPTORS_OPEN),
-                         NULL, NULL,
-                         &child_stdout,
-                         &child_stderr,
-                         &wait_status,
-                         error))
+      if (!maybe_load_nvidia_modules (error))
         {
-          g_debug ("Cannot run nvidia-modprobe: %s", local_error->message);
+          g_debug ("Cannot load nvidia modules: %s", local_error->message);
           g_clear_error (&local_error);
         }
-      else if (wait_status != 0)
-        {
-          g_debug ("Cannot run nvidia-modprobe: wait status %d", wait_status);
-
-          if (child_stdout != NULL && child_stdout[0] != '\0')
-            g_debug ("Output:\n%s", child_stdout);
-
-          if (child_stderr != NULL && child_stderr[0] != '\0')
-            g_debug ("Diagnostic output:\n%s", child_stderr);
-        }
     }
 
   if (opt_only_prepare)
-- 
GitLab
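
For reference, the check-and-spawn logic that this patch factors into maybe_load_nvidia_modules() can be illustrated outside pressure-vessel. The standalone sketch below is an illustration, not part of the patch: it uses plain GLib g_spawn_sync, as the pre-patch inline code did, instead of the internal pv_run_sync helper, and the demo_* names are hypothetical.

/* Standalone sketch (illustration only): approximate the behaviour of
 * maybe_load_nvidia_modules() from the patch above, using g_spawn_sync
 * like the pre-patch inline code instead of pv_run_sync.
 * Build (assumed): gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>

static gboolean
demo_maybe_load_nvidia_modules (GError **error)
{
  const char *nvidia_modprobe_argv[] = { "nvidia-modprobe", "-u", NULL };
  int wait_status = 0;

  /* Proprietary Nvidia driver not in use, or nvidia_uvm already loaded:
   * nothing to do. Same preconditions as the patch. */
  if (!g_file_test ("/sys/module/nvidia/version", G_FILE_TEST_IS_REGULAR)
      || g_file_test ("/sys/module/nvidia_uvm", G_FILE_TEST_IS_DIR))
    return TRUE;

  /* Run the setuid helper on the host side, where it can still gain the
   * privileges needed to load the kernel module. */
  if (!g_spawn_sync (NULL,                /* cwd */
                     (gchar **) nvidia_modprobe_argv,
                     NULL,                /* environ */
                     G_SPAWN_SEARCH_PATH,
                     NULL, NULL,          /* no child setup */
                     NULL, NULL,          /* child inherits stdout/stderr */
                     &wait_status,
                     error))
    return FALSE;

  if (wait_status != 0)
    {
      g_set_error (error, G_SPAWN_ERROR, G_SPAWN_ERROR_FAILED,
                   "nvidia-modprobe failed with wait status %d", wait_status);
      return FALSE;
    }

  return TRUE;
}

int
main (void)
{
  g_autoptr(GError) local_error = NULL;

  /* As in the patch, a failure is treated as non-fatal: it is only
   * logged and the error is discarded. */
  if (!demo_maybe_load_nvidia_modules (&local_error))
    g_debug ("Cannot load nvidia modules: %s", local_error->message);

  return 0;
}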