diff --git a/pressure-vessel/wrap.c b/pressure-vessel/wrap.c
index 1c86297deb184706b686d2a84c06baa328f29d92..bafd4e2f005133e5a763e0db1a3da84d7be589bc 100644
--- a/pressure-vessel/wrap.c
+++ b/pressure-vessel/wrap.c
@@ -349,6 +349,36 @@ export_contents_of_run (FlatpakBwrap *bwrap,
   return TRUE;
 }
 
+/* Nvidia Vulkan ray-tracing requires the `nvidia_uvm.ko` kernel module to
+ * be loaded. This is usually done by `libcuda.so.1`, which runs the setuid
+ * binary `nvidia-modprobe`. However, inside the container we don't bind
+ * `nvidia-modprobe` and, even if we did, its setuid bit would have no
+ * effect, because we have `PR_SET_NO_NEW_PRIVS` and `CAP_SYS_MODULE` is
+ * not in our capability bounding set.
+ * For this reason, if the current system is using the proprietary Nvidia
+ * drivers and `nvidia_uvm.ko` has not already been loaded, we should run
+ * `nvidia-modprobe` before entering the container environment. */
+static gboolean
+maybe_load_nvidia_modules (GError **error)
+{
+  const char *nvidia_modprobe_argv[] =
+  {
+    "nvidia-modprobe",
+    "-u",
+    NULL
+  };
+
+  g_return_val_if_fail (error == NULL || *error == NULL, FALSE);
+
+  if (g_file_test ("/sys/module/nvidia/version", G_FILE_TEST_IS_REGULAR)
+      && !g_file_test ("/sys/module/nvidia_uvm", G_FILE_TEST_IS_DIR))
+    return pv_run_sync (nvidia_modprobe_argv, NULL, NULL, NULL, error);
+
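+  /* Either the proprietary Nvidia driver is not in use, or nvidia_uvm is
+   * already loaded; in both cases there is nothing for us to do. */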
+  return TRUE;
+}
+
 typedef enum
 {
   ENV_MOUNT_FLAGS_COLON_DELIMITED = (1 << 0),
@@ -2697,6 +2725,18 @@ main (int argc,
         }
     }
 
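+  /* The setuid nvidia-modprobe can only work if we are not already in a
+   * container such as a Flatpak sandbox (see maybe_load_nvidia_modules).
+   * Failure is not fatal: log it and continue without the module. */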
+  if (!is_flatpak_env)
+    {
+      if (!maybe_load_nvidia_modules (error))
+        {
+          g_debug ("Cannot load nvidia modules: %s", local_error->message);
+          g_clear_error (&local_error);
+        }
+    }
+
   if (opt_only_prepare)
     ret = 0;
   else