feat: add --dangerously-bypass-approvals-and-sandbox (#1384)
This PR reworks `assess_command_safety()` so that the combination of `AskForApproval::Never` and `SandboxPolicy::DangerFullAccess` guarantees that commands run without _any_ sandbox and that the user is never prompted. In turn, it adds a new `--dangerously-bypass-approvals-and-sandbox` flag (which cannot be combined with `--approval-policy` or `--full-auto`) that sets both of those options at once.

Fixes https://github.com/openai/codex/issues/1254
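The core of the rework is easiest to picture as a small decision function over the two policies. The sketch below is a simplification under assumed names: `SafetyDecision` and the trimmed-down enums are hypothetical stand-ins, and the real `assess_command_safety()` takes more context (the command itself, previously approved commands, platform details). It only illustrates the guarantee described above: `Never` plus `DangerFullAccess` means no sandbox and no prompt.

```rust
#![allow(dead_code)] // unused variants are fine in a sketch

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AskForApproval {
    UnlessTrusted, // other variants elided
    Never,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SandboxPolicy {
    WorkspaceWrite, // simplified; the real variant carries configuration
    DangerFullAccess,
}

/// What the caller should do with a proposed command (hypothetical type).
#[derive(Debug, PartialEq, Eq)]
enum SafetyDecision {
    /// Run directly on the host: no sandbox, no prompt.
    AutoApproveWithoutSandbox,
    /// Run automatically, but inside the configured sandbox.
    AutoApproveInSandbox,
    /// Ask the user before running.
    AskUser,
}

fn assess_command_safety_sketch(
    approval: AskForApproval,
    sandbox: SandboxPolicy,
) -> SafetyDecision {
    match (approval, sandbox) {
        // The guarantee this PR adds: Never + DangerFullAccess means the
        // command is executed unsandboxed and the user is never prompted.
        (AskForApproval::Never, SandboxPolicy::DangerFullAccess) => {
            SafetyDecision::AutoApproveWithoutSandbox
        }
        // With Never but a real sandbox policy, still never prompt, but
        // keep the sandbox in place.
        (AskForApproval::Never, _) => SafetyDecision::AutoApproveInSandbox,
        // Otherwise fall back to asking (the real logic is finer-grained).
        _ => SafetyDecision::AskUser,
    }
}

fn main() {
    assert_eq!(
        assess_command_safety_sketch(AskForApproval::Never, SandboxPolicy::DangerFullAccess),
        SafetyDecision::AutoApproveWithoutSandbox
    );
    println!("Never + DangerFullAccess => run unsandboxed, no prompt");
}
```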
```diff
@@ -31,6 +31,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         model,
         config_profile,
         full_auto,
+        dangerously_bypass_approvals_and_sandbox,
         cwd,
         skip_git_repo_check,
         color,
@@ -85,6 +86,8 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any

     let sandbox_policy = if full_auto {
         Some(SandboxPolicy::new_workspace_write_policy())
+    } else if dangerously_bypass_approvals_and_sandbox {
+        Some(SandboxPolicy::DangerFullAccess)
     } else {
         None
     };
```
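The mutual exclusion with `--approval-policy` and `--full-auto` is the kind of constraint clap can enforce at parse time. Below is a rough sketch of how such a flag might be declared with clap 4's derive API; the field names come from the diff above, but the surrounding struct, the `Option<String>` type for the approval policy, and the exact attributes are assumptions rather than the actual `Cli` definition.

```rust
use clap::Parser;

/// Simplified stand-in for the real `Cli`; only the fields relevant to this
/// sketch are shown, and the actual attributes may differ.
#[derive(Parser, Debug)]
struct Cli {
    /// Convenience flag that selects the workspace-write sandbox policy.
    #[arg(long)]
    full_auto: bool,

    /// Explicit approval policy (a plain String here just for illustration).
    #[arg(long)]
    approval_policy: Option<String>,

    /// Skip all confirmation prompts AND run commands outside any sandbox.
    /// `conflicts_with_all` makes clap reject combinations with the other
    /// approval-related flags at parse time.
    #[arg(long, conflicts_with_all = ["full_auto", "approval_policy"])]
    dangerously_bypass_approvals_and_sandbox: bool,
}

fn main() {
    // e.g. `app --dangerously-bypass-approvals-and-sandbox --full-auto`
    // fails with a usage error instead of silently picking one option.
    let cli = Cli::parse();
    println!("{cli:?}");
}
```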