57 changes: 41 additions & 16 deletions crates/bashkit/src/builtins/sqlite/dot_commands.rs
@@ -74,6 +74,7 @@ pub(super) fn dispatch(
engine: &SqliteEngine,
opts: &mut OutputOpts,
deadline: Deadline,
max_rows: usize,
) -> Result<DotOutcome, DotError> {
let (name, args) = tokenize_dot(line);
match name.as_str() {
@@ -83,10 +84,12 @@ pub(super) fn dispatch(
"mode" => set_mode(args, opts).map(|_| DotOutcome::Configured),
"separator" | "sep" => set_separator(args, opts).map(|_| DotOutcome::Configured),
"nullvalue" | "null" => set_null(args, opts).map(|_| DotOutcome::Configured),
"tables" => tables(args, engine, opts, deadline).map(DotOutcome::Stdout),
"schema" => schema(args, engine, deadline).map(DotOutcome::Stdout),
"indexes" | "indices" => indexes(args, engine, opts, deadline).map(DotOutcome::Stdout),
"dump" => dump(engine, deadline).map(DotOutcome::Stdout),
"tables" => tables(args, engine, opts, deadline, max_rows).map(DotOutcome::Stdout),
"schema" => schema(args, engine, deadline, max_rows).map(DotOutcome::Stdout),
"indexes" | "indices" => {
indexes(args, engine, opts, deadline, max_rows).map(DotOutcome::Stdout)
}
"dump" => dump(engine, deadline, max_rows).map(DotOutcome::Stdout),
"read" => {
let path = args
.into_iter()
@@ -188,6 +191,7 @@ fn tables(
engine: &SqliteEngine,
opts: &OutputOpts,
deadline: Deadline,
max_rows: usize,
) -> Result<String, DotError> {
let pattern = args.into_iter().next();
let sql = match pattern {
@@ -197,7 +201,9 @@
),
None => "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name".to_string(),
};
let outcome = engine.execute(&sql, deadline).map_err(DotError::Engine)?;
let outcome = engine
.execute(&sql, deadline, max_rows)
.map_err(DotError::Engine)?;
let mut names = Vec::new();
for row in &outcome.rows {
if let Some(Value::Text(t)) = row.first() {
@@ -219,6 +225,7 @@ fn schema(
args: Vec<String>,
engine: &SqliteEngine,
deadline: Deadline,
max_rows: usize,
) -> Result<String, DotError> {
let pattern = args.into_iter().next();
let sql = match pattern {
@@ -228,7 +235,9 @@
),
None => "SELECT sql FROM sqlite_master WHERE sql IS NOT NULL ORDER BY name".to_string(),
};
let outcome = engine.execute(&sql, deadline).map_err(DotError::Engine)?;
let outcome = engine
.execute(&sql, deadline, max_rows)
.map_err(DotError::Engine)?;
let mut out = String::new();
for row in &outcome.rows {
if let Some(Value::Text(t)) = row.first() {
@@ -244,6 +253,7 @@ fn indexes(
engine: &SqliteEngine,
_opts: &OutputOpts,
deadline: Deadline,
max_rows: usize,
) -> Result<String, DotError> {
let pattern = args.into_iter().next();
let sql = match pattern {
@@ -253,7 +263,9 @@
),
None => "SELECT name FROM sqlite_master WHERE type='index' ORDER BY name".to_string(),
};
let outcome = engine.execute(&sql, deadline).map_err(DotError::Engine)?;
let outcome = engine
.execute(&sql, deadline, max_rows)
.map_err(DotError::Engine)?;
let mut out = String::new();
for row in &outcome.rows {
if let Some(Value::Text(t)) = row.first() {
@@ -267,14 +279,15 @@ fn indexes(
/// Emit `BEGIN; <CREATE TABLE>...; <INSERT INTO ... VALUES (...)>; COMMIT;`.
/// This matches sqlite3's `.dump` for tables; views/triggers/indexes only get
/// their CREATE statement, no rows. Blob literals are emitted as `X'..'`.
fn dump(engine: &SqliteEngine, deadline: Deadline) -> Result<String, DotError> {
fn dump(engine: &SqliteEngine, deadline: Deadline, max_rows: usize) -> Result<String, DotError> {
let mut out = String::from("PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\n");

// Schema first.
let schema_outcome = engine
.execute(
"SELECT type, name, sql FROM sqlite_master WHERE sql IS NOT NULL ORDER BY rowid",
deadline,
max_rows,
)
.map_err(DotError::Engine)?;
for row in &schema_outcome.rows {
@@ -289,6 +302,7 @@ fn dump(engine: &SqliteEngine, deadline: Deadline) -> Result<String, DotError> {
.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name",
deadline,
max_rows,
)
.map_err(DotError::Engine)?;
for row in &tables_outcome.rows {
@@ -298,7 +312,9 @@ fn dump(engine: &SqliteEngine, deadline: Deadline) -> Result<String, DotError> {
let name = t.as_str().to_string();
let quoted = name.replace('"', "\"\"");
let sql = format!("SELECT * FROM \"{quoted}\"");
let data = engine.execute(&sql, deadline).map_err(DotError::Engine)?;
let data = engine
.execute(&sql, deadline, max_rows)
.map_err(DotError::Engine)?;
for data_row in &data.rows {
let values: Vec<String> = data_row.iter().map(format_sql_literal).collect();
out.push_str(&format!(
@@ -357,7 +373,7 @@ mod tests {
engine: &SqliteEngine,
opts: &mut OutputOpts,
) -> Result<DotOutcome, DotError> {
dispatch(line, engine, opts, no_deadline())
dispatch(line, engine, opts, no_deadline(), usize::MAX)
}

#[test]
@@ -450,10 +466,10 @@
fn tables_lists_existing() {
let engine = mk_engine();
engine
.execute("CREATE TABLE foo(a)", no_deadline())
.execute("CREATE TABLE foo(a)", no_deadline(), usize::MAX)
.unwrap();
engine
.execute("CREATE TABLE bar(b)", no_deadline())
.execute("CREATE TABLE bar(b)", no_deadline(), usize::MAX)
.unwrap();
let mut o = opts();
let DotOutcome::Stdout(s) = dispatch_t(".tables", &engine, &mut o).unwrap() else {
@@ -467,10 +483,10 @@
fn tables_with_pattern() {
let engine = mk_engine();
engine
.execute("CREATE TABLE foo(a)", no_deadline())
.execute("CREATE TABLE foo(a)", no_deadline(), usize::MAX)
.unwrap();
engine
.execute("CREATE TABLE bar(b)", no_deadline())
.execute("CREATE TABLE bar(b)", no_deadline(), usize::MAX)
.unwrap();
let mut o = opts();
let DotOutcome::Stdout(s) = dispatch_t(".tables foo", &engine, &mut o).unwrap() else {
@@ -494,7 +510,11 @@
fn schema_returns_create() {
let engine = mk_engine();
engine
.execute("CREATE TABLE foo(a INTEGER, b TEXT)", no_deadline())
.execute(
"CREATE TABLE foo(a INTEGER, b TEXT)",
no_deadline(),
usize::MAX,
)
.unwrap();
let mut o = opts();
let DotOutcome::Stdout(s) = dispatch_t(".schema", &engine, &mut o).unwrap() else {
@@ -507,12 +527,17 @@
fn dump_round_trips() {
let engine = mk_engine();
engine
.execute("CREATE TABLE t(x INTEGER, y TEXT)", no_deadline())
.execute(
"CREATE TABLE t(x INTEGER, y TEXT)",
no_deadline(),
usize::MAX,
)
.unwrap();
engine
.execute(
"INSERT INTO t VALUES (1, 'hello'), (2, 'O''Brien')",
no_deadline(),
usize::MAX,
)
.unwrap();
let mut o = opts();
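The `.dump` doc comment in this file describes the emitted statement sequence; as a rough illustration (not asserted by the PR; exact whitespace and literal formatting come from the elided `format!` calls above), the `dump_round_trips` fixture would produce output shaped roughly like:

```rust
// Illustrative only: approximate `.dump` output for the dump_round_trips
// fixture (CREATE TABLE t(x INTEGER, y TEXT) plus two inserted rows).
// Exact spacing and quoting depend on the format! strings not shown here.
let expected_shape = "\
PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE t(x INTEGER, y TEXT);
INSERT INTO t VALUES (1,'hello');
INSERT INTO t VALUES (2,'O''Brien');
COMMIT;
";
```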
13 changes: 12 additions & 1 deletion crates/bashkit/src/builtins/sqlite/engine.rs
@@ -160,7 +160,12 @@ impl SqliteEngine {
/// `deadline` carries the wall-clock budget shared across all statements
/// in this invocation. Once it expires, we issue `stmt.interrupt()` and
/// return a timeout error rather than continuing the step loop.
pub(super) fn execute(&self, sql: &str, deadline: Deadline) -> EngineResult<StatementOutcome> {
pub(super) fn execute(
&self,
sql: &str,
deadline: Deadline,
max_rows: usize,
) -> EngineResult<StatementOutcome> {
let mut stmt = self.conn.prepare(sql).map_err(turso_msg)?;
let mut outcome = StatementOutcome::default();
for idx in 0..stmt.num_columns() {
@@ -173,6 +178,12 @@
}
match stmt.step().map_err(turso_msg)? {
StepResult::Row => {
let next_row = outcome.rows.len() + 1;
if next_row > max_rows {
return Err(format!(
"result set exceeds row cap ({next_row} > {max_rows})"
));
}
if let Some(row) = stmt.row() {
let values: Vec<Value> = (0..stmt.num_columns())
.map(|idx| row.get_value(idx).clone())
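The new cap check in `execute()` rejects the statement before materialising the offending row, so peak memory stays bounded by `max_rows` rows. A minimal standalone sketch of the same pattern, with a hypothetical `fetch_next` standing in for the turso step/row loop:

```rust
// Sketch of the cap-before-materialise pattern: fail as soon as the next row
// would exceed the cap, so at most `max_rows` rows are ever held in memory.
fn collect_capped<T>(
    mut fetch_next: impl FnMut() -> Option<T>,
    max_rows: usize,
) -> Result<Vec<T>, String> {
    let mut rows = Vec::new();
    while let Some(row) = fetch_next() {
        let next_row = rows.len() + 1;
        if next_row > max_rows {
            return Err(format!(
                "result set exceeds row cap ({next_row} > {max_rows})"
            ));
        }
        rows.push(row);
    }
    Ok(rows)
}
```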
19 changes: 10 additions & 9 deletions crates/bashkit/src/builtins/sqlite/mod.rs
@@ -724,19 +724,20 @@ async fn run_statements(
match stmt {
Stmt::Sql(sql) => {
check_sql_policy(&sql, limits)?;
let outcome = engine.execute(&sql, deadline).map_err(|e| sanitize(&e))?;
if outcome.rows.len() > limits.max_rows_per_query {
return Err(format!(
"result set exceeds row cap ({} > {})",
outcome.rows.len(),
limits.max_rows_per_query
));
}
let outcome = engine
.execute(&sql, deadline, limits.max_rows_per_query)
.map_err(|e| sanitize(&e))?;
let rendered = render(&outcome.columns, &outcome.rows, opts);
stdout.push_str(&rendered);
}
Stmt::Dot(line) => {
let result = dot_commands::dispatch(&line, engine, opts, deadline);
let result = dot_commands::dispatch(
&line,
engine,
opts,
deadline,
limits.max_rows_per_query,
);
match result {
Ok(DotOutcome::Stdout(s)) => stdout.push_str(&s),
Ok(DotOutcome::Configured) => {}
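With this change the same `limits.max_rows_per_query` budget covers both plain SQL statements and dot commands, replacing the old after-the-fact length check on `outcome.rows`. A hedged sketch of configuring it, reusing only the builder calls exercised in the tests below (`max_rows_per_query`, `max_duration`, `Sqlite::with_limits`):

```rust
// Sketch only: assumes the crate-internal SqliteLimits/Sqlite types and the
// builder methods shown in tests.rs; not a verbatim public API listing.
let limits = SqliteLimits::default()
    .max_rows_per_query(10_000)
    .max_duration(std::time::Duration::from_secs(2));
let sqlite = Sqlite::with_limits(limits);
// Every statement, whether SQL or a dot command such as `.dump`, now shares
// the same per-query row budget enforced inside SqliteEngine::execute().
```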
47 changes: 47 additions & 0 deletions crates/bashkit/src/builtins/sqlite/tests.rs
@@ -63,6 +63,16 @@ async fn run(args: &[&str], stdin: Option<&str>) -> ExecResult {
run_with(args, SqliteBackend::Memory, fs, stdin, &env).await
}

async fn run_with_limits(args: &[&str], stdin: Option<&str>, limits: SqliteLimits) -> ExecResult {
let env = opt_in_env();
let fs: Arc<dyn FileSystem> = Arc::new(InMemoryFs::new());
let owned: Vec<String> = args.iter().map(|s| s.to_string()).collect();
let mut variables = HashMap::new();
let mut cwd = PathBuf::from("/home/user");
let ctx = Context::new_for_test(&owned, &env, &mut variables, &mut cwd, fs, stdin);
Sqlite::with_limits(limits).execute(ctx).await.unwrap()
}

async fn run_vfs(args: &[&str], stdin: Option<&str>) -> ExecResult {
let env = opt_in_env();
let fs: Arc<dyn FileSystem> = Arc::new(InMemoryFs::new());
@@ -471,6 +481,43 @@ async fn deadline_already_expired_aborts_with_timeout() {
assert!(r.stderr.contains("timed out"), "stderr was: {}", r.stderr);
}

#[tokio::test]
async fn row_cap_rejects_oversize_select() {
let limits = SqliteLimits::default()
.max_rows_per_query(1)
.max_duration(std::time::Duration::ZERO);
let r = run_with_limits(&[":memory:", "SELECT 1 UNION ALL SELECT 2"], None, limits).await;
assert_eq!(r.exit_code, 1);
assert_eq!(r.stdout, "");
assert!(
r.stderr.contains("result set exceeds row cap (2 > 1)"),
"stderr was: {}",
r.stderr
);
}

#[tokio::test]
async fn dump_data_respects_row_cap() {
let limits = SqliteLimits::default()
.max_rows_per_query(1)
.max_duration(std::time::Duration::ZERO);
let r = run_with_limits(
&[
":memory:",
"CREATE TABLE t(x); INSERT INTO t VALUES (1), (2);\n.dump",
],
None,
limits,
)
.await;
assert_eq!(r.exit_code, 1);
assert!(
r.stderr.contains("result set exceeds row cap (2 > 1)"),
"stderr was: {}",
r.stderr
);
}

#[test]
fn limits_builder_round_trips() {
let l = SqliteLimits::default()
11 changes: 4 additions & 7 deletions crates/bashkit/tests/sqlite_security_tests.rs
@@ -87,18 +87,15 @@ async fn tm_sql_004_oversize_result_set_aborts() {
let r = bash
.exec(
r#"sqlite :memory: '
CREATE TABLE big AS WITH RECURSIVE r(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM r WHERE n<100) SELECT n FROM r;
SELECT * FROM big' 2>&1 || echo SAW_ROW_CAP"#,
SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5
UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10
UNION ALL SELECT 11' 2>&1 || echo SAW_ROW_CAP"#,
)
.await
.unwrap();
// Either turso supports recursive CTE and we hit our cap, OR it returns
// an error. Both outcomes prove the cap is wired and the user gets
// back to the shell.
assert!(
r.stdout.contains("SAW_ROW_CAP")
|| r.stderr.contains("row cap")
|| r.stderr.contains("sqlite:"),
&& r.stdout.contains("result set exceeds row cap (11 > 10)"),
"stdout={:?} stderr={:?}",
r.stdout,
r.stderr,
2 changes: 1 addition & 1 deletion specs/sqlite-builtin.md
@@ -209,7 +209,7 @@ leading SQL keyword via the parser's lightweight tokeniser
| TM-SQL-001 | Code execution via BETA upstream | Off by default (cargo feature) + runtime opt-in env var |
| TM-SQL-002 | Sandbox escape via host filesystem | All paths resolve through `Arc<dyn FileSystem>`; Phase 2 IO is bound to that FS only |
| TM-SQL-003 | DoS via large SQL input | `SqliteLimits::max_script_bytes` (4 MiB default) |
| TM-SQL-004 | DoS via huge result set | `SqliteLimits::max_rows_per_query` (1M default) |
| TM-SQL-004 | DoS via huge result set | `SqliteLimits::max_rows_per_query` (1M default), checked before materialising each row |
| TM-SQL-005 | DoS via huge DB file | `SqliteLimits::max_db_bytes` (256 MiB default) at load time and while growing DBs |
| TM-SQL-005a | DoS via wall-clock burn (regex-style queries, CTEs) | `SqliteLimits::max_duration` enforced via per-step deadline + `Statement::interrupt()` |
| TM-SQL-005b | DoS via statement-flood (millions of `;`) | `SqliteLimits::max_statements` checked after splitting |
2 changes: 1 addition & 1 deletion specs/threat-model.md
@@ -1876,7 +1876,7 @@ filesystem.
| TM-SQL-001 | Code execution via BETA upstream | Critical | Cargo feature + builder registration + runtime opt-in gate | `tm_sql_001_default_disabled_without_opt_in` |
| TM-SQL-002 | Sandbox escape via host filesystem | Critical | DB files, `.read`, and VFS backend paths resolve through `Arc<dyn FileSystem>` | `tm_sql_002_paths_resolve_only_to_vfs`, `tm_sql_002b_vfs_backend_isolated_to_bash_fs` |
| TM-SQL-003 | DoS via large SQL input | High | `SqliteLimits::max_script_bytes` | `tm_sql_003_oversize_script_rejected` |
| TM-SQL-004 | DoS via huge result set | High | `SqliteLimits::max_rows_per_query` | `tm_sql_004_oversize_result_set_aborts` |
| TM-SQL-004 | DoS via huge result set | High | `SqliteLimits::max_rows_per_query`, enforced before materialising each row | `tm_sql_004_oversize_result_set_aborts` |
| TM-SQL-005 | DoS via huge DB file | High | `SqliteLimits::max_db_bytes` on load and while growing DBs on both backends | `tm_sql_005_oversize_db_file_rejected`, `db_growth_beyond_max_rejected_on_both_backends` |
| TM-SQL-005a | DoS via wall-clock burn | High | Per-step deadline and `Statement::interrupt()` | `deadline_already_expired_aborts_with_timeout` |
| TM-SQL-005b | DoS via statement flood | Medium | `SqliteLimits::max_statements` after splitting | `too_many_statements_rejected` |