diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f70b677ac0c..5b62299ef7e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ concurrency: cancel-in-progress: true jobs: - docker_smoketests: + smoketests: needs: [lints] name: Smoketests strategy: @@ -26,18 +26,16 @@ jobs: runner: [spacetimedb-new-runner, windows-latest] include: - runner: spacetimedb-new-runner - smoketest_args: --docker container: image: localhost:5000/spacetimedb-ci:latest options: --privileged - runner: windows-latest - smoketest_args: --no-build-cli container: null runs-on: ${{ matrix.runner }} container: ${{ matrix.container }} timeout-minutes: 120 - env: - CARGO_TARGET_DIR: ${{ github.workspace }}/target + #env: + # CARGO_TARGET_DIR: ${{ github.workspace }}/target steps: - name: Find Git ref env: @@ -84,41 +82,36 @@ jobs: if: runner.os == 'Windows' run: choco install psql -y --no-progress shell: powershell - - name: Build crates - run: cargo build -p spacetimedb-cli -p spacetimedb-standalone -p spacetimedb-update - - name: Start Docker daemon - if: runner.os == 'Linux' - run: /usr/local/bin/start-docker.sh - - - name: Build and start database (Linux) - if: runner.os == 'Linux' - run: | - # Our .dockerignore omits `target`, which our CI Dockerfile needs. - rm .dockerignore - docker compose -f .github/docker-compose.yml up -d - - name: Build and start database (Windows) + + - name: Update dotnet workloads if: runner.os == 'Windows' run: | # Fail properly if any individual command fails $ErrorActionPreference = 'Stop' $PSNativeCommandUseErrorActionPreference = $true - Start-Process target/debug/spacetimedb-cli.exe -ArgumentList 'start --pg-port 5432' cd modules # the sdk-manifests on windows-latest are messed up, so we need to update them dotnet workload config --update-mode manifests dotnet workload update - - uses: actions/setup-python@v5 - with: { python-version: "3.12" } - if: runner.os == 'Windows' - - name: Install python deps - run: python -m pip install -r smoketests/requirements.txt + + # This step shouldn't be needed, but somehow we end up with caches that are missing librusty_v8.a. + # ChatGPT suspects that this could be due to different build invocations using the same target dir, + # and this makes sense to me because we only see it in this job where we mix `cargo build -p` with + # `cargo build --manifest-path` (which apparently build different dependency trees). + # However, we've been unable to fix it so... /shrug + - name: Check v8 outputs + shell: bash + run: | + find "${CARGO_TARGET_DIR}"/ -type f | grep '[/_]v8' || true + if ! [ -f "${CARGO_TARGET_DIR}"/debug/gn_out/obj/librusty_v8.a ]; then + echo "Could not find v8 output file librusty_v8.a; rebuilding manually." 
+ cargo clean -p v8 || true + cargo build -p v8 + fi + - name: Run smoketests - # Note: clear_database and replication only work in private - run: cargo ci smoketests -- ${{ matrix.smoketest_args }} -x clear_database replication teams - - name: Stop containers (Linux) - if: always() && runner.os == 'Linux' - run: docker compose -f .github/docker-compose.yml down + run: cargo ci smoketests test: needs: [lints] diff --git a/Cargo.lock b/Cargo.lock index 79823fbcf32..a2dea2be560 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8206,6 +8206,19 @@ dependencies = [ "tokio-tungstenite", ] +[[package]] +name = "spacetimedb-smoketests" +version = "1.11.3" +dependencies = [ + "anyhow", + "cargo_metadata", + "regex", + "serde_json", + "spacetimedb-guard", + "tempfile", + "toml 0.8.23", +] + [[package]] name = "spacetimedb-snapshot" version = "1.11.3" diff --git a/Cargo.toml b/Cargo.toml index 3fd9392aca3..7a372cb46ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ members = [ "crates/query", "crates/sats", "crates/schema", + "crates/smoketests", "sdks/rust", "sdks/unreal", "crates/snapshot", diff --git a/crates/guard/src/lib.rs b/crates/guard/src/lib.rs index 147bddf16b4..fadb0d4dca1 100644 --- a/crates/guard/src/lib.rs +++ b/crates/guard/src/lib.rs @@ -4,18 +4,92 @@ use std::{ env, io::{BufRead, BufReader}, net::SocketAddr, + path::{Path, PathBuf}, process::{Child, Command, Stdio}, - sync::{Arc, Mutex}, + sync::{Arc, Mutex, OnceLock}, thread::{self, sleep}, time::{Duration, Instant}, }; +/// Lazily-initialized path to the pre-built CLI binary. +static CLI_BINARY_PATH: OnceLock = OnceLock::new(); + +/// Ensures `spacetimedb-cli` and `spacetimedb-standalone` are built once, +/// returning the path to the CLI binary. +/// +/// This is useful for tests that need to run CLI commands directly. +pub fn ensure_binaries_built() -> PathBuf { + CLI_BINARY_PATH + .get_or_init(|| { + // Navigate from crates/guard/ to workspace root + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let workspace_root = manifest_dir + .parent() // crates/ + .and_then(|p| p.parent()) // workspace root + .expect("Failed to find workspace root"); + + // Determine target directory + let target_dir = env::var("CARGO_TARGET_DIR") + .map(PathBuf::from) + .unwrap_or_else(|_| workspace_root.join("target")); + + // Determine profile + let profile = if cfg!(debug_assertions) { "debug" } else { "release" }; + + // Build both binaries (standalone needed by CLI's start command) + for pkg in ["spacetimedb-standalone", "spacetimedb-cli"] { + let mut args = vec!["build", "-p", pkg]; + if profile == "release" { + args.push("--release"); + } + + // Clear cargo-provided env vars to avoid unnecessary rebuilds. + // When running under `cargo test`, cargo sets env vars like + // CARGO_ENCODED_RUSTFLAGS that differ from a normal build, + // causing the child cargo to think it needs to recompile. 
+ let mut cmd = Command::new("cargo"); + cmd.args(&args).current_dir(workspace_root); + for (key, _) in env::vars() { + if key.starts_with("CARGO") && key != "CARGO_HOME" && key != "CARGO_TARGET_DIR" { + cmd.env_remove(&key); + } + } + + let status = cmd + .status() + .unwrap_or_else(|e| panic!("Failed to build {}: {}", pkg, e)); + + assert!(status.success(), "Building {} failed", pkg); + } + + // Return path to CLI binary + let cli_name = if cfg!(windows) { + "spacetimedb-cli.exe" + } else { + "spacetimedb-cli" + }; + let cli_path = target_dir.join(profile).join(cli_name); + + assert!(cli_path.exists(), "CLI binary not found at {}", cli_path.display()); + + cli_path + }) + .clone() +} + use reqwest::blocking::Client; pub struct SpacetimeDbGuard { pub child: Child, pub host_url: String, pub logs: Arc>, + /// The PostgreSQL wire protocol port, if enabled. + pub pg_port: Option, + /// The data directory path (for restart scenarios). + pub data_dir: PathBuf, + /// Owns the temporary data directory (if created by spawn_in_temp_data_dir). + /// When this is Some, dropping the guard will clean up the temp dir. + _data_dir_handle: Option, } // Remove all Cargo-provided env vars from a child process. These are set by the fact that we're running in a cargo @@ -25,71 +99,184 @@ impl SpacetimeDbGuard { /// Start `spacetimedb` in a temporary data directory via: /// cargo run -p spacetimedb-cli -- start --data-dir --listen-addr pub fn spawn_in_temp_data_dir() -> Self { - let temp_dir = tempfile::tempdir().expect("failed to create temp dir"); - let data_dir = temp_dir.path().display().to_string(); + Self::spawn_in_temp_data_dir_with_pg_port(None) + } - Self::spawn_spacetime_start(false, &["start", "--data-dir", &data_dir]) + /// Start `spacetimedb` in a temporary data directory with optional PostgreSQL wire protocol. + pub fn spawn_in_temp_data_dir_with_pg_port(pg_port: Option) -> Self { + let temp_dir = tempfile::tempdir().expect("failed to create temp dir"); + let data_dir_path = temp_dir.path().to_path_buf(); + let data_dir_str = data_dir_path.display().to_string(); + + Self::spawn_spacetime_start_with_data_dir( + false, + &["start", "--data-dir", &data_dir_str], + pg_port, + data_dir_path, + Some(temp_dir), + ) } /// Start `spacetimedb` in a temporary data directory via: /// spacetime start --data-dir --listen-addr pub fn spawn_in_temp_data_dir_use_cli() -> Self { let temp_dir = tempfile::tempdir().expect("failed to create temp dir"); - let data_dir = temp_dir.path().display().to_string(); + let data_dir_path = temp_dir.path().to_path_buf(); + let data_dir_str = data_dir_path.display().to_string(); + + Self::spawn_spacetime_start_with_data_dir( + true, + &["start", "--data-dir", &data_dir_str], + None, + data_dir_path, + Some(temp_dir), + ) + } - Self::spawn_spacetime_start(true, &["start", "--data-dir", &data_dir]) + /// Start `spacetimedb` with an explicit data directory (for restart scenarios). + /// + /// Unlike `spawn_in_temp_data_dir`, this method does not create a temporary directory. + /// The caller is responsible for managing the data directory lifetime. + pub fn spawn_with_data_dir(data_dir: PathBuf, pg_port: Option) -> Self { + let data_dir_str = data_dir.display().to_string(); + Self::spawn_spacetime_start_with_data_dir( + false, + &["start", "--data-dir", &data_dir_str], + pg_port, + data_dir, + None, + ) } - fn spawn_spacetime_start(use_installed_cli: bool, extra_args: &[&str]) -> Self { - // Ask SpacetimeDB/OS to allocate an ephemeral port. 
- // Using loopback avoids needing to "connect to 0.0.0.0". - let address = "127.0.0.1:0".to_string(); + fn spawn_spacetime_start_with_data_dir( + use_installed_cli: bool, + _extra_args: &[&str], + pg_port: Option, + data_dir: PathBuf, + _data_dir_handle: Option, + ) -> Self { + if use_installed_cli { + // Use the installed CLI (rare case, mainly for spawn_in_temp_data_dir_use_cli) + let address = "127.0.0.1:0".to_string(); + let data_dir_str = data_dir.display().to_string(); + + let args = ["start", "--data-dir", &data_dir_str, "--listen-addr", &address]; + let cmd = Command::new("spacetime"); + let (child, logs) = Self::spawn_child(cmd, env!("CARGO_MANIFEST_DIR"), &args); + + let listen_addr = wait_for_listen_addr(&logs, Duration::from_secs(10)) + .unwrap_or_else(|| panic!("Timed out waiting for SpacetimeDB to report listen address")); + let host_url = format!("http://{}", listen_addr); + let guard = SpacetimeDbGuard { + child, + host_url, + logs, + pg_port, + data_dir, + _data_dir_handle, + }; + guard.wait_until_http_ready(Duration::from_secs(10)); + guard + } else { + // Use the built CLI (common case) + let (child, logs, host_url) = Self::spawn_server(&data_dir, pg_port); + SpacetimeDbGuard { + child, + host_url, + logs, + pg_port, + data_dir, + _data_dir_handle, + } + } + } - // Workspace root for `cargo run -p ...` - let workspace_dir = env!("CARGO_MANIFEST_DIR"); + /// Stop the server process without dropping the guard. + /// + /// This kills the server process but preserves the data directory. + /// Use `restart()` to start the server again with the same data. + pub fn stop(&mut self) { + self.kill_process(); + } - let mut args = vec![]; + /// Restart the server with the same data directory. + /// + /// This stops the current server process and starts a new one + /// with the same data directory, preserving all data. + pub fn restart(&mut self) { + self.stop(); - let (child, logs) = if use_installed_cli { - args.extend_from_slice(extra_args); - args.extend_from_slice(&["--listen-addr", &address]); + let (child, logs, host_url) = Self::spawn_server(&self.data_dir, self.pg_port); - let cmd = Command::new("spacetime"); - Self::spawn_child(cmd, env!("CARGO_MANIFEST_DIR"), &args) - } else { - Self::build_prereqs(workspace_dir); - args.extend(vec!["run", "-p", "spacetimedb-cli", "--"]); - args.extend(extra_args); - args.extend(["--listen-addr", &address]); + self.child = child; + self.logs = logs; + self.host_url = host_url; + } - let cmd = Command::new("cargo"); - Self::spawn_child(cmd, workspace_dir, &args) - }; + /// Kills the current server process and waits for it to exit. + fn kill_process(&mut self) { + // Kill the process tree to ensure all child processes are terminated. + // On Windows, child.kill() only kills the direct child (spacetimedb-cli), + // leaving spacetimedb-standalone running as an orphan. + #[cfg(windows)] + { + let pid = self.child.id(); + let _ = Command::new("taskkill") + .args(["/F", "/T", "/PID", &pid.to_string()]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status(); + } + + #[cfg(not(windows))] + { + let _ = self.child.kill(); + } + + let _ = self.child.wait(); + } + + /// Spawns a new server process with the given data directory. + /// Returns (child, logs, host_url). 
+ fn spawn_server(data_dir: &Path, pg_port: Option) -> (Child, Arc>, String) { + let data_dir_str = data_dir.display().to_string(); + let pg_port_str = pg_port.map(|p| p.to_string()); + + let address = "127.0.0.1:0".to_string(); + let cli_path = ensure_binaries_built(); + + let mut args = vec!["start", "--data-dir", &data_dir_str, "--listen-addr", &address]; + if let Some(ref port) = pg_port_str { + args.extend(["--pg-port", port]); + } + + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let workspace_root = manifest_dir + .parent() + .and_then(|p| p.parent()) + .expect("Failed to find workspace root"); + + let cmd = Command::new(&cli_path); + let (child, logs) = Self::spawn_child(cmd, workspace_root.to_str().unwrap(), &args); - // Parse the actual bound address from logs. + // Wait for the server to be ready let listen_addr = wait_for_listen_addr(&logs, Duration::from_secs(10)) .unwrap_or_else(|| panic!("Timed out waiting for SpacetimeDB to report listen address")); let host_url = format!("http://{}", listen_addr); - let guard = SpacetimeDbGuard { child, host_url, logs }; - guard.wait_until_http_ready(Duration::from_secs(10)); - guard - } - // Ensure standalone is built before we start, if that’s needed. - // This is best-effort and usually a no-op when already built. - // Also build the CLI before running it to avoid that being included in the - // timeout for readiness. - fn build_prereqs(workspace_dir: &str) { - let targets = ["spacetimedb-standalone", "spacetimedb-cli"]; - - for pkg in targets { - let mut cmd = Command::new("cargo"); - let _ = cmd - .args(["build", "-p", pkg]) - .current_dir(workspace_dir) - .status() - .unwrap_or_else(|_| panic!("failed to build {}", pkg)); + // Wait until HTTP is ready + let client = Client::new(); + let deadline = Instant::now() + Duration::from_secs(10); + while Instant::now() < deadline { + let url = format!("{}/v1/ping", host_url); + if let Ok(resp) = client.get(&url).send() { + if resp.status().is_success() { + return (child, logs, host_url); + } + } + sleep(Duration::from_millis(50)); } + panic!("Timed out waiting for SpacetimeDB HTTP /v1/ping at {}", host_url); } fn spawn_child(mut cmd: Command, workspace_dir: &str, args: &[&str]) -> (Child, Arc>) { @@ -195,9 +382,7 @@ fn parse_listen_addr_from_line(line: &str) -> Option { impl Drop for SpacetimeDbGuard { fn drop(&mut self) { - // Best-effort cleanup. 
- let _ = self.child.kill(); - let _ = self.child.wait(); + self.kill_process(); // Only print logs if the test is currently panicking if std::thread::panicking() { diff --git a/crates/smoketests/Cargo.toml b/crates/smoketests/Cargo.toml new file mode 100644 index 00000000000..1aa3d3ba8a6 --- /dev/null +++ b/crates/smoketests/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "spacetimedb-smoketests" +version.workspace = true +edition.workspace = true +rust-version.workspace = true + +[dependencies] +# Test utilities (needed in lib for test helpers) +spacetimedb-guard.workspace = true +tempfile.workspace = true +serde_json.workspace = true +toml.workspace = true +regex.workspace = true +anyhow.workspace = true + +[dev-dependencies] +cargo_metadata.workspace = true + +[lints] +workspace = true diff --git a/crates/smoketests/DEVELOP.md b/crates/smoketests/DEVELOP.md new file mode 100644 index 00000000000..0fe3d6e7eb3 --- /dev/null +++ b/crates/smoketests/DEVELOP.md @@ -0,0 +1,69 @@ +# Smoketests Development Guide + +## Running Tests + +### Recommended: cargo-nextest + +For faster test execution, use [cargo-nextest](https://nexte.st/): + +```bash +# Install (one-time) +cargo install cargo-nextest --locked + +# Run all smoketests +cargo nextest run -p spacetimedb-smoketests + +# Run a specific test +cargo nextest run -p spacetimedb-smoketests test_sql_format +``` + +**Why nextest?** Standard `cargo test` compiles each test file in `tests/` as a separate binary and runs them sequentially. Nextest runs all test binaries in parallel, reducing total runtime by ~40% (160s vs 265s for 25 tests). + +### Alternative: cargo test + +Standard `cargo test` also works: + +```bash +cargo test -p spacetimedb-smoketests +``` + +Tests within each file run in parallel, but files run sequentially. + +## Test Performance + +Each test takes ~15-20s due to: +- **WASM compilation** (~12s): Each test compiles a fresh Rust module to WASM +- **Server spawn** (~2s): Each test starts its own SpacetimeDB server +- **Module publish** (~2s): Server processes and initializes the WASM module + +When running tests in parallel, resource contention increases individual test times but reduces overall runtime. + +## Writing Tests + +See existing tests for patterns. Key points: + +```rust +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = example, public)] +pub struct Example { value: u64 } + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, value: u64) { + ctx.db.example().insert(Example { value }); +} +"#; + +#[test] +fn test_example() { + let test = Smoketest::builder() + .module_code(MODULE_CODE) + .build(); + + test.call("add", &["42"]).unwrap(); + test.assert_sql("SELECT * FROM example", "value\n-----\n42"); +} +``` diff --git a/crates/smoketests/src/lib.rs b/crates/smoketests/src/lib.rs new file mode 100644 index 00000000000..64f8641f361 --- /dev/null +++ b/crates/smoketests/src/lib.rs @@ -0,0 +1,989 @@ +#![allow(clippy::disallowed_macros)] +//! Rust smoketest infrastructure for SpacetimeDB. +//! +//! This crate provides utilities for writing end-to-end tests that compile and publish +//! SpacetimeDB modules, then exercise them via CLI commands. +//! +//! # Example +//! +//! ```ignore +//! use spacetimedb_smoketests::Smoketest; +//! +//! const MODULE_CODE: &str = r#" +//! use spacetimedb::{table, reducer}; +//! +//! #[spacetimedb::table(name = person, public)] +//! pub struct Person { +//! name: String, +//! } +//! +//! 
#[spacetimedb::reducer] +//! pub fn add(ctx: &ReducerContext, name: String) { +//! ctx.db.person().insert(Person { name }); +//! } +//! "#; +//! +//! #[test] +//! fn test_example() { +//! let mut test = Smoketest::builder() +//! .module_code(MODULE_CODE) +//! .build(); +//! +//! test.call("add", &["Alice"]).unwrap(); +//! test.assert_sql("SELECT * FROM person", "name\n-----\nAlice"); +//! } +//! ``` + +use anyhow::{bail, Context, Result}; +use regex::Regex; +use spacetimedb_guard::{ensure_binaries_built, SpacetimeDbGuard}; +use std::env; +use std::fs; +use std::path::PathBuf; +use std::process::{Command, Output, Stdio}; +use std::sync::OnceLock; +use std::time::Instant; + +/// Helper macro for timing operations and printing results +macro_rules! timed { + ($label:expr, $expr:expr) => {{ + let start = Instant::now(); + let result = $expr; + let elapsed = start.elapsed(); + eprintln!("[TIMING] {}: {:?}", $label, elapsed); + result + }}; +} + +/// Returns the workspace root directory. +pub fn workspace_root() -> PathBuf { + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + manifest_dir + .parent() + .and_then(|p| p.parent()) + .expect("Failed to find workspace root") + .to_path_buf() +} + +/// Generates a random lowercase alphabetic string suitable for database names. +pub fn random_string() -> String { + use std::time::{SystemTime, UNIX_EPOCH}; + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + // Convert to base-26 using lowercase letters only (a-z) + let mut result = String::with_capacity(20); + let mut n = timestamp; + while n > 0 || result.len() < 10 { + let c = (b'a' + (n % 26) as u8) as char; + result.push(c); + n /= 26; + } + result +} + +/// Returns true if dotnet 8.0+ is available on the system. +pub fn have_dotnet() -> bool { + static HAVE_DOTNET: OnceLock = OnceLock::new(); + *HAVE_DOTNET.get_or_init(|| { + Command::new("dotnet") + .args(["--list-sdks"]) + .output() + .map(|output| { + if !output.status.success() { + return false; + } + let stdout = String::from_utf8_lossy(&output.stdout); + // Check for dotnet 8.0 or higher + stdout + .lines() + .any(|line| line.starts_with("8.") || line.starts_with("9.") || line.starts_with("10.")) + }) + .unwrap_or(false) + }) +} + +/// Returns true if psql (PostgreSQL client) is available on the system. +pub fn have_psql() -> bool { + static HAVE_PSQL: OnceLock = OnceLock::new(); + *HAVE_PSQL.get_or_init(|| { + Command::new("psql") + .args(["--version"]) + .output() + .map(|output| output.status.success()) + .unwrap_or(false) + }) +} + +/// Returns true if pnpm is available on the system. +pub fn have_pnpm() -> bool { + static HAVE_PNPM: OnceLock = OnceLock::new(); + *HAVE_PNPM.get_or_init(|| { + Command::new("pnpm") + .args(["--version"]) + .output() + .map(|output| output.status.success()) + .unwrap_or(false) + }) +} + +/// Parse code blocks from quickstart markdown documentation. +/// Extracts code blocks with the specified language tag. +/// +/// - `language`: "rust", "csharp", or "typescript" +/// - `module_name`: The name to replace "quickstart-chat" with +/// - `server`: If true, look for server code blocks (e.g. 
"rust server"), else client blocks +pub fn parse_quickstart(doc_content: &str, language: &str, module_name: &str, server: bool) -> String { + // Normalize line endings to Unix style (LF) for consistent regex matching + let doc_content = doc_content.replace("\r\n", "\n"); + + // Determine the codeblock language tag to search for + let codeblock_lang = if server { + if language == "typescript" { + "ts server".to_string() + } else { + format!("{} server", language) + } + } else if language == "typescript" { + "ts".to_string() + } else { + language.to_string() + }; + + // Extract code blocks with the specified language + let pattern = format!(r"```{}\n([\s\S]*?)\n```", regex::escape(&codeblock_lang)); + let re = Regex::new(&pattern).unwrap(); + let mut blocks: Vec = re + .captures_iter(&doc_content) + .map(|cap| cap.get(1).unwrap().as_str().to_string()) + .collect(); + + let mut end = String::new(); + + // C# specific fixups + if language == "csharp" { + let mut found_on_connected = false; + let mut filtered_blocks = Vec::new(); + + for mut block in blocks { + // The doc first creates an empty class Module, so we need to fixup the closing brace + if block.contains("partial class Module") { + block = block.replace("}", ""); + end = "\n}".to_string(); + } + // Remove the first `OnConnected` block, which body is later updated + if block.contains("OnConnected(DbConnection conn") && !found_on_connected { + found_on_connected = true; + continue; + } + filtered_blocks.push(block); + } + blocks = filtered_blocks; + } + + // Join blocks and replace module name + let result = blocks.join("\n").replace("quickstart-chat", module_name); + result + &end +} + +/// A smoketest instance that manages a SpacetimeDB server and module project. +pub struct Smoketest { + /// The SpacetimeDB server guard (stops server on drop). + pub guard: SpacetimeDbGuard, + /// Temporary directory containing the module project. + pub project_dir: tempfile::TempDir, + /// Database identity after publishing (if any). + pub database_identity: Option, + /// The server URL (e.g., "http://127.0.0.1:3000"). + pub server_url: String, + /// Path to the test-specific CLI config file (isolates tests from user config). + pub config_path: std::path::PathBuf, +} + +/// Response from an HTTP API call. +pub struct ApiResponse { + /// HTTP status code. + pub status_code: u16, + /// Response body. + pub body: Vec, +} + +impl ApiResponse { + /// Returns the body as a string. + pub fn text(&self) -> Result { + String::from_utf8(self.body.clone()).context("Response body is not valid UTF-8") + } + + /// Parses the body as JSON. + pub fn json(&self) -> Result { + serde_json::from_slice(&self.body).context("Failed to parse response as JSON") + } + + /// Returns true if the status code indicates success (2xx). + pub fn is_success(&self) -> bool { + (200..300).contains(&self.status_code) + } +} + +/// Builder for creating `Smoketest` instances. +pub struct SmoketestBuilder { + module_code: Option, + bindings_features: Vec, + extra_deps: String, + autopublish: bool, + pg_port: Option, +} + +impl Default for SmoketestBuilder { + fn default() -> Self { + Self::new() + } +} + +impl SmoketestBuilder { + /// Creates a new builder with default settings. + pub fn new() -> Self { + Self { + module_code: None, + bindings_features: vec!["unstable".to_string()], + extra_deps: String::new(), + autopublish: true, + pg_port: None, + } + } + + /// Enables the PostgreSQL wire protocol on the specified port. 
+ pub fn pg_port(mut self, port: u16) -> Self { + self.pg_port = Some(port); + self + } + + /// Sets the module code to compile and publish. + pub fn module_code(mut self, code: &str) -> Self { + self.module_code = Some(code.to_string()); + self + } + + /// Sets additional features for the spacetimedb bindings dependency. + pub fn bindings_features(mut self, features: &[&str]) -> Self { + self.bindings_features = features.iter().map(|s| s.to_string()).collect(); + self + } + + /// Adds extra dependencies to the module's Cargo.toml. + pub fn extra_deps(mut self, deps: &str) -> Self { + self.extra_deps = deps.to_string(); + self + } + + /// Sets whether to automatically publish the module on build. + /// Default is true. + pub fn autopublish(mut self, yes: bool) -> Self { + self.autopublish = yes; + self + } + + /// Builds the `Smoketest` instance. + /// + /// This spawns a SpacetimeDB server, creates a temporary project directory, + /// writes the module code, and optionally publishes the module. + pub fn build(self) -> Smoketest { + let build_start = Instant::now(); + + let guard = timed!( + "server spawn", + SpacetimeDbGuard::spawn_in_temp_data_dir_with_pg_port(self.pg_port) + ); + let project_dir = tempfile::tempdir().expect("Failed to create temp project directory"); + + let project_setup_start = Instant::now(); + + // Create project structure + fs::create_dir_all(project_dir.path().join("src")).expect("Failed to create src directory"); + + // Write Cargo.toml + let workspace_root = workspace_root(); + let bindings_path = workspace_root.join("crates/bindings"); + let bindings_path_str = bindings_path.display().to_string().replace('\\', "/"); + let features_str = format!("{:?}", self.bindings_features); + + let cargo_toml = format!( + r#"[package] +name = "smoketest-module" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +spacetimedb = {{ path = "{}", features = {} }} +log = "0.4" +{} +"#, + bindings_path_str, features_str, self.extra_deps + ); + fs::write(project_dir.path().join("Cargo.toml"), cargo_toml).expect("Failed to write Cargo.toml"); + + // Copy rust-toolchain.toml + let toolchain_src = workspace_root.join("rust-toolchain.toml"); + if toolchain_src.exists() { + fs::copy(&toolchain_src, project_dir.path().join("rust-toolchain.toml")) + .expect("Failed to copy rust-toolchain.toml"); + } + + // Write module code + let module_code = self.module_code.unwrap_or_else(|| { + r#"use spacetimedb::ReducerContext; + +#[spacetimedb::reducer] +pub fn noop(_ctx: &ReducerContext) {} +"# + .to_string() + }); + fs::write(project_dir.path().join("src/lib.rs"), &module_code).expect("Failed to write lib.rs"); + + eprintln!("[TIMING] project setup: {:?}", project_setup_start.elapsed()); + + let server_url = guard.host_url.clone(); + let config_path = project_dir.path().join("config.toml"); + let mut smoketest = Smoketest { + guard, + project_dir, + database_identity: None, + server_url, + config_path, + }; + + if self.autopublish { + smoketest.publish_module().expect("Failed to publish module"); + } + + eprintln!("[TIMING] total build: {:?}", build_start.elapsed()); + smoketest + } +} + +impl Smoketest { + /// Creates a new builder for configuring a smoketest. + pub fn builder() -> SmoketestBuilder { + SmoketestBuilder::new() + } + + /// Restart the SpacetimeDB server. + /// + /// This stops the current server process and starts a new one with the + /// same data directory. All data is preserved across the restart. 
+    /// The server URL may change since a new ephemeral port is allocated.
+    pub fn restart_server(&mut self) {
+        self.guard.restart();
+        // Update server_url since the port may have changed
+        self.server_url = self.guard.host_url.clone();
+    }
+
+    /// Returns the server host (without protocol), e.g., "127.0.0.1:3000".
+    pub fn server_host(&self) -> &str {
+        self.server_url
+            .strip_prefix("http://")
+            .or_else(|| self.server_url.strip_prefix("https://"))
+            .unwrap_or(&self.server_url)
+    }
+
+    /// Returns the PostgreSQL wire protocol port, if enabled.
+    pub fn pg_port(&self) -> Option<u16> {
+        self.guard.pg_port
+    }
+
+    /// Reads the authentication token from the config file.
+    pub fn read_token(&self) -> Result<String> {
+        let config_content = fs::read_to_string(&self.config_path).context("Failed to read config file")?;
+
+        // Parse as TOML and extract spacetimedb_token
+        let config: toml::Value = config_content.parse().context("Failed to parse config as TOML")?;
+
+        config
+            .get("spacetimedb_token")
+            .and_then(|v| v.as_str())
+            .map(String::from)
+            .context("No spacetimedb_token found in config")
+    }
+
+    /// Runs a psql command against the PostgreSQL wire protocol server.
+    ///
+    /// Returns the output on success, or an error with stderr on failure.
+    pub fn psql(&self, database: &str, sql: &str) -> Result<String> {
+        let pg_port = self.pg_port().context("PostgreSQL wire protocol not enabled")?;
+        let token = self.read_token()?;
+
+        // Extract just the host part (without port)
+        let host = self.server_host().split(':').next().unwrap_or("127.0.0.1");
+
+        let output = Command::new("psql")
+            .args([
+                "-h",
+                host,
+                "-p",
+                &pg_port.to_string(),
+                "-U",
+                "postgres",
+                "-d",
+                database,
+                "--quiet",
+                "-c",
+                sql,
+            ])
+            .env("PGPASSWORD", &token)
+            .output()
+            .context("Failed to run psql")?;
+
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        if !stderr.is_empty() && !output.status.success() {
+            bail!("{}", stderr.trim());
+        }
+
+        Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
+    }
+
+    /// Asserts that psql output matches the expected value.
+    pub fn assert_psql(&self, database: &str, sql: &str, expected: &str) {
+        let output = self.psql(database, sql).expect("psql failed");
+        let output_normalized: String = output.lines().map(|l| l.trim_end()).collect::<Vec<_>>().join("\n");
+        let expected_normalized: String = expected.lines().map(|l| l.trim_end()).collect::<Vec<_>>().join("\n");
+        assert_eq!(
+            output_normalized, expected_normalized,
+            "psql output mismatch for query: {}\n\nExpected:\n{}\n\nActual:\n{}",
+            sql, expected_normalized, output_normalized
+        );
+    }
+
+    /// Runs a spacetime CLI command.
+    ///
+    /// Returns the raw command output; success is not asserted.
+    /// Uses --config-path to isolate test config from user config.
+    /// Callers should pass `--server` explicitly when the command needs it.
+    pub fn spacetime_cmd(&self, args: &[&str]) -> Output {
+        let start = Instant::now();
+        let cli_path = ensure_binaries_built();
+        let output = Command::new(&cli_path)
+            .arg("--config-path")
+            .arg(&self.config_path)
+            .args(args)
+            .current_dir(self.project_dir.path())
+            .output()
+            .expect("Failed to execute spacetime command");
+
+        let cmd_name = args.first().unwrap_or(&"unknown");
+        eprintln!("[TIMING] spacetime {}: {:?}", cmd_name, start.elapsed());
+        output
+    }
+
+    /// Runs a spacetime CLI command and returns stdout as a string.
+    ///
+    /// Returns an error if the command fails.
+    /// Callers should pass `--server` explicitly when the command needs it.
+    pub fn spacetime(&self, args: &[&str]) -> Result<String> {
+        let output = self.spacetime_cmd(args);
+        if !output.status.success() {
+            bail!(
+                "spacetime {:?} failed:\nstdout: {}\nstderr: {}",
+                args,
+                String::from_utf8_lossy(&output.stdout),
+                String::from_utf8_lossy(&output.stderr)
+            );
+        }
+        Ok(String::from_utf8_lossy(&output.stdout).to_string())
+    }
+
+    /// Writes new module code to the project.
+    pub fn write_module_code(&self, code: &str) -> Result<()> {
+        fs::write(self.project_dir.path().join("src/lib.rs"), code).context("Failed to write module code")?;
+        Ok(())
+    }
+
+    /// Runs `spacetime build` and returns the raw output.
+    ///
+    /// Use this when you need to check for build failures (e.g., wasm_bindgen detection).
+    pub fn spacetime_build(&self) -> Output {
+        let start = Instant::now();
+        let project_path = self.project_dir.path().to_str().unwrap();
+        let cli_path = ensure_binaries_built();
+        let output = Command::new(&cli_path)
+            .args(["build", "--project-path", project_path])
+            .current_dir(self.project_dir.path())
+            .output()
+            .expect("Failed to execute spacetime build");
+        eprintln!("[TIMING] spacetime build: {:?}", start.elapsed());
+        output
+    }
+
+    /// Publishes the module and stores the database identity.
+    pub fn publish_module(&mut self) -> Result<String> {
+        self.publish_module_opts(None, false)
+    }
+
+    /// Publishes the module with a specific name and optional clear flag.
+    ///
+    /// If `name` is provided, the database will be published with that name.
+    /// If `clear` is true, the database will be cleared before publishing.
+    pub fn publish_module_named(&mut self, name: &str, clear: bool) -> Result<String> {
+        self.publish_module_opts(Some(name), clear)
+    }
+
+    /// Re-publishes the module to the existing database identity with optional clear.
+    ///
+    /// This is useful for testing auto-migrations where you want to update
+    /// the module without clearing the database.
+    pub fn publish_module_clear(&mut self, clear: bool) -> Result<String> {
+        let identity = self
+            .database_identity
+            .as_ref()
+            .context("No database published yet")?
+            .clone();
+        self.publish_module_opts(Some(&identity), clear)
+    }
+
+    /// Internal helper for publishing with options.
+ fn publish_module_opts(&mut self, name: Option<&str>, clear: bool) -> Result { + let start = Instant::now(); + let project_path = self.project_dir.path().to_str().unwrap().to_string(); + + // First, run spacetime build to compile the WASM module (separate from publish) + let build_start = Instant::now(); + let cli_path = ensure_binaries_built(); + let build_output = Command::new(&cli_path) + .args(["build", "--project-path", &project_path]) + .current_dir(self.project_dir.path()) + .output() + .expect("Failed to execute spacetime build"); + eprintln!("[TIMING] spacetime build: {:?}", build_start.elapsed()); + + if !build_output.status.success() { + bail!( + "spacetime build failed:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&build_output.stdout), + String::from_utf8_lossy(&build_output.stderr) + ); + } + + // Construct the wasm path (module name is smoketest-module -> smoketest_module.wasm) + let wasm_path = self + .project_dir + .path() + .join("target/wasm32-unknown-unknown/release/smoketest_module.wasm"); + let wasm_path_str = wasm_path.to_str().unwrap().to_string(); + + // Now publish with --bin-path to skip rebuild + let publish_start = Instant::now(); + let mut args = vec![ + "publish", + "--server", + &self.server_url, + "--bin-path", + &wasm_path_str, + "--yes", + ]; + + if clear { + args.push("--clear-database"); + } + + let name_owned; + if let Some(n) = name { + name_owned = n.to_string(); + args.push(&name_owned); + } + + let output = self.spacetime(&args)?; + eprintln!( + "[TIMING] spacetime publish (after build): {:?}", + publish_start.elapsed() + ); + eprintln!("[TIMING] publish_module total: {:?}", start.elapsed()); + + // Parse the identity from output like "identity: abc123..." + let re = Regex::new(r"identity: ([0-9a-fA-F]+)").unwrap(); + if let Some(caps) = re.captures(&output) { + let identity = caps.get(1).unwrap().as_str().to_string(); + self.database_identity = Some(identity.clone()); + Ok(identity) + } else { + bail!("Failed to parse database identity from publish output: {}", output); + } + } + + /// Calls a reducer or procedure with the given arguments. + /// + /// Arguments are passed directly to the CLI as strings. + pub fn call(&self, name: &str, args: &[&str]) -> Result { + let identity = self.database_identity.as_ref().context("No database published")?; + + let mut cmd_args = vec!["call", "--server", &self.server_url, "--", identity.as_str(), name]; + cmd_args.extend(args); + + self.spacetime(&cmd_args) + } + + /// Calls a reducer/procedure and returns the full output including stderr. + pub fn call_output(&self, name: &str, args: &[&str]) -> Output { + let identity = self.database_identity.as_ref().expect("No database published"); + + let mut cmd_args = vec!["call", "--server", &self.server_url, "--", identity.as_str(), name]; + cmd_args.extend(args); + + self.spacetime_cmd(&cmd_args) + } + + /// Executes a SQL query against the database. + pub fn sql(&self, query: &str) -> Result { + let identity = self.database_identity.as_ref().context("No database published")?; + + self.spacetime(&["sql", "--server", &self.server_url, identity.as_str(), query]) + } + + /// Executes a SQL query with the --confirmed flag. + pub fn sql_confirmed(&self, query: &str) -> Result { + let identity = self.database_identity.as_ref().context("No database published")?; + + self.spacetime(&[ + "sql", + "--server", + &self.server_url, + "--confirmed", + identity.as_str(), + query, + ]) + } + + /// Asserts that a SQL query produces the expected output. 
+ /// + /// Both the actual output and expected string have trailing whitespace + /// trimmed from each line for comparison. + pub fn assert_sql(&self, query: &str, expected: &str) { + let actual = self.sql(query).expect("SQL query failed"); + let actual_normalized = normalize_whitespace(&actual); + let expected_normalized = normalize_whitespace(expected); + + assert_eq!( + actual_normalized, expected_normalized, + "SQL output mismatch for query: {}\n\nExpected:\n{}\n\nActual:\n{}", + query, expected_normalized, actual_normalized + ); + } + + /// Fetches the last N log entries from the database. + pub fn logs(&self, n: usize) -> Result> { + let records = self.log_records(n)?; + Ok(records + .into_iter() + .filter_map(|r| r.get("message").and_then(|m| m.as_str()).map(String::from)) + .collect()) + } + + /// Fetches the last N log records as JSON values. + pub fn log_records(&self, n: usize) -> Result> { + let identity = self.database_identity.as_ref().context("No database published")?; + let n_str = n.to_string(); + + let output = self.spacetime(&[ + "logs", + "--server", + &self.server_url, + "--format=json", + "-n", + &n_str, + "--", + identity, + ])?; + + output + .lines() + .filter(|line| !line.trim().is_empty()) + .map(|line| serde_json::from_str(line).context("Failed to parse log record")) + .collect() + } + + /// Creates a new identity by logging out and logging back in with a server-issued identity. + /// + /// This is useful for tests that need to test with multiple identities. + pub fn new_identity(&self) -> Result<()> { + let cli_path = ensure_binaries_built(); + let config_path_str = self.config_path.to_str().unwrap(); + + // Logout first (ignore errors - may not be logged in) + let _ = Command::new(&cli_path) + .args(["--config-path", config_path_str, "logout"]) + .output(); + + // Login with server-issued identity + // Format: login --server-issued-login + let output = Command::new(&cli_path) + .args([ + "--config-path", + config_path_str, + "login", + "--server-issued-login", + &self.server_url, + ]) + .output() + .context("Failed to login with new identity")?; + + if !output.status.success() { + bail!( + "Failed to create new identity:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); + } + + Ok(()) + } + + /// Makes an HTTP API call to the server. + /// + /// Returns the response body as bytes, or an error with the HTTP status code. + pub fn api_call(&self, method: &str, path: &str) -> Result { + self.api_call_with_body(method, path, None) + } + + /// Makes an HTTP API call with an optional request body. 
+ pub fn api_call_with_body(&self, method: &str, path: &str, body: Option<&[u8]>) -> Result { + use std::io::{Read, Write}; + use std::net::TcpStream; + + // Parse server URL to get host and port + let url = &self.server_url; + let host_port = url + .strip_prefix("http://") + .or_else(|| url.strip_prefix("https://")) + .unwrap_or(url); + + let mut stream = TcpStream::connect(host_port).context("Failed to connect to server")?; + stream.set_read_timeout(Some(std::time::Duration::from_secs(30))).ok(); + + // Build HTTP request + let content_length = body.map(|b| b.len()).unwrap_or(0); + let request = format!( + "{} {} HTTP/1.1\r\nHost: {}\r\nContent-Length: {}\r\nConnection: close\r\n\r\n", + method, path, host_port, content_length + ); + + stream.write_all(request.as_bytes())?; + if let Some(body) = body { + stream.write_all(body)?; + } + + // Read response + let mut response = Vec::new(); + stream.read_to_end(&mut response)?; + + // Parse HTTP response + let response_str = String::from_utf8_lossy(&response); + let mut lines = response_str.lines(); + + // Parse status line + let status_line = lines.next().context("Empty response")?; + let status_code: u16 = status_line + .split_whitespace() + .nth(1) + .and_then(|s| s.parse().ok()) + .context("Failed to parse status code")?; + + // Find body (after empty line) + let header_end = response_str.find("\r\n\r\n").unwrap_or(response_str.len()); + let body_start = header_end + 4; + let body = if body_start < response.len() { + response[body_start..].to_vec() + } else { + Vec::new() + }; + + Ok(ApiResponse { status_code, body }) + } + + /// Starts a subscription and waits for N updates (synchronous). + /// + /// Returns the updates as JSON values. + /// For tests that need to perform actions while subscribed, use `subscribe_background` instead. + pub fn subscribe(&self, queries: &[&str], n: usize) -> Result> { + self.subscribe_opts(queries, n, false) + } + + /// Starts a subscription with --confirmed flag and waits for N updates. + pub fn subscribe_confirmed(&self, queries: &[&str], n: usize) -> Result> { + self.subscribe_opts(queries, n, true) + } + + /// Internal helper for subscribe with options. + fn subscribe_opts(&self, queries: &[&str], n: usize, confirmed: bool) -> Result> { + let start = Instant::now(); + let identity = self.database_identity.as_ref().context("No database published")?; + let config_path_str = self.config_path.to_str().unwrap(); + + let cli_path = ensure_binaries_built(); + let mut cmd = Command::new(&cli_path); + let mut args = vec![ + "--config-path", + config_path_str, + "subscribe", + "--server", + &self.server_url, + identity, + "-t", + "30", + "-n", + ]; + let n_str = n.to_string(); + args.push(&n_str); + args.push("--print-initial-update"); + if confirmed { + args.push("--confirmed"); + } + args.push("--"); + cmd.args(&args) + .args(queries) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + let output = cmd.output().context("Failed to run subscribe command")?; + eprintln!("[TIMING] subscribe (n={}): {:?}", n, start.elapsed()); + + if !output.status.success() { + bail!("subscribe failed:\nstderr: {}", String::from_utf8_lossy(&output.stderr)); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + stdout + .lines() + .filter(|line| !line.trim().is_empty()) + .map(|line| serde_json::from_str(line).context("Failed to parse subscription update")) + .collect() + } + + /// Starts a subscription in the background and returns a handle. 
+ /// + /// This matches Python's subscribe semantics - start subscription first, + /// perform actions, then call the handle to collect results. + pub fn subscribe_background(&self, queries: &[&str], n: usize) -> Result { + self.subscribe_background_opts(queries, n, false) + } + + /// Starts a subscription in the background with --confirmed flag. + pub fn subscribe_background_confirmed(&self, queries: &[&str], n: usize) -> Result { + self.subscribe_background_opts(queries, n, true) + } + + /// Internal helper for background subscribe with options. + fn subscribe_background_opts(&self, queries: &[&str], n: usize, confirmed: bool) -> Result { + use std::io::{BufRead, BufReader}; + + let identity = self + .database_identity + .as_ref() + .context("No database published")? + .clone(); + + let cli_path = ensure_binaries_built(); + let mut cmd = Command::new(&cli_path); + // Use --print-initial-update so we know when subscription is established + let config_path_str = self.config_path.to_str().unwrap().to_string(); + let mut args = vec![ + "--config-path".to_string(), + config_path_str, + "subscribe".to_string(), + "--server".to_string(), + self.server_url.clone(), + identity, + "-t".to_string(), + "30".to_string(), + "-n".to_string(), + n.to_string(), + "--print-initial-update".to_string(), + ]; + if confirmed { + args.push("--confirmed".to_string()); + } + args.push("--".to_string()); + cmd.args(&args) + .args(queries) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + let mut child = cmd.spawn().context("Failed to spawn subscribe command")?; + let stdout = child.stdout.take().context("No stdout from subscribe")?; + let stderr = child.stderr.take().context("No stderr from subscribe")?; + let mut reader = BufReader::new(stdout); + + // Wait for initial update line - this blocks until subscription is established + let mut init_line = String::new(); + reader + .read_line(&mut init_line) + .context("Failed to read initial update from subscribe")?; + eprintln!("[SUBSCRIBE] initial update received: {}", init_line.trim()); + + Ok(SubscriptionHandle { + child, + reader, + stderr, + n, + start: Instant::now(), + }) + } +} + +/// Handle for a background subscription. +pub struct SubscriptionHandle { + child: std::process::Child, + reader: std::io::BufReader, + stderr: std::process::ChildStderr, + n: usize, + start: Instant, +} + +impl SubscriptionHandle { + /// Wait for the subscription to complete and return the updates. + pub fn collect(mut self) -> Result> { + use std::io::{BufRead, Read}; + + // Read remaining lines from stdout + let mut updates = Vec::new(); + for line in self.reader.by_ref().lines() { + let line = line.context("Failed to read line from subscribe")?; + if !line.trim().is_empty() { + let value: serde_json::Value = + serde_json::from_str(&line).context("Failed to parse subscription update")?; + updates.push(value); + } + } + + // Wait for child to complete + let status = self.child.wait().context("Failed to wait for subscribe")?; + eprintln!( + "[TIMING] subscribe_background (n={}): {:?}", + self.n, + self.start.elapsed() + ); + + if !status.success() { + let mut stderr_buf = String::new(); + self.stderr.read_to_string(&mut stderr_buf).ok(); + bail!("subscribe failed:\nstderr: {}", stderr_buf); + } + + Ok(updates) + } +} + +/// Normalizes whitespace by trimming trailing whitespace from each line. 
+fn normalize_whitespace(s: &str) -> String {
+    s.lines().map(|line| line.trim_end()).collect::<Vec<_>>().join("\n")
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_normalize_whitespace() {
+        let input = "hello \nworld \n foo ";
+        let expected = "hello\nworld\n foo";
+        assert_eq!(normalize_whitespace(input), expected);
+    }
+}
diff --git a/crates/smoketests/tests/add_remove_index.rs b/crates/smoketests/tests/add_remove_index.rs
new file mode 100644
index 00000000000..0d166d3774f
--- /dev/null
+++ b/crates/smoketests/tests/add_remove_index.rs
@@ -0,0 +1,89 @@
+//! Add/remove index tests translated from smoketests/tests/add_remove_index.py
+
+use spacetimedb_smoketests::Smoketest;
+
+const MODULE_CODE: &str = r#"
+use spacetimedb::{ReducerContext, Table};
+
+#[spacetimedb::table(name = t1)]
+pub struct T1 { id: u64 }
+
+#[spacetimedb::table(name = t2)]
+pub struct T2 { id: u64 }
+
+#[spacetimedb::reducer(init)]
+pub fn init(ctx: &ReducerContext) {
+    for id in 0..1_000 {
+        ctx.db.t1().insert(T1 { id });
+        ctx.db.t2().insert(T2 { id });
+    }
+}
+"#;
+
+const MODULE_CODE_INDEXED: &str = r#"
+use spacetimedb::{ReducerContext, Table};
+
+#[spacetimedb::table(name = t1)]
+pub struct T1 { #[index(btree)] id: u64 }
+
+#[spacetimedb::table(name = t2)]
+pub struct T2 { #[index(btree)] id: u64 }
+
+#[spacetimedb::reducer(init)]
+pub fn init(ctx: &ReducerContext) {
+    for id in 0..1_000 {
+        ctx.db.t1().insert(T1 { id });
+        ctx.db.t2().insert(T2 { id });
+    }
+}
+
+#[spacetimedb::reducer]
+pub fn add(ctx: &ReducerContext) {
+    let id = 1_001;
+    ctx.db.t1().insert(T1 { id });
+    ctx.db.t2().insert(T2 { id });
+}
+"#;
+
+const JOIN_QUERY: &str = "select t1.* from t1 join t2 on t1.id = t2.id where t2.id = 1001";
+
+/// First publish without the indices,
+/// then add the indices, and publish,
+/// and finally remove the indices, and publish again.
+/// There should be no errors
+/// and the unindexed versions should reject subscriptions.
+#[test]
+fn test_add_then_remove_index() {
+    let mut test = Smoketest::builder().module_code(MODULE_CODE).autopublish(false).build();
+
+    let name = format!("test-db-{}", std::process::id());
+
+    // Publish and attempt to subscribe to a join query.
+    // There are no indices, resulting in an unsupported unindexed join.
+    test.publish_module_named(&name, false).unwrap();
+    let result = test.subscribe(&[JOIN_QUERY], 0);
+    assert!(result.is_err(), "Expected subscription to fail without indices");
+
+    // Publish the indexed version.
+    // Now we have indices, so the query should be accepted.
+    test.write_module_code(MODULE_CODE_INDEXED).unwrap();
+    test.publish_module_named(&name, false).unwrap();
+
+    // Subscription should work now (n=0 just verifies the query is accepted)
+    let result = test.subscribe(&[JOIN_QUERY], 0);
+    assert!(
+        result.is_ok(),
+        "Expected subscription to succeed with indices, got: {:?}",
+        result.err()
+    );
+
+    // Verify call works too
+    test.call("add", &[]).unwrap();
+
+    // Publish the unindexed version again, removing the index.
+    // The initial subscription should be rejected again.
+    test.write_module_code(MODULE_CODE).unwrap();
+    test.publish_module_named(&name, false).unwrap();
+    let result = test.subscribe(&[JOIN_QUERY], 0);
+    assert!(result.is_err(), "Expected subscription to fail after removing indices");
+}
diff --git a/crates/smoketests/tests/auto_inc.rs b/crates/smoketests/tests/auto_inc.rs
new file mode 100644
index 00000000000..dce8c1ec781
--- /dev/null
+++ b/crates/smoketests/tests/auto_inc.rs
@@ -0,0 +1,177 @@
+//!
Auto-increment tests translated from smoketests/tests/auto_inc.py +//! +//! This is a simplified version that tests representative integer types +//! rather than all 10 types in the Python version. + +use spacetimedb_smoketests::Smoketest; + +/// Generate module code for basic auto-increment test with a specific integer type +fn autoinc_basic_module_code(int_ty: &str) -> String { + format!( + r#" +#![allow(non_camel_case_types)] +use spacetimedb::{{log, ReducerContext, Table}}; + +#[spacetimedb::table(name = person_{int_ty})] +pub struct Person_{int_ty} {{ + #[auto_inc] + key_col: {int_ty}, + name: String, +}} + +#[spacetimedb::reducer] +pub fn add_{int_ty}(ctx: &ReducerContext, name: String, expected_value: {int_ty}) {{ + let value = ctx.db.person_{int_ty}().insert(Person_{int_ty} {{ key_col: 0, name }}); + assert_eq!(value.key_col, expected_value); +}} + +#[spacetimedb::reducer] +pub fn say_hello_{int_ty}(ctx: &ReducerContext) {{ + for person in ctx.db.person_{int_ty}().iter() {{ + log::info!("Hello, {{}}:{{}}!", person.key_col, person.name); + }} + log::info!("Hello, World!"); +}} +"# + ) +} + +fn do_test_autoinc_basic(int_ty: &str) { + let module_code = autoinc_basic_module_code(int_ty); + let test = Smoketest::builder().module_code(&module_code).build(); + + test.call(&format!("add_{}", int_ty), &[r#""Robert""#, "1"]).unwrap(); + test.call(&format!("add_{}", int_ty), &[r#""Julie""#, "2"]).unwrap(); + test.call(&format!("add_{}", int_ty), &[r#""Samantha""#, "3"]).unwrap(); + test.call(&format!("say_hello_{}", int_ty), &[]).unwrap(); + + let logs = test.logs(4).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("Hello, 3:Samantha!")), + "Expected 'Hello, 3:Samantha!' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("Hello, 2:Julie!")), + "Expected 'Hello, 2:Julie!' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("Hello, 1:Robert!")), + "Expected 'Hello, 1:Robert!' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("Hello, World!")), + "Expected 'Hello, World!' 
in logs, got: {:?}", + logs + ); +} + +#[test] +fn test_autoinc_u32() { + do_test_autoinc_basic("u32"); +} + +#[test] +fn test_autoinc_u64() { + do_test_autoinc_basic("u64"); +} + +#[test] +fn test_autoinc_i32() { + do_test_autoinc_basic("i32"); +} + +#[test] +fn test_autoinc_i64() { + do_test_autoinc_basic("i64"); +} + +/// Generate module code for auto-increment with unique constraint test +fn autoinc_unique_module_code(int_ty: &str) -> String { + format!( + r#" +#![allow(non_camel_case_types)] +use std::error::Error; +use spacetimedb::{{log, ReducerContext, Table}}; + +#[spacetimedb::table(name = person_{int_ty})] +pub struct Person_{int_ty} {{ + #[auto_inc] + #[unique] + key_col: {int_ty}, + #[unique] + name: String, +}} + +#[spacetimedb::reducer] +pub fn add_new_{int_ty}(ctx: &ReducerContext, name: String) -> Result<(), Box> {{ + let value = ctx.db.person_{int_ty}().try_insert(Person_{int_ty} {{ key_col: 0, name }})?; + log::info!("Assigned Value: {{}} -> {{}}", value.key_col, value.name); + Ok(()) +}} + +#[spacetimedb::reducer] +pub fn update_{int_ty}(ctx: &ReducerContext, name: String, new_id: {int_ty}) {{ + ctx.db.person_{int_ty}().name().delete(&name); + let _value = ctx.db.person_{int_ty}().insert(Person_{int_ty} {{ key_col: new_id, name }}); +}} + +#[spacetimedb::reducer] +pub fn say_hello_{int_ty}(ctx: &ReducerContext) {{ + for person in ctx.db.person_{int_ty}().iter() {{ + log::info!("Hello, {{}}:{{}}!", person.key_col, person.name); + }} + log::info!("Hello, World!"); +}} +"# + ) +} + +fn do_test_autoinc_unique(int_ty: &str) { + let module_code = autoinc_unique_module_code(int_ty); + let test = Smoketest::builder().module_code(&module_code).build(); + + // Insert Robert with explicit id 2 + test.call(&format!("update_{}", int_ty), &[r#""Robert""#, "2"]).unwrap(); + + // Auto-inc should assign id 1 to Success + test.call(&format!("add_new_{}", int_ty), &[r#""Success""#]).unwrap(); + + // Auto-inc tries to assign id 2, but Robert already has it - should fail + let result = test.call(&format!("add_new_{}", int_ty), &[r#""Failure""#]); + assert!( + result.is_err(), + "Expected add_new to fail due to unique constraint violation" + ); + + test.call(&format!("say_hello_{}", int_ty), &[]).unwrap(); + + let logs = test.logs(4).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("Hello, 2:Robert!")), + "Expected 'Hello, 2:Robert!' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("Hello, 1:Success!")), + "Expected 'Hello, 1:Success!' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("Hello, World!")), + "Expected 'Hello, World!' in logs, got: {:?}", + logs + ); +} + +#[test] +fn test_autoinc_unique_u64() { + do_test_autoinc_unique("u64"); +} + +#[test] +fn test_autoinc_unique_i64() { + do_test_autoinc_unique("i64"); +} diff --git a/crates/smoketests/tests/auto_migration.rs b/crates/smoketests/tests/auto_migration.rs new file mode 100644 index 00000000000..bac8b14e68e --- /dev/null +++ b/crates/smoketests/tests/auto_migration.rs @@ -0,0 +1,266 @@ +//! 
Tests translated from smoketests/tests/auto_migration.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE_SIMPLE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String, +} + +#[spacetimedb::reducer] +pub fn add_person(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name }); +} + +#[spacetimedb::reducer] +pub fn print_persons(ctx: &ReducerContext, prefix: String) { + for person in ctx.db.person().iter() { + log::info!("{}: {}", prefix, person.name); + } +} +"#; + +const MODULE_CODE_UPDATED_INCOMPATIBLE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String, + age: u128, +} + +#[spacetimedb::reducer] +pub fn add_person(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name, age: 70 }); +} + +#[spacetimedb::reducer] +pub fn print_persons(ctx: &ReducerContext, prefix: String) { + for person in ctx.db.person().iter() { + log::info!("{}: {}", prefix, person.name); + } +} +"#; + +/// Tests that a module with invalid schema changes cannot be published without -c or a migration. +#[test] +fn test_reject_schema_changes() { + let mut test = Smoketest::builder().module_code(MODULE_CODE_SIMPLE).build(); + + // Try to update with incompatible schema (adding column without default) + test.write_module_code(MODULE_CODE_UPDATED_INCOMPATIBLE).unwrap(); + let result = test.publish_module_clear(false); + + assert!( + result.is_err(), + "Expected publish to fail with incompatible schema change" + ); +} + +const MODULE_CODE_INIT: &str = r#" +use spacetimedb::{log, ReducerContext, Table, SpacetimeType}; +use PersonKind::*; + +#[spacetimedb::table(name = person, public)] +pub struct Person { + name: String, + kind: PersonKind, +} + +#[spacetimedb::reducer] +pub fn add_person(ctx: &ReducerContext, name: String, kind: String) { + let kind = kind_from_string(kind); + ctx.db.person().insert(Person { name, kind }); +} + +#[spacetimedb::reducer] +pub fn print_persons(ctx: &ReducerContext, prefix: String) { + for person in ctx.db.person().iter() { + let kind = kind_to_string(person.kind); + log::info!("{prefix}: {} - {kind}", person.name); + } +} + +#[spacetimedb::table(name = point_mass)] +pub struct PointMass { + mass: f64, + position: Vector2, +} + +#[derive(SpacetimeType, Clone, Copy)] +pub struct Vector2 { + x: f64, + y: f64, +} + +#[spacetimedb::table(name = person_info)] +pub struct PersonInfo { + #[primary_key] + id: u64, +} + +#[derive(SpacetimeType, Clone, Copy, PartialEq, Eq)] +pub enum PersonKind { + Student, +} + +fn kind_from_string(_: String) -> PersonKind { + Student +} + +fn kind_to_string(Student: PersonKind) -> &'static str { + "Student" +} +"#; + +const MODULE_CODE_UPDATED: &str = r#" +use spacetimedb::{log, ReducerContext, Table, SpacetimeType}; +use PersonKind::*; + +#[spacetimedb::table(name = person, public)] +pub struct Person { + name: String, + kind: PersonKind, +} + +#[spacetimedb::reducer] +pub fn add_person(ctx: &ReducerContext, name: String, kind: String) { + let kind = kind_from_string(kind); + ctx.db.person().insert(Person { name, kind }); +} + +#[spacetimedb::reducer] +pub fn print_persons(ctx: &ReducerContext, prefix: String) { + for person in ctx.db.person().iter() { + let kind = kind_to_string(person.kind); + log::info!("{prefix}: {} - {kind}", person.name); + } +} + +#[spacetimedb::table(name = point_mass)] +pub struct PointMass { + mass: f64, + position: Vector2, 
+} + +#[derive(SpacetimeType, Clone, Copy)] +pub struct Vector2 { + x: f64, + y: f64, +} + +#[spacetimedb::table(name = person_info)] +pub struct PersonInfo { + #[primary_key] + #[auto_inc] + id: u64, +} + +#[derive(SpacetimeType, Clone, Copy, PartialEq, Eq)] +pub enum PersonKind { + Student, + Professor, +} + +fn kind_from_string(kind: String) -> PersonKind { + match &*kind { + "Student" => Student, + "Professor" => Professor, + _ => panic!(), + } +} + +fn kind_to_string(kind: PersonKind) -> &'static str { + match kind { + Student => "Student", + Professor => "Professor", + } +} + +#[spacetimedb::table(name = book, public)] +pub struct Book { + isbn: String, +} + +#[spacetimedb::reducer] +pub fn add_book(ctx: &ReducerContext, isbn: String) { + ctx.db.book().insert(Book { isbn }); +} + +#[spacetimedb::reducer] +pub fn print_books(ctx: &ReducerContext, prefix: String) { + for book in ctx.db.book().iter() { + log::info!("{}: {}", prefix, book.isbn); + } +} +"#; + +/// Tests uploading a module with a schema change that should not require clearing the database. +#[test] +fn test_add_table_auto_migration() { + let mut test = Smoketest::builder().module_code(MODULE_CODE_INIT).build(); + + // Add initial data + test.call("add_person", &["Robert", "Student"]).unwrap(); + test.call("add_person", &["Julie", "Student"]).unwrap(); + test.call("add_person", &["Samantha", "Student"]).unwrap(); + test.call("print_persons", &["BEFORE"]).unwrap(); + + let logs = test.logs(100).unwrap(); + assert!( + logs.iter().any(|l| l.contains("BEFORE: Samantha - Student")), + "Expected Samantha in logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("BEFORE: Julie - Student")), + "Expected Julie in logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("BEFORE: Robert - Student")), + "Expected Robert in logs: {:?}", + logs + ); + + // Update module without clearing database + test.write_module_code(MODULE_CODE_UPDATED).unwrap(); + test.publish_module_clear(false).unwrap(); + + // Add new data with updated schema + test.call("add_person", &["Husserl", "Student"]).unwrap(); + test.call("add_person", &["Husserl", "Professor"]).unwrap(); + test.call("add_book", &["1234567890"]).unwrap(); + test.call("print_persons", &["AFTER_PERSON"]).unwrap(); + test.call("print_books", &["AFTER_BOOK"]).unwrap(); + + let logs = test.logs(100).unwrap(); + assert!( + logs.iter().any(|l| l.contains("AFTER_PERSON: Samantha - Student")), + "Expected Samantha in AFTER logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("AFTER_PERSON: Julie - Student")), + "Expected Julie in AFTER logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("AFTER_PERSON: Robert - Student")), + "Expected Robert in AFTER logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("AFTER_PERSON: Husserl - Professor")), + "Expected Husserl Professor in AFTER logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("AFTER_BOOK: 1234567890")), + "Expected book ISBN in AFTER logs: {:?}", + logs + ); +} diff --git a/crates/smoketests/tests/call.rs b/crates/smoketests/tests/call.rs new file mode 100644 index 00000000000..8ba7c717cda --- /dev/null +++ b/crates/smoketests/tests/call.rs @@ -0,0 +1,242 @@ +//! 
Reducer/procedure call tests translated from smoketests/tests/call.py + +use spacetimedb_smoketests::Smoketest; + +const CALL_REDUCER_PROCEDURE_MODULE_CODE: &str = r#" +use spacetimedb::{log, ProcedureContext, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String, +} + +#[spacetimedb::reducer] +pub fn say_hello(_ctx: &ReducerContext) { + log::info!("Hello, World!"); +} + +#[spacetimedb::procedure] +pub fn return_person(_ctx: &mut ProcedureContext) -> Person { + return Person { name: "World".to_owned() }; +} +"#; + +/// Check calling a reducer (no return) and procedure (return) +#[test] +fn test_call_reducer_procedure() { + let test = Smoketest::builder() + .module_code(CALL_REDUCER_PROCEDURE_MODULE_CODE) + .build(); + + // Reducer returns empty + let msg = test.call("say_hello", &[]).unwrap(); + assert_eq!(msg.trim(), ""); + + // Procedure returns a value + let msg = test.call("return_person", &[]).unwrap(); + assert_eq!(msg.trim(), r#"["World"]"#); +} + +/// Check calling a non-existent reducer/procedure raises error +#[test] +fn test_call_errors() { + let test = Smoketest::builder() + .module_code(CALL_REDUCER_PROCEDURE_MODULE_CODE) + .build(); + + let identity = test.database_identity.as_ref().unwrap(); + + // Non-existent reducer + let output = test.call_output("non_existent_reducer", &[]); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + let expected = format!( + "WARNING: This command is UNSTABLE and subject to breaking changes. + +Error: No such reducer OR procedure `non_existent_reducer` for database `{identity}` resolving to identity `{identity}`. + +Here are some existing reducers: +- say_hello + +Here are some existing procedures: +- return_person" + ); + assert!( + expected.contains(stderr.trim()), + "Expected stderr to be contained in expected message.\nExpected:\n{}\n\nActual stderr:\n{}", + expected, + stderr.trim() + ); + + // Non-existent procedure + let output = test.call_output("non_existent_procedure", &[]); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + let expected = format!( + "WARNING: This command is UNSTABLE and subject to breaking changes. + +Error: No such reducer OR procedure `non_existent_procedure` for database `{identity}` resolving to identity `{identity}`. + +Here are some existing reducers: +- say_hello + +Here are some existing procedures: +- return_person" + ); + assert!( + expected.contains(stderr.trim()), + "Expected stderr to be contained in expected message.\nExpected:\n{}\n\nActual stderr:\n{}", + expected, + stderr.trim() + ); + + // Similar name to reducer - should suggest similar + let output = test.call_output("say_hell", &[]); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + let expected = format!( + "WARNING: This command is UNSTABLE and subject to breaking changes. + +Error: No such reducer OR procedure `say_hell` for database `{identity}` resolving to identity `{identity}`. 
+ +A reducer with a similar name exists: `say_hello`" + ); + assert!( + expected.contains(stderr.trim()), + "Expected stderr to be contained in expected message.\nExpected:\n{}\n\nActual stderr:\n{}", + expected, + stderr.trim() + ); + + // Similar name to procedure - should suggest similar + let output = test.call_output("return_perso", &[]); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + let expected = format!( + "WARNING: This command is UNSTABLE and subject to breaking changes. + +Error: No such reducer OR procedure `return_perso` for database `{identity}` resolving to identity `{identity}`. + +A procedure with a similar name exists: `return_person`" + ); + assert!( + expected.contains(stderr.trim()), + "Expected stderr to be contained in expected message.\nExpected:\n{}\n\nActual stderr:\n{}", + expected, + stderr.trim() + ); +} + +const CALL_EMPTY_MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String, +} +"#; + +/// Check calling into a database with no reducers/procedures raises error +#[test] +fn test_call_empty_errors() { + let test = Smoketest::builder().module_code(CALL_EMPTY_MODULE_CODE).build(); + + let identity = test.database_identity.as_ref().unwrap(); + + let output = test.call_output("non_existent", &[]); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + let expected = format!( + "WARNING: This command is UNSTABLE and subject to breaking changes. + +Error: No such reducer OR procedure `non_existent` for database `{identity}` resolving to identity `{identity}`. + +The database has no reducers. + +The database has no procedures." + ); + assert!( + expected.contains(stderr.trim()), + "Expected stderr to be contained in expected message.\nExpected:\n{}\n\nActual stderr:\n{}", + expected, + stderr.trim() + ); +} + +/// Generate module code with many reducers and procedures +fn generate_many_module_code() -> String { + let mut code = String::from( + r#" +use spacetimedb::{log, ProcedureContext, ReducerContext}; +"#, + ); + + for i in 0..11 { + code.push_str(&format!( + r#" +#[spacetimedb::reducer] +pub fn say_reducer_{i}(_ctx: &ReducerContext) {{ + log::info!("Hello from reducer {i}!"); +}} + +#[spacetimedb::procedure] +pub fn say_procedure_{i}(_ctx: &mut ProcedureContext) {{ + log::info!("Hello from procedure {i}!"); +}} +"# + )); + } + + code +} + +/// Check calling into a database with many reducers/procedures raises error with listing +#[test] +fn test_call_many_errors() { + let module_code = generate_many_module_code(); + let test = Smoketest::builder().module_code(&module_code).build(); + + let identity = test.database_identity.as_ref().unwrap(); + + let output = test.call_output("non_existent", &[]); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + + let expected = format!( + "WARNING: This command is UNSTABLE and subject to breaking changes. + +Error: No such reducer OR procedure `non_existent` for database `{identity}` resolving to identity `{identity}`. + +Here are some existing reducers: +- say_reducer_0 +- say_reducer_1 +- say_reducer_2 +- say_reducer_3 +- say_reducer_4 +- say_reducer_5 +- say_reducer_6 +- say_reducer_7 +- say_reducer_8 +- say_reducer_9 +... 
(1 reducer not shown) + +Here are some existing procedures: +- say_procedure_0 +- say_procedure_1 +- say_procedure_2 +- say_procedure_3 +- say_procedure_4 +- say_procedure_5 +- say_procedure_6 +- say_procedure_7 +- say_procedure_8 +- say_procedure_9 +... (1 procedure not shown)" + ); + assert!( + expected.contains(stderr.trim()), + "Expected stderr to be contained in expected message.\nExpected:\n{}\n\nActual stderr:\n{}", + expected, + stderr.trim() + ); +} diff --git a/crates/smoketests/tests/client_connection_errors.rs b/crates/smoketests/tests/client_connection_errors.rs new file mode 100644 index 00000000000..906d4569629 --- /dev/null +++ b/crates/smoketests/tests/client_connection_errors.rs @@ -0,0 +1,98 @@ +//! Tests translated from smoketests/tests/client_connected_error_rejects_connection.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE_REJECT: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = all_u8s, public)] +pub struct AllU8s { + number: u8, +} + +#[spacetimedb::reducer(init)] +pub fn init(ctx: &ReducerContext) { + for i in u8::MIN..=u8::MAX { + ctx.db.all_u8s().insert(AllU8s { number: i }); + } +} + +#[spacetimedb::reducer(client_connected)] +pub fn identity_connected(_ctx: &ReducerContext) -> Result<(), String> { + Err("Rejecting connection from client".to_string()) +} + +#[spacetimedb::reducer(client_disconnected)] +pub fn identity_disconnected(_ctx: &ReducerContext) { + panic!("This should never be called, since we reject all connections!") +} +"#; + +const MODULE_CODE_DISCONNECT_PANIC: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = all_u8s, public)] +pub struct AllU8s { + number: u8, +} + +#[spacetimedb::reducer(init)] +pub fn init(ctx: &ReducerContext) { + for i in u8::MIN..=u8::MAX { + ctx.db.all_u8s().insert(AllU8s { number: i }); + } +} + +#[spacetimedb::reducer(client_connected)] +pub fn identity_connected(_ctx: &ReducerContext) -> Result<(), String> { + Ok(()) +} + +#[spacetimedb::reducer(client_disconnected)] +pub fn identity_disconnected(_ctx: &ReducerContext) { + panic!("This should be called, but the `st_client` row should still be deleted") +} +"#; + +/// Test that client_connected returning an error rejects the connection +#[test] +fn test_client_connected_error_rejects_connection() { + let test = Smoketest::builder().module_code(MODULE_CODE_REJECT).build(); + + // Subscribe should fail because client_connected returns an error + let result = test.subscribe(&["SELECT * FROM all_u8s"], 0); + assert!( + result.is_err(), + "Expected subscribe to fail when client_connected returns error" + ); + + let logs = test.logs(100).unwrap(); + assert!( + logs.iter().any(|l| l.contains("Rejecting connection from client")), + "Expected rejection message in logs: {:?}", + logs + ); + assert!( + !logs.iter().any(|l| l.contains("This should never be called")), + "client_disconnected should not have been called: {:?}", + logs + ); +} + +/// Test that client_disconnected panicking still cleans up the st_client row +#[test] +fn test_client_disconnected_error_still_deletes_st_client() { + let test = Smoketest::builder().module_code(MODULE_CODE_DISCONNECT_PANIC).build(); + + // Subscribe should succeed (client_connected returns Ok) + let result = test.subscribe(&["SELECT * FROM all_u8s"], 0); + assert!(result.is_ok(), "Expected subscribe to succeed"); + + let logs = test.logs(100).unwrap(); + assert!( + logs.iter() + .any(|l| { l.contains("This should be called, but the `st_client` row should 
still be deleted") }), + "Expected disconnect panic message in logs: {:?}", + logs + ); +} diff --git a/crates/smoketests/tests/confirmed_reads.rs b/crates/smoketests/tests/confirmed_reads.rs new file mode 100644 index 00000000000..413211571bc --- /dev/null +++ b/crates/smoketests/tests/confirmed_reads.rs @@ -0,0 +1,75 @@ +//! Tests translated from smoketests/tests/confirmed_reads.py +//! +//! TODO: We only test that we can pass a --confirmed flag and that things +//! appear to work as if we hadn't. Without controlling the server, we can't +//! test that there is any difference in behavior. + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = person, public)] +pub struct Person { + name: String, +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name }); +} +"#; + +/// Tests that subscribing with confirmed=true receives updates +#[test] +fn test_confirmed_reads_receive_updates() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + // Start subscription in background with confirmed flag + let sub = test + .subscribe_background_confirmed(&["SELECT * FROM person"], 2) + .unwrap(); + + // Insert via reducer + test.call("add", &["Horst"]).unwrap(); + + // Insert via SQL + test.sql("INSERT INTO person (name) VALUES ('Egon')").unwrap(); + + // Collect updates + let events = sub.collect().unwrap(); + + assert_eq!(events.len(), 2, "Expected 2 updates, got {:?}", events); + + // Check that we got the expected inserts + let horst_insert = serde_json::json!({ + "person": { + "deletes": [], + "inserts": [{"name": "Horst"}] + } + }); + let egon_insert = serde_json::json!({ + "person": { + "deletes": [], + "inserts": [{"name": "Egon"}] + } + }); + + assert_eq!(events[0], horst_insert); + assert_eq!(events[1], egon_insert); +} + +/// Tests that an SQL operation with confirmed=true returns a result +#[test] +fn test_sql_with_confirmed_reads_receives_result() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + // Insert with confirmed + test.sql_confirmed("INSERT INTO person (name) VALUES ('Horst')") + .unwrap(); + + // Query with confirmed + let result = test.sql_confirmed("SELECT * FROM person").unwrap(); + + assert!(result.contains("Horst"), "Expected 'Horst' in result: {}", result); +} diff --git a/crates/smoketests/tests/connect_disconnect_from_cli.rs b/crates/smoketests/tests/connect_disconnect_from_cli.rs new file mode 100644 index 00000000000..c5eeb4d87a0 --- /dev/null +++ b/crates/smoketests/tests/connect_disconnect_from_cli.rs @@ -0,0 +1,47 @@ +//! 
Tests translated from smoketests/tests/connect_disconnect_from_cli.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext}; + +#[spacetimedb::reducer(client_connected)] +pub fn connected(_ctx: &ReducerContext) { + log::info!("_connect called"); +} + +#[spacetimedb::reducer(client_disconnected)] +pub fn disconnected(_ctx: &ReducerContext) { + log::info!("disconnect called"); +} + +#[spacetimedb::reducer] +pub fn say_hello(_ctx: &ReducerContext) { + log::info!("Hello, World!"); +} +"#; + +/// Ensure that the connect and disconnect functions are called when invoking a reducer from the CLI +#[test] +fn test_conn_disconn() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + test.call("say_hello", &[]).unwrap(); + + let logs = test.logs(10).unwrap(); + assert!( + logs.iter().any(|l| l.contains("_connect called")), + "Expected '_connect called' in logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("disconnect called")), + "Expected 'disconnect called' in logs: {:?}", + logs + ); + assert!( + logs.iter().any(|l| l.contains("Hello, World!")), + "Expected 'Hello, World!' in logs: {:?}", + logs + ); +} diff --git a/crates/smoketests/tests/create_project.rs b/crates/smoketests/tests/create_project.rs new file mode 100644 index 00000000000..566778888d4 --- /dev/null +++ b/crates/smoketests/tests/create_project.rs @@ -0,0 +1,74 @@ +//! Tests translated from smoketests/tests/create_project.py + +use spacetimedb_guard::ensure_binaries_built; +use std::process::Command; +use tempfile::tempdir; + +/// Ensure that the CLI is able to create a local project. +/// This test does not depend on a running spacetimedb instance. +#[test] +fn test_create_project() { + let cli_path = ensure_binaries_built(); + let tmpdir = tempdir().expect("Failed to create temp dir"); + let tmpdir_path = tmpdir.path().to_str().unwrap(); + + // Without --lang, init should fail + let output = Command::new(&cli_path) + .args(["init", "--non-interactive", "test-project"]) + .current_dir(tmpdir_path) + .output() + .expect("Failed to run spacetime init"); + assert!(!output.status.success(), "Expected init without --lang to fail"); + + // Without --project-path to specify location, init should fail + let output = Command::new(&cli_path) + .args([ + "init", + "--non-interactive", + "--project-path", + tmpdir_path, + "test-project", + ]) + .output() + .expect("Failed to run spacetime init"); + assert!( + !output.status.success(), + "Expected init without --lang to fail even with --project-path" + ); + + // With all required args, init should succeed + let output = Command::new(&cli_path) + .args([ + "init", + "--non-interactive", + "--lang=rust", + "--project-path", + tmpdir_path, + "test-project", + ]) + .output() + .expect("Failed to run spacetime init"); + assert!( + output.status.success(), + "Expected init to succeed:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); + + // Running init again in the same directory should fail (already exists) + let output = Command::new(&cli_path) + .args([ + "init", + "--non-interactive", + "--lang=rust", + "--project-path", + tmpdir_path, + "test-project", + ]) + .output() + .expect("Failed to run spacetime init"); + assert!( + !output.status.success(), + "Expected init to fail when project already exists" + ); +} diff --git a/crates/smoketests/tests/csharp_module.rs b/crates/smoketests/tests/csharp_module.rs new file mode 100644 index 
00000000000..7763e03227d --- /dev/null +++ b/crates/smoketests/tests/csharp_module.rs @@ -0,0 +1,127 @@
+#![allow(clippy::disallowed_macros)]
+//! Tests translated from smoketests/tests/csharp_module.py
+
+use spacetimedb_smoketests::{have_dotnet, workspace_root};
+use std::fs;
+use std::process::Command;
+
+/// Ensure that the CLI is able to create and compile a C# project.
+/// This test does not depend on a running SpacetimeDB instance.
+/// Skips if dotnet 8.0+ is not available.
+#[test]
+fn test_build_csharp_module() {
+    if !have_dotnet() {
+        eprintln!("Skipping test_build_csharp_module: dotnet 8.0+ not available");
+        return;
+    }
+
+    let workspace = workspace_root();
+    let bindings = workspace.join("crates/bindings-csharp");
+    let cli_path = workspace.join("target/debug/spacetimedb-cli");
+
+    // Build the CLI if needed
+    let status = Command::new("cargo")
+        .args(["build", "-p", "spacetimedb-cli"])
+        .current_dir(&workspace)
+        .status()
+        .expect("Failed to build CLI");
+    assert!(status.success(), "Failed to build spacetimedb-cli");
+
+    // Clear nuget locals
+    let status = Command::new("dotnet")
+        .args(["nuget", "locals", "all", "--clear"])
+        .current_dir(&bindings)
+        .status()
+        .expect("Failed to clear nuget locals");
+    assert!(status.success(), "Failed to clear nuget locals");
+
+    // Install wasi-experimental workload
+    let _status = Command::new("dotnet")
+        .args(["workload", "install", "wasi-experimental", "--skip-manifest-update"])
+        .current_dir(workspace.join("modules"))
+        .status()
+        .expect("Failed to install wasi workload");
+    // This may fail if already installed, so we don't assert success
+
+    // Pack the bindings
+    let status = Command::new("dotnet")
+        .args(["pack"])
+        .current_dir(&bindings)
+        .status()
+        .expect("Failed to pack bindings");
+    assert!(status.success(), "Failed to pack C# bindings");
+
+    // Create temp directory for the project
+    let tmpdir = tempfile::tempdir().expect("Failed to create temp directory");
+
+    // Initialize C# project
+    let output = Command::new(&cli_path)
+        .args([
+            "init",
+            "--non-interactive",
+            "--lang=csharp",
+            "--project-path",
+            tmpdir.path().to_str().unwrap(),
+            "csharp-project",
+        ])
+        .output()
+        .expect("Failed to run spacetime init");
+    assert!(
+        output.status.success(),
+        "spacetime init failed:\nstdout: {}\nstderr: {}",
+        String::from_utf8_lossy(&output.stdout),
+        String::from_utf8_lossy(&output.stderr)
+    );
+
+    let server_path = tmpdir.path().join("spacetimedb");
+
+    // Create nuget.config with local package sources,
+    // mapping the SpacetimeDB packages to the locally packed builds.
+    let packed_projects = ["BSATN.Runtime", "Runtime"];
+    let mut sources = String::new();
+    let mut mappings = String::new();
+
+    for project in &packed_projects {
+        let path = bindings.join(project).join("bin/Release");
+        let package_name = format!("SpacetimeDB.{}", project);
+        sources.push_str(&format!(
+            "    <add key=\"{}\" value=\"{}\" />\n",
+            package_name,
+            path.display()
+        ));
+        mappings.push_str(&format!(
+            "    <packageSource key=\"{}\">\n      <package pattern=\"{}\" />\n    </packageSource>\n",
+            package_name, package_name
+        ));
+    }
+    // Add fallback for other packages: everything else resolves from nuget.org
+    mappings.push_str("    <packageSource key=\"nuget.org\">\n      <package pattern=\"*\" />\n    </packageSource>\n");
+
+    let nuget_config = format!(
+        r#"<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <packageSources>
+{}  </packageSources>
+  <packageSourceMapping>
+{}  </packageSourceMapping>
+</configuration>
+"#,
+        sources, mappings
+    );
+
+    eprintln!("Writing nuget.config contents:\n{}", nuget_config);
+    fs::write(server_path.join("nuget.config"), &nuget_config).expect("Failed to write nuget.config");
+
+    // Run dotnet publish
+    let output = Command::new("dotnet")
+        .args(["publish"])
+        .current_dir(&server_path)
+        .output()
+        .expect("Failed to run dotnet publish");
+
+    assert!(
+        output.status.success(),
+        "dotnet publish failed:\nstdout: {}\nstderr: {}",
String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); +} diff --git a/crates/smoketests/tests/default_module_clippy.rs b/crates/smoketests/tests/default_module_clippy.rs new file mode 100644 index 00000000000..4af80f3aa4d --- /dev/null +++ b/crates/smoketests/tests/default_module_clippy.rs @@ -0,0 +1,24 @@ +//! Tests translated from smoketests/tests/default_module_clippy.py + +use spacetimedb_smoketests::Smoketest; +use std::process::Command; + +/// Ensure that the default rust module has no clippy errors or warnings +#[test] +fn test_default_module_clippy_check() { + // Build a smoketest with the default module code (no custom code) + let test = Smoketest::builder().autopublish(false).build(); + + let output = Command::new("cargo") + .args(["clippy", "--", "-Dwarnings"]) + .current_dir(test.project_dir.path()) + .output() + .expect("Failed to run cargo clippy"); + + assert!( + output.status.success(), + "Default module should have no clippy warnings:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); +} diff --git a/crates/smoketests/tests/delete_database.rs b/crates/smoketests/tests/delete_database.rs new file mode 100644 index 00000000000..fc4bcddf629 --- /dev/null +++ b/crates/smoketests/tests/delete_database.rs @@ -0,0 +1,78 @@ +//! Tests translated from smoketests/tests/delete_database.py + +use spacetimedb_smoketests::Smoketest; +use std::thread; +use std::time::Duration; + +const MODULE_CODE: &str = r#" +use spacetimedb::{ReducerContext, Table, duration}; + +#[spacetimedb::table(name = counter, public)] +pub struct Counter { + #[primary_key] + id: u64, + val: u64 +} + +#[spacetimedb::table(name = scheduled_counter, public, scheduled(inc, at = sched_at))] +pub struct ScheduledCounter { + #[primary_key] + #[auto_inc] + scheduled_id: u64, + sched_at: spacetimedb::ScheduleAt, +} + +#[spacetimedb::reducer] +pub fn inc(ctx: &ReducerContext, arg: ScheduledCounter) { + if let Some(mut counter) = ctx.db.counter().id().find(arg.scheduled_id) { + counter.val += 1; + ctx.db.counter().id().update(counter); + } else { + ctx.db.counter().insert(Counter { + id: arg.scheduled_id, + val: 1, + }); + } +} + +#[spacetimedb::reducer(init)] +pub fn init(ctx: &ReducerContext) { + ctx.db.scheduled_counter().insert(ScheduledCounter { + scheduled_id: 0, + sched_at: duration!(100ms).into(), + }); +} +"#; + +/// Test that deleting a database stops the module. +/// The module is considered stopped if its scheduled reducer stops +/// producing update events. +#[test] +fn test_delete_database() { + let mut test = Smoketest::builder().module_code(MODULE_CODE).autopublish(false).build(); + + let name = format!("test-db-{}", std::process::id()); + test.publish_module_named(&name, false).unwrap(); + + // Start subscription in background to collect updates + // We request many updates but will stop early when we delete the db + let sub = test.subscribe_background(&["SELECT * FROM counter"], 1000).unwrap(); + + // Let the scheduled reducer run for a bit + thread::sleep(Duration::from_secs(2)); + + // Delete the database + test.spacetime(&["delete", "--server", &test.server_url, &name]) + .unwrap(); + + // Collect whatever updates we got + let updates = sub.collect().unwrap(); + + // At a rate of 100ms, we shouldn't have more than 20 updates in 2secs. + // But let's say 50, in case the delete gets delayed for some reason. + assert!( + updates.len() <= 50, + "Expected at most 50 updates, got {}. 
Database may not have stopped.", + updates.len() + ); +} diff --git a/crates/smoketests/tests/describe.rs b/crates/smoketests/tests/describe.rs new file mode 100644 index 00000000000..a00b723aae6 --- /dev/null +++ b/crates/smoketests/tests/describe.rs @@ -0,0 +1,61 @@ +//! Module description tests translated from smoketests/tests/describe.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String, +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name }); +} + +#[spacetimedb::reducer] +pub fn say_hello(ctx: &ReducerContext) { + for person in ctx.db.person().iter() { + log::info!("Hello, {}!", person.name); + } + log::info!("Hello, World!"); +} +"#; + +/// Check describing a module +#[test] +fn test_describe() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + let identity = test.database_identity.as_ref().unwrap(); + + // Describe the whole module + test.spacetime(&["describe", "--server", &test.server_url, "--json", identity]) + .unwrap(); + + // Describe a specific reducer + test.spacetime(&[ + "describe", + "--server", + &test.server_url, + "--json", + identity, + "reducer", + "say_hello", + ]) + .unwrap(); + + // Describe a specific table + test.spacetime(&[ + "describe", + "--server", + &test.server_url, + "--json", + identity, + "table", + "person", + ]) + .unwrap(); +} diff --git a/crates/smoketests/tests/detect_wasm_bindgen.rs b/crates/smoketests/tests/detect_wasm_bindgen.rs new file mode 100644 index 00000000000..9b99ccb6280 --- /dev/null +++ b/crates/smoketests/tests/detect_wasm_bindgen.rs @@ -0,0 +1,68 @@ +//! Tests translated from smoketests/tests/detect_wasm_bindgen.py + +use spacetimedb_smoketests::Smoketest; + +/// Module code that uses wasm_bindgen (should be rejected) +const MODULE_CODE_WASM_BINDGEN: &str = r#" +use spacetimedb::{log, ReducerContext}; + +#[spacetimedb::reducer] +pub fn test(_ctx: &ReducerContext) { + log::info!("Hello! {}", now()); +} + +#[wasm_bindgen::prelude::wasm_bindgen] +extern "C" { + fn now() -> i32; +} +"#; + +/// Module code that uses getrandom via rand (should be rejected) +const MODULE_CODE_GETRANDOM: &str = r#" +use spacetimedb::{log, ReducerContext}; + +#[spacetimedb::reducer] +pub fn test(_ctx: &ReducerContext) { + log::info!("Hello! 
{}", rand::random::<u8>());
+}
+"#;
+
+/// Ensure that spacetime build properly catches wasm_bindgen imports
+#[test]
+fn test_detect_wasm_bindgen() {
+    let test = Smoketest::builder()
+        .module_code(MODULE_CODE_WASM_BINDGEN)
+        .extra_deps(r#"wasm-bindgen = "0.2""#)
+        .autopublish(false)
+        .build();
+
+    let output = test.spacetime_build();
+    assert!(!output.status.success(), "Expected build to fail with wasm_bindgen");
+
+    let stderr = String::from_utf8_lossy(&output.stderr);
+    assert!(
+        stderr.contains("wasm-bindgen detected"),
+        "Expected 'wasm-bindgen detected' in stderr, got: {}",
+        stderr
+    );
+}
+
+/// Ensure that spacetime build properly catches getrandom usage
+#[test]
+fn test_detect_getrandom() {
+    let test = Smoketest::builder()
+        .module_code(MODULE_CODE_GETRANDOM)
+        .extra_deps(r#"rand = "0.8""#)
+        .autopublish(false)
+        .build();
+
+    let output = test.spacetime_build();
+    assert!(!output.status.success(), "Expected build to fail with getrandom");
+
+    let stderr = String::from_utf8_lossy(&output.stderr);
+    assert!(
+        stderr.contains("getrandom usage detected"),
+        "Expected 'getrandom usage detected' in stderr, got: {}",
+        stderr
+    );
+}
diff --git a/crates/smoketests/tests/dml.rs b/crates/smoketests/tests/dml.rs new file mode 100644 index 00000000000..5458c0d9830 --- /dev/null +++ b/crates/smoketests/tests/dml.rs @@ -0,0 +1,43 @@
+//! DML tests translated from smoketests/tests/dml.py
+
+use spacetimedb_smoketests::Smoketest;
+
+const MODULE_CODE: &str = r#"
+use spacetimedb::{ReducerContext, Table};
+
+#[spacetimedb::table(name = t, public)]
+pub struct T {
+    name: String,
+}
+"#;
+
+/// Test that we receive subscription updates from DML
+#[test]
+fn test_subscribe() {
+    use std::thread;
+    use std::time::Duration;
+
+    let test = Smoketest::builder().module_code(MODULE_CODE).build();
+
+    // Start subscription FIRST (in background), matching Python semantics
+    let sub = test.subscribe_background(&["SELECT * FROM t"], 2).unwrap();
+
+    // Small delay to ensure subscription is connected before inserts
+    thread::sleep(Duration::from_millis(500));
+
+    // Then do the SQL inserts while subscription is running
+    test.sql("INSERT INTO t (name) VALUES ('Alice')").unwrap();
+    test.sql("INSERT INTO t (name) VALUES ('Bob')").unwrap();
+
+    // Collect the subscription results
+    let updates = sub.collect().unwrap();
+
+    assert_eq!(
+        updates,
+        vec![
+            serde_json::json!({"t": {"deletes": [], "inserts": [{"name": "Alice"}]}}),
+            serde_json::json!({"t": {"deletes": [], "inserts": [{"name": "Bob"}]}}),
+        ],
+        "Expected subscription updates for Alice and Bob inserts"
+    );
+}
diff --git a/crates/smoketests/tests/domains.rs b/crates/smoketests/tests/domains.rs new file mode 100644 index 00000000000..df845edf63d --- /dev/null +++ b/crates/smoketests/tests/domains.rs @@ -0,0 +1,78 @@
+//!
Tests translated from smoketests/tests/domains.py + +use spacetimedb_smoketests::Smoketest; + +/// Tests the functionality of the rename command +#[test] +fn test_set_name() { + let mut test = Smoketest::builder().autopublish(false).build(); + + let orig_name = format!("test-db-{}", std::process::id()); + test.publish_module_named(&orig_name, false).unwrap(); + + let rand_name = format!("test-db-{}-renamed", std::process::id()); + + // This should fail before there's a db with this name + let result = test.spacetime(&["logs", "--server", &test.server_url, &rand_name]); + assert!(result.is_err(), "Expected logs to fail for non-existent name"); + + // Rename the database + let identity = test.database_identity.as_ref().unwrap(); + test.spacetime(&["rename", "--server", &test.server_url, "--to", &rand_name, identity]) + .unwrap(); + + // Now logs should work with the new name + test.spacetime(&["logs", "--server", &test.server_url, &rand_name]) + .unwrap(); + + // Original name should no longer work + let result = test.spacetime(&["logs", "--server", &test.server_url, &orig_name]); + assert!(result.is_err(), "Expected logs to fail for original name after rename"); +} + +/// Test how we treat the / character in published names +#[test] +fn test_subdomain_behavior() { + let mut test = Smoketest::builder().autopublish(false).build(); + + let root_name = format!("test-db-{}", std::process::id()); + test.publish_module_named(&root_name, false).unwrap(); + + // Double slash should fail + let double_slash_name = format!("{}//test", root_name); + let result = test.publish_module_named(&double_slash_name, false); + assert!(result.is_err(), "Expected publish to fail with double slash in name"); + + // Trailing slash should fail + let trailing_slash_name = format!("{}/test/", root_name); + let result = test.publish_module_named(&trailing_slash_name, false); + assert!(result.is_err(), "Expected publish to fail with trailing slash in name"); +} + +/// Test that we can't rename to a name already in use +#[test] +fn test_set_to_existing_name() { + let mut test = Smoketest::builder().autopublish(false).build(); + + // Publish first database (no name) + test.publish_module().unwrap(); + let id_to_rename = test.database_identity.clone().unwrap(); + + // Publish second database with a name + let rename_to = format!("test-db-{}-target", std::process::id()); + test.publish_module_named(&rename_to, false).unwrap(); + + // Try to rename first db to the name of the second - should fail + let result = test.spacetime(&[ + "rename", + "--server", + &test.server_url, + "--to", + &rename_to, + &id_to_rename, + ]); + assert!( + result.is_err(), + "Expected rename to fail when target name is already in use" + ); +} diff --git a/crates/smoketests/tests/energy.rs b/crates/smoketests/tests/energy.rs new file mode 100644 index 00000000000..65e1eba5a1e --- /dev/null +++ b/crates/smoketests/tests/energy.rs @@ -0,0 +1,16 @@ +//! Tests translated from smoketests/tests/energy.py + +use regex::Regex; +use spacetimedb_smoketests::Smoketest; + +/// Test getting energy balance. 
+#[test] +fn test_energy_balance() { + let test = Smoketest::builder().build(); + + let output = test + .spacetime(&["energy", "balance", "--server", &test.server_url]) + .unwrap(); + let re = Regex::new(r#"\{"balance":"-?[0-9]+"\}"#).unwrap(); + assert!(re.is_match(&output), "Expected energy balance JSON, got: {}", output); +} diff --git a/crates/smoketests/tests/fail_initial_publish.rs b/crates/smoketests/tests/fail_initial_publish.rs new file mode 100644 index 00000000000..0435d09076c --- /dev/null +++ b/crates/smoketests/tests/fail_initial_publish.rs @@ -0,0 +1,91 @@ +//! Tests translated from smoketests/tests/fail_initial_publish.py + +use spacetimedb_smoketests::Smoketest; + +/// Module code with a bug: `Person` is the wrong table name, should be `person` +const MODULE_CODE_BROKEN: &str = r#" +use spacetimedb::{client_visibility_filter, Filter}; + +#[spacetimedb::table(name = person, public)] +pub struct Person { + name: String, +} + +#[client_visibility_filter] +// Bug: `Person` is the wrong table name, should be `person`. +const HIDE_PEOPLE_EXCEPT_ME: Filter = Filter::Sql("SELECT * FROM Person WHERE name = 'me'"); +"#; + +/// Fixed module code with correct table name +const MODULE_CODE_FIXED: &str = r#" +use spacetimedb::{client_visibility_filter, Filter}; + +#[spacetimedb::table(name = person, public)] +pub struct Person { + name: String, +} + +#[client_visibility_filter] +const HIDE_PEOPLE_EXCEPT_ME: Filter = Filter::Sql("SELECT * FROM person WHERE name = 'me'"); +"#; + +const FIXED_QUERY: &str = r#""sql": "SELECT * FROM person WHERE name = 'me'""#; + +/// This tests that publishing an invalid module does not leave a broken entry in the control DB. +#[test] +fn test_fail_initial_publish() { + let mut test = Smoketest::builder() + .module_code(MODULE_CODE_BROKEN) + .autopublish(false) + .build(); + + let name = format!("test-db-{}", std::process::id()); + + // First publish should fail due to broken module + let result = test.publish_module_named(&name, false); + assert!(result.is_err(), "Expected publish to fail with broken module"); + + // Describe should fail because database doesn't exist + let describe_output = test.spacetime_cmd(&["describe", "--json", &name]); + assert!( + !describe_output.status.success(), + "Expected describe to fail for non-existent database" + ); + let stderr = String::from_utf8_lossy(&describe_output.stderr); + assert!( + stderr.contains("No such database"), + "Expected 'No such database' in stderr, got: {}", + stderr + ); + + // We can publish a fixed module under the same database name. + // This used to be broken; the failed initial publish would leave + // the control database in a bad state. + test.write_module_code(MODULE_CODE_FIXED).unwrap(); + test.publish_module_named(&name, false).unwrap(); + + let describe_output = test + .spacetime(&["describe", "--server", &test.server_url, "--json", &name]) + .unwrap(); + assert!( + describe_output.contains(FIXED_QUERY), + "Expected describe output to contain fixed query.\nGot: {}", + describe_output + ); + + // Publishing the broken code again fails, but the database still exists afterwards, + // with the previous version of the module code. 
+ test.write_module_code(MODULE_CODE_BROKEN).unwrap(); + let result = test.publish_module_named(&name, false); + assert!(result.is_err(), "Expected publish to fail with broken module"); + + // Database should still exist with the fixed code + let describe_output = test + .spacetime(&["describe", "--server", &test.server_url, "--json", &name]) + .unwrap(); + assert!( + describe_output.contains(FIXED_QUERY), + "Expected describe output to still contain fixed query after failed update.\nGot: {}", + describe_output + ); +} diff --git a/crates/smoketests/tests/filtering.rs b/crates/smoketests/tests/filtering.rs new file mode 100644 index 00000000000..6f39a504b0e --- /dev/null +++ b/crates/smoketests/tests/filtering.rs @@ -0,0 +1,479 @@ +//! Filtering tests translated from smoketests/tests/filtering.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{log, Identity, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + #[unique] + id: i32, + + name: String, + + #[unique] + nick: String, +} + +#[spacetimedb::reducer] +pub fn insert_person(ctx: &ReducerContext, id: i32, name: String, nick: String) { + ctx.db.person().insert(Person { id, name, nick} ); +} + +#[spacetimedb::reducer] +pub fn insert_person_twice(ctx: &ReducerContext, id: i32, name: String, nick: String) { + // We'd like to avoid an error due to a set-semantic error. + let name2 = format!("{name}2"); + ctx.db.person().insert(Person { id, name, nick: nick.clone()} ); + match ctx.db.person().try_insert(Person { id, name: name2, nick: nick.clone()}) { + Ok(_) => {}, + Err(_) => { + log::info!("UNIQUE CONSTRAINT VIOLATION ERROR: id = {}, nick = {}", id, nick) + } + } +} + +#[spacetimedb::reducer] +pub fn delete_person(ctx: &ReducerContext, id: i32) { + ctx.db.person().id().delete(&id); +} + +#[spacetimedb::reducer] +pub fn find_person(ctx: &ReducerContext, id: i32) { + match ctx.db.person().id().find(&id) { + Some(person) => log::info!("UNIQUE FOUND: id {}: {}", id, person.name), + None => log::info!("UNIQUE NOT FOUND: id {}", id), + } +} + +#[spacetimedb::reducer] +pub fn find_person_read_only(ctx: &ReducerContext, id: i32) { + let ctx = ctx.as_read_only(); + match ctx.db.person().id().find(&id) { + Some(person) => log::info!("UNIQUE FOUND: id {}: {}", id, person.name), + None => log::info!("UNIQUE NOT FOUND: id {}", id), + } +} + +#[spacetimedb::reducer] +pub fn find_person_by_name(ctx: &ReducerContext, name: String) { + for person in ctx.db.person().iter().filter(|p| p.name == name) { + log::info!("UNIQUE FOUND: id {}: {} aka {}", person.id, person.name, person.nick); + } +} + +#[spacetimedb::reducer] +pub fn find_person_by_nick(ctx: &ReducerContext, nick: String) { + match ctx.db.person().nick().find(&nick) { + Some(person) => log::info!("UNIQUE FOUND: id {}: {}", person.id, person.nick), + None => log::info!("UNIQUE NOT FOUND: nick {}", nick), + } +} + +#[spacetimedb::reducer] +pub fn find_person_by_nick_read_only(ctx: &ReducerContext, nick: String) { + let ctx = ctx.as_read_only(); + match ctx.db.person().nick().find(&nick) { + Some(person) => log::info!("UNIQUE FOUND: id {}: {}", person.id, person.nick), + None => log::info!("UNIQUE NOT FOUND: nick {}", nick), + } +} + +#[spacetimedb::table(name = nonunique_person)] +pub struct NonuniquePerson { + #[index(btree)] + id: i32, + name: String, + is_human: bool, +} + +#[spacetimedb::reducer] +pub fn insert_nonunique_person(ctx: &ReducerContext, id: i32, name: String, is_human: bool) { + 
ctx.db.nonunique_person().insert(NonuniquePerson { id, name, is_human } ); +} + +#[spacetimedb::reducer] +pub fn find_nonunique_person(ctx: &ReducerContext, id: i32) { + for person in ctx.db.nonunique_person().id().filter(&id) { + log::info!("NONUNIQUE FOUND: id {}: {}", id, person.name) + } +} + +#[spacetimedb::reducer] +pub fn find_nonunique_person_read_only(ctx: &ReducerContext, id: i32) { + let ctx = ctx.as_read_only(); + for person in ctx.db.nonunique_person().id().filter(&id) { + log::info!("NONUNIQUE FOUND: id {}: {}", id, person.name) + } +} + +#[spacetimedb::reducer] +pub fn find_nonunique_humans(ctx: &ReducerContext) { + for person in ctx.db.nonunique_person().iter().filter(|p| p.is_human) { + log::info!("HUMAN FOUND: id {}: {}", person.id, person.name); + } +} + +#[spacetimedb::reducer] +pub fn find_nonunique_non_humans(ctx: &ReducerContext) { + for person in ctx.db.nonunique_person().iter().filter(|p| !p.is_human) { + log::info!("NON-HUMAN FOUND: id {}: {}", person.id, person.name); + } +} + +// Ensure that [Identity] is filterable and a legal unique column. +#[spacetimedb::table(name = identified_person)] +struct IdentifiedPerson { + #[unique] + identity: Identity, + name: String, +} + +fn identify(id_number: u64) -> Identity { + let mut bytes = [0u8; 32]; + bytes[..8].clone_from_slice(&id_number.to_le_bytes()); + Identity::from_byte_array(bytes) +} + +#[spacetimedb::reducer] +fn insert_identified_person(ctx: &ReducerContext, id_number: u64, name: String) { + let identity = identify(id_number); + ctx.db.identified_person().insert(IdentifiedPerson { identity, name }); +} + +#[spacetimedb::reducer] +fn find_identified_person(ctx: &ReducerContext, id_number: u64) { + let identity = identify(id_number); + match ctx.db.identified_person().identity().find(&identity) { + Some(person) => log::info!("IDENTIFIED FOUND: {}", person.name), + None => log::info!("IDENTIFIED NOT FOUND"), + } +} + +// Ensure that indices on non-unique columns behave as we expect. +#[spacetimedb::table(name = indexed_person)] +struct IndexedPerson { + #[unique] + id: i32, + given_name: String, + #[index(btree)] + surname: String, +} + +#[spacetimedb::reducer] +fn insert_indexed_person(ctx: &ReducerContext, id: i32, given_name: String, surname: String) { + ctx.db.indexed_person().insert(IndexedPerson { id, given_name, surname }); +} + +#[spacetimedb::reducer] +fn delete_indexed_person(ctx: &ReducerContext, id: i32) { + ctx.db.indexed_person().id().delete(&id); +} + +#[spacetimedb::reducer] +fn find_indexed_people(ctx: &ReducerContext, surname: String) { + for person in ctx.db.indexed_person().surname().filter(&surname) { + log::info!("INDEXED FOUND: id {}: {}, {}", person.id, person.surname, person.given_name); + } +} + +#[spacetimedb::reducer] +fn find_indexed_people_read_only(ctx: &ReducerContext, surname: String) { + let ctx = ctx.as_read_only(); + for person in ctx.db.indexed_person().surname().filter(&surname) { + log::info!("INDEXED FOUND: id {}: {}, {}", person.id, person.surname, person.given_name); + } +} +"#; + +/// Test filtering reducers +#[test] +fn test_filtering() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + test.call("insert_person", &["23", r#""Alice""#, r#""al""#]).unwrap(); + test.call("insert_person", &["42", r#""Bob""#, r#""bo""#]).unwrap(); + test.call("insert_person", &["64", r#""Bob""#, r#""b2""#]).unwrap(); + + // Find a person who is there. 
+ test.call("find_person", &["23"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 23: Alice")), + "Expected 'UNIQUE FOUND: id 23: Alice' in logs, got: {:?}", + logs + ); + + // Find persons with the same name. + test.call("find_person_by_name", &[r#""Bob""#]).unwrap(); + let logs = test.logs(4).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 42: Bob aka bo")), + "Expected 'UNIQUE FOUND: id 42: Bob aka bo' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 64: Bob aka b2")), + "Expected 'UNIQUE FOUND: id 64: Bob aka b2' in logs, got: {:?}", + logs + ); + + // Fail to find a person who is not there. + test.call("find_person", &["43"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE NOT FOUND: id 43")), + "Expected 'UNIQUE NOT FOUND: id 43' in logs, got: {:?}", + logs + ); + test.call("find_person_read_only", &["43"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE NOT FOUND: id 43")), + "Expected 'UNIQUE NOT FOUND: id 43' in logs, got: {:?}", + logs + ); + + // Find a person by nickname. + test.call("find_person_by_nick", &[r#""al""#]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 23: al")), + "Expected 'UNIQUE FOUND: id 23: al' in logs, got: {:?}", + logs + ); + test.call("find_person_by_nick_read_only", &[r#""al""#]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 23: al")), + "Expected 'UNIQUE FOUND: id 23: al' in logs, got: {:?}", + logs + ); + + // Remove a person, and then fail to find them. + test.call("delete_person", &["23"]).unwrap(); + test.call("find_person", &["23"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE NOT FOUND: id 23")), + "Expected 'UNIQUE NOT FOUND: id 23' in logs, got: {:?}", + logs + ); + test.call("find_person_read_only", &["23"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE NOT FOUND: id 23")), + "Expected 'UNIQUE NOT FOUND: id 23' in logs, got: {:?}", + logs + ); + // Also fail by nickname + test.call("find_person_by_nick", &[r#""al""#]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE NOT FOUND: nick al")), + "Expected 'UNIQUE NOT FOUND: nick al' in logs, got: {:?}", + logs + ); + test.call("find_person_by_nick_read_only", &[r#""al""#]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE NOT FOUND: nick al")), + "Expected 'UNIQUE NOT FOUND: nick al' in logs, got: {:?}", + logs + ); + + // Add some nonunique people. + test.call("insert_nonunique_person", &["23", r#""Alice""#, "true"]) + .unwrap(); + test.call("insert_nonunique_person", &["42", r#""Bob""#, "true"]) + .unwrap(); + + // Find a nonunique person who is there. 
+ test.call("find_nonunique_person", &["23"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("NONUNIQUE FOUND: id 23: Alice")), + "Expected 'NONUNIQUE FOUND: id 23: Alice' in logs, got: {:?}", + logs + ); + test.call("find_nonunique_person_read_only", &["23"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("NONUNIQUE FOUND: id 23: Alice")), + "Expected 'NONUNIQUE FOUND: id 23: Alice' in logs, got: {:?}", + logs + ); + + // Fail to find a nonunique person who is not there. + test.call("find_nonunique_person", &["43"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + !logs.iter().any(|msg| msg.contains("NONUNIQUE NOT FOUND: id 43")), + "Expected no 'NONUNIQUE NOT FOUND: id 43' in logs, got: {:?}", + logs + ); + test.call("find_nonunique_person_read_only", &["43"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + !logs.iter().any(|msg| msg.contains("NONUNIQUE NOT FOUND: id 43")), + "Expected no 'NONUNIQUE NOT FOUND: id 43' in logs, got: {:?}", + logs + ); + + // Insert a non-human, then find humans, then find non-humans + test.call("insert_nonunique_person", &["64", r#""Jibbitty""#, "false"]) + .unwrap(); + test.call("find_nonunique_humans", &[]).unwrap(); + let logs = test.logs(4).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("HUMAN FOUND: id 23: Alice")), + "Expected 'HUMAN FOUND: id 23: Alice' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("HUMAN FOUND: id 42: Bob")), + "Expected 'HUMAN FOUND: id 42: Bob' in logs, got: {:?}", + logs + ); + test.call("find_nonunique_non_humans", &[]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("NON-HUMAN FOUND: id 64: Jibbitty")), + "Expected 'NON-HUMAN FOUND: id 64: Jibbitty' in logs, got: {:?}", + logs + ); + + // Add another person with the same id, and find them both. 
+ test.call("insert_nonunique_person", &["23", r#""Claire""#, "true"]) + .unwrap(); + test.call("find_nonunique_person", &["23"]).unwrap(); + let logs = test.logs(4).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("NONUNIQUE FOUND: id 23: Alice")), + "Expected 'NONUNIQUE FOUND: id 23: Alice' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("NONUNIQUE FOUND: id 23: Claire")), + "Expected 'NONUNIQUE FOUND: id 23: Claire' in logs, got: {:?}", + logs + ); + test.call("find_nonunique_person_read_only", &["23"]).unwrap(); + let logs = test.logs(4).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("NONUNIQUE FOUND: id 23: Alice")), + "Expected 'NONUNIQUE FOUND: id 23: Alice' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("NONUNIQUE FOUND: id 23: Claire")), + "Expected 'NONUNIQUE FOUND: id 23: Claire' in logs, got: {:?}", + logs + ); + + // Check for issues with things present in index but not DB + test.call("insert_person", &["101", r#""Fee""#, r#""fee""#]).unwrap(); + test.call("insert_person", &["102", r#""Fi""#, r#""fi""#]).unwrap(); + test.call("insert_person", &["103", r#""Fo""#, r#""fo""#]).unwrap(); + test.call("insert_person", &["104", r#""Fum""#, r#""fum""#]).unwrap(); + test.call("delete_person", &["103"]).unwrap(); + test.call("find_person", &["104"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 104: Fum")), + "Expected 'UNIQUE FOUND: id 104: Fum' in logs, got: {:?}", + logs + ); + test.call("find_person_read_only", &["104"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("UNIQUE FOUND: id 104: Fum")), + "Expected 'UNIQUE FOUND: id 104: Fum' in logs, got: {:?}", + logs + ); + + // As above, but for non-unique indices: check for consistency between index and DB + test.call("insert_indexed_person", &["7", r#""James""#, r#""Bond""#]) + .unwrap(); + test.call("insert_indexed_person", &["79", r#""Gold""#, r#""Bond""#]) + .unwrap(); + test.call("insert_indexed_person", &["1", r#""Hydrogen""#, r#""Bond""#]) + .unwrap(); + test.call("insert_indexed_person", &["100", r#""Whiskey""#, r#""Bond""#]) + .unwrap(); + test.call("delete_indexed_person", &["100"]).unwrap(); + test.call("find_indexed_people", &[r#""Bond""#]).unwrap(); + let logs = test.logs(10).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("INDEXED FOUND: id 7: Bond, James")), + "Expected 'INDEXED FOUND: id 7: Bond, James' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("INDEXED FOUND: id 79: Bond, Gold")), + "Expected 'INDEXED FOUND: id 79: Bond, Gold' in logs, got: {:?}", + logs + ); + assert!( + logs.iter() + .any(|msg| msg.contains("INDEXED FOUND: id 1: Bond, Hydrogen")), + "Expected 'INDEXED FOUND: id 1: Bond, Hydrogen' in logs, got: {:?}", + logs + ); + assert!( + !logs + .iter() + .any(|msg| msg.contains("INDEXED FOUND: id 100: Bond, Whiskey")), + "Expected no 'INDEXED FOUND: id 100: Bond, Whiskey' in logs, got: {:?}", + logs + ); + test.call("find_indexed_people_read_only", &[r#""Bond""#]).unwrap(); + let logs = test.logs(10).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("INDEXED FOUND: id 7: Bond, James")), + "Expected 'INDEXED FOUND: id 7: Bond, James' in logs, got: {:?}", + logs + ); + assert!( + logs.iter().any(|msg| msg.contains("INDEXED FOUND: id 79: Bond, Gold")), + "Expected 'INDEXED FOUND: id 79: Bond, Gold' in logs, got: {:?}", + logs + ); + assert!( + logs.iter() + 
.any(|msg| msg.contains("INDEXED FOUND: id 1: Bond, Hydrogen")), + "Expected 'INDEXED FOUND: id 1: Bond, Hydrogen' in logs, got: {:?}", + logs + ); + assert!( + !logs + .iter() + .any(|msg| msg.contains("INDEXED FOUND: id 100: Bond, Whiskey")), + "Expected no 'INDEXED FOUND: id 100: Bond, Whiskey' in logs, got: {:?}", + logs + ); + + // Filter by Identity + test.call("insert_identified_person", &["23", r#""Alice""#]).unwrap(); + test.call("find_identified_person", &["23"]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("IDENTIFIED FOUND: Alice")), + "Expected 'IDENTIFIED FOUND: Alice' in logs, got: {:?}", + logs + ); + + // Inserting into a table with unique constraints fails + // when the second row has the same value in the constrained columns as the first row. + // In this case, the table has `#[unique] id` and `#[unique] nick` but not `#[unique] name`. + test.call("insert_person_twice", &["23", r#""Alice""#, r#""al""#]) + .unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter() + .any(|msg| msg.contains("UNIQUE CONSTRAINT VIOLATION ERROR: id = 23, nick = al")), + "Expected 'UNIQUE CONSTRAINT VIOLATION ERROR: id = 23, nick = al' in logs, got: {:?}", + logs + ); +} diff --git a/crates/smoketests/tests/module_nested_op.rs b/crates/smoketests/tests/module_nested_op.rs new file mode 100644 index 00000000000..99d38a4acf0 --- /dev/null +++ b/crates/smoketests/tests/module_nested_op.rs @@ -0,0 +1,63 @@ +//! Nested table operation tests translated from smoketests/tests/module_nested_op.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = account)] +pub struct Account { + name: String, + #[unique] + id: i32, +} + +#[spacetimedb::table(name = friends)] +pub struct Friends { + friend_1: i32, + friend_2: i32, +} + +#[spacetimedb::reducer] +pub fn create_account(ctx: &ReducerContext, account_id: i32, name: String) { + ctx.db.account().insert(Account { id: account_id, name } ); +} + +#[spacetimedb::reducer] +pub fn add_friend(ctx: &ReducerContext, my_id: i32, their_id: i32) { + // Make sure our friend exists + for account in ctx.db.account().iter() { + if account.id == their_id { + ctx.db.friends().insert(Friends { friend_1: my_id, friend_2: their_id }); + return; + } + } +} + +#[spacetimedb::reducer] +pub fn say_friends(ctx: &ReducerContext) { + for friendship in ctx.db.friends().iter() { + let friend1 = ctx.db.account().id().find(&friendship.friend_1).unwrap(); + let friend2 = ctx.db.account().id().find(&friendship.friend_2).unwrap(); + log::info!("{} is friends with {}", friend1.name, friend2.name); + } +} +"#; + +/// This tests uploading a basic module and calling some functions and checking logs afterwards. +#[test] +fn test_module_nested_op() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + test.call("create_account", &["1", r#""House""#]).unwrap(); + test.call("create_account", &["2", r#""Wilson""#]).unwrap(); + test.call("add_friend", &["1", "2"]).unwrap(); + test.call("say_friends", &[]).unwrap(); + + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|msg| msg.contains("House is friends with Wilson")), + "Expected 'House is friends with Wilson' in logs, got: {:?}", + logs + ); +} diff --git a/crates/smoketests/tests/modules.rs b/crates/smoketests/tests/modules.rs new file mode 100644 index 00000000000..2340bfa1e16 --- /dev/null +++ b/crates/smoketests/tests/modules.rs @@ -0,0 +1,131 @@ +//! 
Tests translated from smoketests/tests/modules.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + #[primary_key] + #[auto_inc] + id: u64, + name: String, +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { id: 0, name }); +} + +#[spacetimedb::reducer] +pub fn say_hello(ctx: &ReducerContext) { + for person in ctx.db.person().iter() { + log::info!("Hello, {}!", person.name); + } + log::info!("Hello, World!"); +} +"#; + +/// Breaking change: adds a new column to Person +const MODULE_CODE_BREAKING: &str = r#" +#[spacetimedb::table(name = person)] +pub struct Person { + #[primary_key] + #[auto_inc] + id: u64, + name: String, + age: u8, +} +"#; + +/// Non-breaking change: adds a new table +const MODULE_CODE_ADD_TABLE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + #[primary_key] + #[auto_inc] + id: u64, + name: String, +} + +#[spacetimedb::table(name = pets)] +pub struct Pet { + species: String, +} + +#[spacetimedb::reducer] +pub fn are_we_updated_yet(ctx: &ReducerContext) { + log::info!("MODULE UPDATED"); +} +"#; + +/// Test publishing a module without the --delete-data option +#[test] +fn test_module_update() { + let mut test = Smoketest::builder().module_code(MODULE_CODE).autopublish(false).build(); + + let name = format!("test-db-{}", std::process::id()); + + // Initial publish + test.publish_module_named(&name, false).unwrap(); + + test.call("add", &["Robert"]).unwrap(); + test.call("add", &["Julie"]).unwrap(); + test.call("add", &["Samantha"]).unwrap(); + test.call("say_hello", &[]).unwrap(); + + let logs = test.logs(100).unwrap(); + assert!(logs.iter().any(|l| l.contains("Hello, Samantha!"))); + assert!(logs.iter().any(|l| l.contains("Hello, Julie!"))); + assert!(logs.iter().any(|l| l.contains("Hello, Robert!"))); + assert!(logs.iter().any(|l| l.contains("Hello, World!"))); + + // Unchanged module is ok + test.publish_module_named(&name, false).unwrap(); + + // Changing an existing table isn't + test.write_module_code(MODULE_CODE_BREAKING).unwrap(); + let result = test.publish_module_named(&name, false); + assert!(result.is_err(), "Expected publish to fail with breaking change"); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("manual migration") || err.contains("breaking"), + "Expected migration error, got: {}", + err + ); + + // Check that the old module is still running by calling say_hello + test.call("say_hello", &[]).unwrap(); + + // Adding a table is ok + test.write_module_code(MODULE_CODE_ADD_TABLE).unwrap(); + test.publish_module_named(&name, false).unwrap(); + test.call("are_we_updated_yet", &[]).unwrap(); + + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|l| l.contains("MODULE UPDATED")), + "Expected 'MODULE UPDATED' in logs, got: {:?}", + logs + ); +} + +/// Test uploading a basic module and calling some functions and checking logs +#[test] +fn test_upload_module() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + test.call("add", &["Robert"]).unwrap(); + test.call("add", &["Julie"]).unwrap(); + test.call("add", &["Samantha"]).unwrap(); + test.call("say_hello", &[]).unwrap(); + + let logs = test.logs(100).unwrap(); + assert!(logs.iter().any(|l| l.contains("Hello, Samantha!"))); + assert!(logs.iter().any(|l| l.contains("Hello, Julie!"))); + 
assert!(logs.iter().any(|l| l.contains("Hello, Robert!"))); + assert!(logs.iter().any(|l| l.contains("Hello, World!"))); +} diff --git a/crates/smoketests/tests/namespaces.rs b/crates/smoketests/tests/namespaces.rs new file mode 100644 index 00000000000..d83f416f381 --- /dev/null +++ b/crates/smoketests/tests/namespaces.rs @@ -0,0 +1,135 @@ +//! Namespace tests translated from smoketests/tests/namespaces.py + +use spacetimedb_smoketests::Smoketest; +use std::fs; +use std::path::Path; + +/// Template module code matching the Python test's default +const TEMPLATE_MODULE_CODE: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = person, public)] +pub struct Person { + name: String, +} + +#[spacetimedb::reducer(init)] +pub fn init(_ctx: &ReducerContext) { + // Called when the module is initially published +} + +#[spacetimedb::reducer(client_connected)] +pub fn identity_connected(_ctx: &ReducerContext) { + // Called everytime a new client connects +} + +#[spacetimedb::reducer(client_disconnected)] +pub fn identity_disconnected(_ctx: &ReducerContext) { + // Called everytime a client disconnects +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name }); +} + +#[spacetimedb::reducer] +pub fn say_hello(ctx: &ReducerContext) { + for person in ctx.db.person().iter() { + log::info!("Hello, {}!", person.name); + } + log::info!("Hello, World!"); +} +"#; + +/// Count occurrences of a needle string in all .cs files under a directory +fn count_matches(dir: &Path, needle: &str) -> usize { + let mut count = 0; + if let Ok(entries) = fs::read_dir(dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_dir() { + count += count_matches(&path, needle); + } else if path.extension().is_some_and(|ext| ext == "cs") { + if let Ok(contents) = fs::read_to_string(&path) { + count += contents.matches(needle).count(); + } + } + } + } + count +} + +/// Ensure that the default namespace is working properly +#[test] +fn test_spacetimedb_ns_csharp() { + let test = Smoketest::builder() + .module_code(TEMPLATE_MODULE_CODE) + .autopublish(false) + .build(); + + let tmpdir = tempfile::tempdir().expect("Failed to create temp dir"); + let project_path = test.project_dir.path().to_str().unwrap(); + + test.spacetime(&[ + "generate", + "--out-dir", + tmpdir.path().to_str().unwrap(), + "--lang=csharp", + "--project-path", + project_path, + ]) + .unwrap(); + + let namespace = "SpacetimeDB.Types"; + assert_eq!( + count_matches(tmpdir.path(), &format!("namespace {}", namespace)), + 7, + "Expected 7 occurrences of 'namespace {}'", + namespace + ); + assert_eq!( + count_matches(tmpdir.path(), "using SpacetimeDB;"), + 0, + "Expected 0 occurrences of 'using SpacetimeDB;'" + ); +} + +/// Ensure that when a custom namespace is specified on the command line, it actually gets used in generation +#[test] +fn test_custom_ns_csharp() { + let test = Smoketest::builder() + .module_code(TEMPLATE_MODULE_CODE) + .autopublish(false) + .build(); + + let tmpdir = tempfile::tempdir().expect("Failed to create temp dir"); + let project_path = test.project_dir.path().to_str().unwrap(); + + // Use a unique namespace name + let namespace = "CustomTestNamespace"; + + test.spacetime(&[ + "generate", + "--out-dir", + tmpdir.path().to_str().unwrap(), + "--lang=csharp", + "--namespace", + namespace, + "--project-path", + project_path, + ]) + .unwrap(); + + assert_eq!( + count_matches(tmpdir.path(), &format!("namespace {}", namespace)), + 7, + 
"Expected 7 occurrences of 'namespace {}'", + namespace + ); + assert_eq!( + count_matches(tmpdir.path(), "using SpacetimeDB;"), + 7, + "Expected 7 occurrences of 'using SpacetimeDB;'" + ); +} diff --git a/crates/smoketests/tests/new_user_flow.rs b/crates/smoketests/tests/new_user_flow.rs new file mode 100644 index 00000000000..7cc13388900 --- /dev/null +++ b/crates/smoketests/tests/new_user_flow.rs @@ -0,0 +1,63 @@ +//! Tests translated from smoketests/tests/new_user_flow.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name }); +} + +#[spacetimedb::reducer] +pub fn say_hello(ctx: &ReducerContext) { + for person in ctx.db.person().iter() { + log::info!("Hello, {}!", person.name); + } + log::info!("Hello, World!"); +} +"#; + +/// Test the entirety of the new user flow. +#[test] +fn test_new_user_flow() { + let mut test = Smoketest::builder().module_code(MODULE_CODE).autopublish(false).build(); + + // Create a new identity and publish + test.new_identity().unwrap(); + test.publish_module().unwrap(); + + // Calling our database + test.call("say_hello", &[]).unwrap(); + let logs = test.logs(2).unwrap(); + assert!( + logs.iter().any(|l| l.contains("Hello, World!")), + "Expected 'Hello, World!' in logs: {:?}", + logs + ); + + // Calling functions with arguments + test.call("add", &["Tyler"]).unwrap(); + test.call("say_hello", &[]).unwrap(); + + let logs = test.logs(5).unwrap(); + let hello_world_count = logs.iter().filter(|l| l.contains("Hello, World!")).count(); + let hello_tyler_count = logs.iter().filter(|l| l.contains("Hello, Tyler!")).count(); + + assert_eq!(hello_world_count, 2, "Expected 2 'Hello, World!' in logs"); + assert_eq!(hello_tyler_count, 1, "Expected 1 'Hello, Tyler!' in logs"); + + // Query via SQL + test.assert_sql( + "SELECT * FROM person", + r#" name +--------- + "Tyler""#, + ); +} diff --git a/crates/smoketests/tests/panic.rs b/crates/smoketests/tests/panic.rs new file mode 100644 index 00000000000..71c75470bbd --- /dev/null +++ b/crates/smoketests/tests/panic.rs @@ -0,0 +1,70 @@ +//! Panic and error handling tests translated from smoketests/tests/panic.py + +use spacetimedb_smoketests::Smoketest; + +const PANIC_MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext}; +use std::cell::RefCell; + +thread_local! 
{
+    static X: RefCell<i32> = RefCell::new(0);
+}
+#[spacetimedb::reducer]
+fn first(_ctx: &ReducerContext) {
+    X.with(|x| {
+        let _x = x.borrow_mut();
+        panic!()
+    })
+}
+#[spacetimedb::reducer]
+fn second(_ctx: &ReducerContext) {
+    X.with(|x| *x.borrow_mut());
+    log::info!("Test Passed");
+}
+"#;
+
+/// Tests to check if a SpacetimeDB module can handle a panic without corrupting its state
+#[test]
+fn test_panic() {
+    let test = Smoketest::builder().module_code(PANIC_MODULE_CODE).build();
+
+    // First reducer should panic/fail
+    let result = test.call("first", &[]);
+    assert!(result.is_err(), "Expected first reducer to fail due to panic");
+
+    // Second reducer should succeed, proving state wasn't corrupted
+    test.call("second", &[]).unwrap();
+
+    let logs = test.logs(2).unwrap();
+    assert!(
+        logs.iter().any(|msg| msg.contains("Test Passed")),
+        "Expected 'Test Passed' in logs, got: {:?}",
+        logs
+    );
+}
+
+const REDUCER_ERROR_MODULE_CODE: &str = r#"
+use spacetimedb::ReducerContext;
+
+#[spacetimedb::reducer]
+fn fail(_ctx: &ReducerContext) -> Result<(), String> {
+    Err("oopsie :(".into())
+}
+"#;
+
+/// Tests to ensure an error message returned from a reducer gets printed to logs
+#[test]
+fn test_reducer_error_message() {
+    let test = Smoketest::builder().module_code(REDUCER_ERROR_MODULE_CODE).build();
+
+    // Reducer should fail with error
+    let result = test.call("fail", &[]);
+    assert!(result.is_err(), "Expected fail reducer to return error");
+
+    let logs = test.logs(2).unwrap();
+    assert!(
+        logs.iter().any(|msg| msg.contains("oopsie :(")),
+        "Expected 'oopsie :(' in logs, got: {:?}",
+        logs
+    );
+}
diff --git a/crates/smoketests/tests/permissions.rs b/crates/smoketests/tests/permissions.rs
new file mode 100644
index 00000000000..d42f902610d
--- /dev/null
+++ b/crates/smoketests/tests/permissions.rs
@@ -0,0 +1,116 @@
+//!
Tests translated from smoketests/tests/permissions.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE_PRIVATE: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = secret, private)] +pub struct Secret { + answer: u8, +} + +#[spacetimedb::table(name = common_knowledge, public)] +pub struct CommonKnowledge { + thing: String, +} + +#[spacetimedb::reducer(init)] +pub fn init(ctx: &ReducerContext) { + ctx.db.secret().insert(Secret { answer: 42 }); +} + +#[spacetimedb::reducer] +pub fn do_thing(ctx: &ReducerContext, thing: String) { + ctx.db.secret().insert(Secret { answer: 20 }); + ctx.db.common_knowledge().insert(CommonKnowledge { thing }); +} +"#; + +/// Ensure that a private table can only be queried by the database owner +#[test] +fn test_private_table() { + let test = Smoketest::builder().module_code(MODULE_CODE_PRIVATE).build(); + + // Owner can query private table + test.assert_sql( + "SELECT * FROM secret", + r#" answer +-------- + 42"#, + ); + + // Switch to a new identity + test.new_identity().unwrap(); + + // Non-owner cannot query private table + let result = test.sql("SELECT * FROM secret"); + assert!(result.is_err(), "Expected query on private table to fail for non-owner"); + + // Subscribing to the private table fails + let result = test.subscribe(&["SELECT * FROM secret"], 0); + assert!( + result.is_err(), + "Expected subscribe to private table to fail for non-owner" + ); + + // Subscribing to the public table works + let sub = test + .subscribe_background(&["SELECT * FROM common_knowledge"], 1) + .unwrap(); + test.call("do_thing", &["godmorgon"]).unwrap(); + let events = sub.collect().unwrap(); + assert_eq!(events.len(), 1, "Expected 1 update, got {:?}", events); + + let expected = serde_json::json!({ + "common_knowledge": { + "deletes": [], + "inserts": [{"thing": "godmorgon"}] + } + }); + assert_eq!(events[0], expected); +} + +/// Ensure that you cannot delete a database that you do not own +#[test] +fn test_cannot_delete_others_database() { + let test = Smoketest::builder().build(); + + let identity = test.database_identity.as_ref().unwrap().clone(); + + // Switch to a new identity + test.new_identity().unwrap(); + + // Try to delete the database - should fail + let result = test.spacetime(&["delete", "--server", &test.server_url, &identity, "--yes"]); + assert!(result.is_err(), "Expected delete to fail for non-owner"); +} + +const MODULE_CODE_LIFECYCLE: &str = r#" +#[spacetimedb::reducer(init)] +fn lifecycle_init(_ctx: &spacetimedb::ReducerContext) {} + +#[spacetimedb::reducer(client_connected)] +fn lifecycle_client_connected(_ctx: &spacetimedb::ReducerContext) {} + +#[spacetimedb::reducer(client_disconnected)] +fn lifecycle_client_disconnected(_ctx: &spacetimedb::ReducerContext) {} +"#; + +/// Ensure that lifecycle reducers (init, on_connect, etc) can't be called directly +#[test] +fn test_lifecycle_reducers_cant_be_called() { + let test = Smoketest::builder().module_code(MODULE_CODE_LIFECYCLE).build(); + + let lifecycle_kinds = ["init", "client_connected", "client_disconnected"]; + + for kind in lifecycle_kinds { + let reducer_name = format!("lifecycle_{}", kind); + let result = test.call(&reducer_name, &[]); + assert!( + result.is_err(), + "Expected call to lifecycle reducer '{}' to fail", + reducer_name + ); + } +} diff --git a/crates/smoketests/tests/pg_wire.rs b/crates/smoketests/tests/pg_wire.rs new file mode 100644 index 00000000000..71bcacd754c --- /dev/null +++ b/crates/smoketests/tests/pg_wire.rs @@ -0,0 +1,290 @@ 
+#![allow(clippy::disallowed_macros)] +//! Tests translated from smoketests/tests/pg_wire.py + +use spacetimedb_smoketests::{have_psql, Smoketest}; + +const MODULE_CODE: &str = r#" +use spacetimedb::sats::{i256, u256}; +use spacetimedb::{ConnectionId, Identity, ReducerContext, SpacetimeType, Table, Timestamp, TimeDuration, Uuid}; + +#[derive(Copy, Clone)] +#[spacetimedb::table(name = t_ints, public)] +pub struct TInts { + i8: i8, + i16: i16, + i32: i32, + i64: i64, + i128: i128, + i256: i256, +} + +#[spacetimedb::table(name = t_ints_tuple, public)] +pub struct TIntsTuple { + tuple: TInts, +} + +#[derive(Copy, Clone)] +#[spacetimedb::table(name = t_uints, public)] +pub struct TUints { + u8: u8, + u16: u16, + u32: u32, + u64: u64, + u128: u128, + u256: u256, +} + +#[spacetimedb::table(name = t_uints_tuple, public)] +pub struct TUintsTuple { + tuple: TUints, +} + +#[derive(Clone)] +#[spacetimedb::table(name = t_others, public)] +pub struct TOthers { + bool: bool, + f32: f32, + f64: f64, + str: String, + bytes: Vec, + identity: Identity, + connection_id: ConnectionId, + timestamp: Timestamp, + duration: TimeDuration, + uuid: Uuid, +} + +#[spacetimedb::table(name = t_others_tuple, public)] +pub struct TOthersTuple { + tuple: TOthers +} + +#[derive(SpacetimeType, Debug, Clone, Copy)] +pub enum Action { + Inactive, + Active, +} + +#[derive(SpacetimeType, Debug, Clone, Copy)] +pub enum Color { + Gray(u8), +} + +#[derive(Copy, Clone)] +#[spacetimedb::table(name = t_simple_enum, public)] +pub struct TSimpleEnum { + id: u32, + action: Action, +} + +#[spacetimedb::table(name = t_enum, public)] +pub struct TEnum { + id: u32, + color: Color, +} + +#[spacetimedb::table(name = t_nested, public)] +pub struct TNested { + en: TEnum, + se: TSimpleEnum, + ints: TInts, +} + +#[derive(Clone)] +#[spacetimedb::table(name = t_enums)] +pub struct TEnums { + bool_opt: Option, + bool_result: Result, + action: Action, +} + +#[spacetimedb::table(name = t_enums_tuple)] +pub struct TEnumsTuple { + tuple: TEnums, +} + +#[spacetimedb::reducer] +pub fn test(ctx: &ReducerContext) { + let tuple = TInts { + i8: -25, + i16: -3224, + i32: -23443, + i64: -2344353, + i128: -234434897853, + i256: (-234434897853i128).into(), + }; + let ints = tuple; + ctx.db.t_ints().insert(tuple); + ctx.db.t_ints_tuple().insert(TIntsTuple { tuple }); + + let tuple = TUints { + u8: 105, + u16: 1050, + u32: 83892, + u64: 48937498, + u128: 4378528978889, + u256: 4378528978889u128.into(), + }; + ctx.db.t_uints().insert(tuple); + ctx.db.t_uints_tuple().insert(TUintsTuple { tuple }); + + let tuple = TOthers { + bool: true, + f32: 594806.58906, + f64: -3454353.345389043278459, + str: "This is spacetimedb".to_string(), + bytes: vec!(1, 2, 3, 4, 5, 6, 7), + identity: Identity::ONE, + connection_id: ConnectionId::ZERO, + timestamp: Timestamp::UNIX_EPOCH, + duration: TimeDuration::from_micros(1000 * 10000), + uuid: Uuid::NIL, + }; + ctx.db.t_others().insert(tuple.clone()); + ctx.db.t_others_tuple().insert(TOthersTuple { tuple }); + + ctx.db.t_simple_enum().insert(TSimpleEnum { id: 1, action: Action::Inactive }); + ctx.db.t_simple_enum().insert(TSimpleEnum { id: 2, action: Action::Active }); + + ctx.db.t_enum().insert(TEnum { id: 1, color: Color::Gray(128) }); + + ctx.db.t_nested().insert(TNested { + en: TEnum { id: 1, color: Color::Gray(128) }, + se: TSimpleEnum { id: 2, action: Action::Active }, + ints, + }); + + let tuple = TEnums { + bool_opt: Some(true), + bool_result: Ok(false), + action: Action::Active, + }; + + ctx.db.t_enums().insert(tuple.clone()); + 
ctx.db.t_enums_tuple().insert(TEnumsTuple { tuple }); +} +"#; + +/// Test SQL output formatting via psql +#[test] +fn test_sql_format() { + if !have_psql() { + eprintln!("Skipping test_sql_format: psql not available"); + return; + } + + let mut test = Smoketest::builder() + .module_code(MODULE_CODE) + .pg_port(5433) // Use non-standard port to avoid conflicts + .autopublish(false) + .build(); + + test.publish_module_named("quickstart", true).unwrap(); + test.call("test", &[]).unwrap(); + + test.assert_psql( + "quickstart", + "SELECT * FROM t_ints", + r#"i8 | i16 | i32 | i64 | i128 | i256 +-----+-------+--------+----------+---------------+--------------- + -25 | -3224 | -23443 | -2344353 | -234434897853 | -234434897853 +(1 row)"#, + ); + + test.assert_psql( + "quickstart", + "SELECT * FROM t_ints_tuple", + r#"tuple +--------------------------------------------------------------------------------------------------------- + {"i8": -25, "i16": -3224, "i32": -23443, "i64": -2344353, "i128": -234434897853, "i256": -234434897853} +(1 row)"#, + ); + + test.assert_psql( + "quickstart", + "SELECT * FROM t_uints", + r#"u8 | u16 | u32 | u64 | u128 | u256 +-----+------+-------+----------+---------------+--------------- + 105 | 1050 | 83892 | 48937498 | 4378528978889 | 4378528978889 +(1 row)"#, + ); + + test.assert_psql( + "quickstart", + "SELECT * FROM t_uints_tuple", + r#"tuple +------------------------------------------------------------------------------------------------------- + {"u8": 105, "u16": 1050, "u32": 83892, "u64": 48937498, "u128": 4378528978889, "u256": 4378528978889} +(1 row)"#, + ); + + test.assert_psql( + "quickstart", + "SELECT * FROM t_simple_enum", + r#"id | action +----+---------- + 1 | Inactive + 2 | Active +(2 rows)"#, + ); + + test.assert_psql( + "quickstart", + "SELECT * FROM t_enum", + r#"id | color +----+--------------- + 1 | {"Gray": 128} +(1 row)"#, + ); +} + +/// Test failure cases +#[test] +fn test_failures() { + if !have_psql() { + eprintln!("Skipping test_failures: psql not available"); + return; + } + + let mut test = Smoketest::builder() + .module_code(MODULE_CODE) + .pg_port(5434) // Use different port from test_sql_format + .autopublish(false) + .build(); + + test.publish_module_named("quickstart", true).unwrap(); + + // Empty query returns empty result + let output = test.psql("quickstart", "").unwrap(); + assert!( + output.is_empty(), + "Expected empty output for empty query, got: {}", + output + ); + + // Connection fails with invalid token - we can't easily test this without + // modifying the token, so skip this part + + // Returns error for unsupported sql statements + let result = test.psql( + "quickstart", + "SELECT CASE a WHEN 1 THEN 'one' ELSE 'other' END FROM t_uints", + ); + assert!(result.is_err(), "Expected error for unsupported SQL"); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("Unsupported") || err.contains("unsupported"), + "Expected 'Unsupported' in error message, got: {}", + err + ); + + // And prepared statements + let result = test.psql("quickstart", "SELECT * FROM t_uints where u8 = $1"); + assert!(result.is_err(), "Expected error for prepared statement"); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("Unsupported") || err.contains("unsupported"), + "Expected 'Unsupported' in error message, got: {}", + err + ); +} diff --git a/crates/smoketests/tests/quickstart.rs b/crates/smoketests/tests/quickstart.rs new file mode 100644 index 00000000000..48a781bc3d2 --- /dev/null +++ 
b/crates/smoketests/tests/quickstart.rs @@ -0,0 +1,670 @@ +#![allow(clippy::disallowed_macros)] +//! Tests translated from smoketests/tests/quickstart.py +//! +//! This test validates that the quickstart documentation is correct by extracting +//! code from markdown docs and running it. + +use anyhow::{bail, Context, Result}; +use spacetimedb_smoketests::{have_dotnet, have_pnpm, parse_quickstart, workspace_root, Smoketest}; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; + +/// Write content to a file, creating parent directories as needed. +fn write_file(path: &Path, content: &str) -> Result<()> { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + fs::write(path, content)?; + Ok(()) +} + +/// Append content to a file. +fn append_to_file(path: &Path, content: &str) -> Result<()> { + use std::io::Write; + let mut file = fs::OpenOptions::new().append(true).open(path)?; + file.write_all(content.as_bytes())?; + Ok(()) +} + +/// Run a command and return stdout as a string. +fn run_cmd(args: &[&str], cwd: &Path, input: Option<&str>) -> Result { + let mut cmd = Command::new(args[0]); + cmd.args(&args[1..]) + .current_dir(cwd) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()); + + if input.is_some() { + cmd.stdin(Stdio::piped()); + } + + let mut child = cmd.spawn().context(format!("Failed to spawn {:?}", args))?; + + if let Some(input_str) = input { + use std::io::Write; + if let Some(stdin) = child.stdin.as_mut() { + stdin.write_all(input_str.as_bytes())?; + } + } + + let output = child.wait_with_output()?; + + if !output.status.success() { + bail!( + "Command {:?} failed:\nstdout: {}\nstderr: {}", + args, + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); + } + + Ok(String::from_utf8_lossy(&output.stdout).to_string()) +} + +/// Run pnpm command. +fn pnpm(args: &[&str], cwd: &Path) -> Result { + let mut full_args = vec!["pnpm"]; + full_args.extend(args); + run_cmd(&full_args, cwd, None) +} + +/// Build the TypeScript SDK. +fn build_typescript_sdk() -> Result<()> { + let workspace = workspace_root(); + let ts_bindings = workspace.join("crates/bindings-typescript"); + pnpm(&["install"], &ts_bindings)?; + pnpm(&["build"], &ts_bindings)?; + Ok(()) +} + +/// Load NuGet config from a file, returning a simple representation. +/// We'll use a string-based approach for simplicity since we don't have xmltodict. +fn create_nuget_config(sources: &[(String, PathBuf)], mappings: &[(String, String)]) -> String { + let mut source_lines = String::new(); + let mut mapping_lines = String::new(); + + for (key, path) in sources { + source_lines.push_str(&format!(" \n", key, path.display())); + } + + for (key, pattern) in mappings { + mapping_lines.push_str(&format!( + " \n \n \n", + key, pattern + )); + } + + format!( + r#" + + +{} + +{} + +"#, + source_lines, mapping_lines + ) +} + +/// Override nuget config to use a local NuGet package on a .NET project. 
+fn override_nuget_package(project_dir: &Path, package: &str, source_dir: &Path, build_subdir: &str) -> Result<()> { + // Make sure the local package is built + let output = Command::new("dotnet") + .args(["pack"]) + .current_dir(source_dir) + .output() + .context("Failed to run dotnet pack")?; + + if !output.status.success() { + bail!( + "dotnet pack failed:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); + } + + let nuget_config_path = project_dir.join("nuget.config"); + let package_path = source_dir.join(build_subdir); + + // Read existing config or create new one + let (mut sources, mut mappings) = if nuget_config_path.exists() { + // Parse existing config - simplified approach + let content = fs::read_to_string(&nuget_config_path)?; + parse_nuget_config(&content) + } else { + (Vec::new(), Vec::new()) + }; + + // Add new source + sources.push((package.to_string(), package_path)); + + // Add mapping for the package + mappings.push((package.to_string(), package.to_string())); + + // Ensure nuget.org fallback exists + if !mappings.iter().any(|(k, _)| k == "nuget.org") { + mappings.push(("nuget.org".to_string(), "*".to_string())); + } + + // Write config + let config = create_nuget_config(&sources, &mappings); + fs::write(&nuget_config_path, config)?; + + // Clear nuget caches + let _ = Command::new("dotnet") + .args(["nuget", "locals", "--clear", "all"]) + .stderr(Stdio::null()) + .output(); + + Ok(()) +} + +/// Parse an existing nuget.config file (simplified). +#[allow(clippy::type_complexity)] +fn parse_nuget_config(content: &str) -> (Vec<(String, PathBuf)>, Vec<(String, String)>) { + let mut sources = Vec::new(); + let mut mappings = Vec::new(); + + // Simple regex-based parsing + let source_re = regex::Regex::new(r#"\s* Self { + Self { + lang: "rust", + client_lang: "rust", + server_file: "src/lib.rs", + client_file: "src/main.rs", + module_bindings: "src/module_bindings", + run_cmd: &["cargo", "run"], + build_cmd: &["cargo", "build"], + replacements: &[ + // Replace the interactive user input to allow direct testing + ("user_input_loop(&ctx)", "user_input_direct(&ctx)"), + // Don't cache the token, because it will cause the test to fail if we run against a non-default server + (".with_token(creds_store()", "//.with_token(creds_store()"), + ], + extra_code: r#" +fn user_input_direct(ctx: &DbConnection) { + let mut line = String::new(); + std::io::stdin().read_line(&mut line).expect("Failed to read from stdin."); + if let Some(name) = line.strip_prefix("/name ") { + ctx.reducers.set_name(name.to_string()).unwrap(); + } else { + ctx.reducers.send_message(line).unwrap(); + } + std::thread::sleep(std::time::Duration::from_secs(1)); + std::process::exit(0); +} +"#, + connected_str: "connected", + } + } + + fn csharp() -> Self { + Self { + lang: "csharp", + client_lang: "csharp", + server_file: "Lib.cs", + client_file: "Program.cs", + module_bindings: "module_bindings", + run_cmd: &["dotnet", "run"], + build_cmd: &["dotnet", "build"], + replacements: &[ + // Replace the interactive user input to allow direct testing + ("InputLoop();", "UserInputDirect();"), + (".OnConnect(OnConnected)", ".OnConnect(OnConnectedSignal)"), + ( + ".OnConnectError(OnConnectError)", + ".OnConnectError(OnConnectErrorSignal)", + ), + // Don't cache the token + (".WithToken(AuthToken.Token)", "//.WithToken(AuthToken.Token)"), + // To put the main function at the end so it can see the new functions + ("Main();", ""), + ], + extra_code: r#" +var 
connectedEvent = new ManualResetEventSlim(false); +var connectionFailed = new ManualResetEventSlim(false); +void OnConnectErrorSignal(Exception e) +{ + OnConnectError(e); + connectionFailed.Set(); +} +void OnConnectedSignal(DbConnection conn, Identity identity, string authToken) +{ + OnConnected(conn, identity, authToken); + connectedEvent.Set(); +} + +void UserInputDirect() { + string? line = Console.In.ReadToEnd()?.Trim(); + if (line == null) Environment.Exit(0); + + if (!WaitHandle.WaitAny( + new[] { connectedEvent.WaitHandle, connectionFailed.WaitHandle }, + TimeSpan.FromSeconds(5) + ).Equals(0)) + { + Console.WriteLine("Failed to connect to server within timeout."); + Environment.Exit(1); + } + + if (line.StartsWith("/name ")) { + input_queue.Enqueue(("name", line[6..])); + } else { + input_queue.Enqueue(("message", line)); + } + Thread.Sleep(1000); +} +Main(); +"#, + connected_str: "Connected", + } + } + + fn typescript() -> Self { + // TypeScript server uses Rust client because the TypeScript client + // quickstart is a React app, which is difficult to smoketest. + Self { + lang: "typescript", + client_lang: "rust", + server_file: "src/index.ts", + // Client uses Rust config + client_file: "src/main.rs", + module_bindings: "src/module_bindings", + run_cmd: &["cargo", "run"], + build_cmd: &["cargo", "build"], + replacements: &[ + ("user_input_loop(&ctx)", "user_input_direct(&ctx)"), + (".with_token(creds_store()", "//.with_token(creds_store()"), + ], + extra_code: r#" +fn user_input_direct(ctx: &DbConnection) { + let mut line = String::new(); + std::io::stdin().read_line(&mut line).expect("Failed to read from stdin."); + if let Some(name) = line.strip_prefix("/name ") { + ctx.reducers.set_name(name.to_string()).unwrap(); + } else { + ctx.reducers.send_message(line).unwrap(); + } + std::thread::sleep(std::time::Duration::from_secs(1)); + std::process::exit(0); +} +"#, + connected_str: "connected", + } + } +} + +/// Quickstart test runner. +struct QuickstartTest { + test: Smoketest, + config: QuickstartConfig, + project_path: PathBuf, + /// Temp directory for server/client - kept alive for duration of test + _temp_dir: Option, +} + +impl QuickstartTest { + fn new(config: QuickstartConfig) -> Self { + let test = Smoketest::builder().autopublish(false).build(); + Self { + test, + config, + project_path: PathBuf::new(), + _temp_dir: None, + } + } + + fn module_name(&self) -> String { + format!("quickstart-chat-{}", self.config.lang) + } + + fn doc_path(&self) -> PathBuf { + workspace_root().join("docs/docs/00100-intro/00300-tutorials/00100-chat-app.md") + } + + /// Generate the server code from the quickstart documentation. 
+    fn generate_server(&mut self, server_path: &Path) -> Result<PathBuf> {
+        let workspace = workspace_root();
+        eprintln!("Generating server code {}: {:?}...", self.config.lang, server_path);
+
+        // Initialize the project (local operation, doesn't need server)
+        let output = self.test.spacetime(&[
+            "init",
+            "--non-interactive",
+            "--lang",
+            self.config.lang,
+            "--project-path",
+            server_path.to_str().unwrap(),
+            "spacetimedb-project",
+        ])?;
+        eprintln!("spacetime init output: {}", output);
+
+        let project_path = server_path.join("spacetimedb");
+        self.project_path = project_path.clone();
+
+        // Copy rust-toolchain.toml
+        let toolchain_src = workspace.join("rust-toolchain.toml");
+        if toolchain_src.exists() {
+            fs::copy(&toolchain_src, project_path.join("rust-toolchain.toml"))?;
+        }
+
+        // Read and parse the documentation
+        let doc_content = fs::read_to_string(self.doc_path())?;
+        let server_code = parse_quickstart(&doc_content, self.config.lang, &self.module_name(), true);
+
+        // Write server code
+        write_file(&project_path.join(self.config.server_file), &server_code)?;
+
+        // Language-specific server postprocessing
+        self.server_postprocess(&project_path)?;
+
+        // Build the server (local operation)
+        self.test
+            .spacetime(&["build", "-d", "-p", project_path.to_str().unwrap()])?;
+
+        Ok(project_path)
+    }
+
+    /// Language-specific server postprocessing.
+    fn server_postprocess(&self, server_path: &Path) -> Result<()> {
+        let workspace = workspace_root();
+
+        match self.config.lang {
+            "rust" => {
+                // Write the Cargo.toml with local bindings path
+                let bindings_path = workspace.join("crates/bindings");
+                let bindings_path_str = bindings_path.display().to_string().replace('\\', "/");
+
+                let cargo_toml = format!(
+                    r#"[package]
+name = "spacetimedb-quickstart-module"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+crate-type = ["cdylib"]
+
+[dependencies]
+spacetimedb = {{ path = "{}", features = ["unstable"] }}
+log = "0.4"
+"#,
+                    bindings_path_str
+                );
+                fs::write(server_path.join("Cargo.toml"), cargo_toml)?;
+            }
+            "csharp" => {
+                // Set up local NuGet packages
+                override_nuget_package(
+                    server_path,
+                    "SpacetimeDB.Runtime",
+                    &workspace.join("crates/bindings-csharp/Runtime"),
+                    "bin/Release",
+                )?;
+                override_nuget_package(
+                    server_path,
+                    "SpacetimeDB.BSATN.Runtime",
+                    &workspace.join("crates/bindings-csharp/BSATN.Runtime"),
+                    "bin/Release",
+                )?;
+            }
+            "typescript" => {
+                // Build and link the TypeScript SDK
+                build_typescript_sdk()?;
+
+                // Uninstall spacetimedb first to avoid pnpm issues
+                let _ = pnpm(&["uninstall", "spacetimedb"], server_path);
+
+                // Install the local SDK
+                let ts_bindings = workspace.join("crates/bindings-typescript");
+                pnpm(&["install", ts_bindings.to_str().unwrap()], server_path)?;
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+
+    /// Initialize the client project.
+    fn project_init(&self, client_path: &Path) -> Result<()> {
+        match self.config.client_lang {
+            "rust" => {
+                let parent = client_path.parent().unwrap();
+                run_cmd(
+                    &["cargo", "new", "--bin", "--name", "quickstart_chat_client", "client"],
+                    parent,
+                    None,
+                )?;
+            }
+            "csharp" => {
+                run_cmd(
+                    &[
+                        "dotnet",
+                        "new",
+                        "console",
+                        "--name",
+                        "QuickstartChatClient",
+                        "--output",
+                        client_path.to_str().unwrap(),
+                    ],
+                    client_path.parent().unwrap(),
+                    None,
+                )?;
+            }
+            _ => {}
+        }
+        Ok(())
+    }
+
+    /// Set up the SDK for the client.
+    fn sdk_setup(&self, client_path: &Path) -> Result<()> {
+        let workspace = workspace_root();
+
+        match self.config.client_lang {
+            "rust" => {
+                let sdk_rust_path = workspace.join("sdks/rust");
+                let sdk_rust_toml_escaped = sdk_rust_path.display().to_string().replace('\\', "\\\\\\\\"); // double escape for toml
+                let sdk_rust_toml = format!(
+                    "spacetimedb-sdk = {{ path = \"{}\" }}\nlog = \"0.4\"\nhex = \"0.4\"\n",
+                    sdk_rust_toml_escaped
+                );
+                append_to_file(&client_path.join("Cargo.toml"), &sdk_rust_toml)?;
+            }
+            "csharp" => {
+                // Set up NuGet packages for C# SDK
+                override_nuget_package(
+                    &workspace.join("sdks/csharp"),
+                    "SpacetimeDB.BSATN.Runtime",
+                    &workspace.join("crates/bindings-csharp/BSATN.Runtime"),
+                    "bin/Release",
+                )?;
+                override_nuget_package(
+                    &workspace.join("sdks/csharp"),
+                    "SpacetimeDB.Runtime",
+                    &workspace.join("crates/bindings-csharp/Runtime"),
+                    "bin/Release",
+                )?;
+                override_nuget_package(
+                    client_path,
+                    "SpacetimeDB.BSATN.Runtime",
+                    &workspace.join("crates/bindings-csharp/BSATN.Runtime"),
+                    "bin/Release",
+                )?;
+                override_nuget_package(
+                    client_path,
+                    "SpacetimeDB.ClientSDK",
+                    &workspace.join("sdks/csharp"),
+                    "bin~/Release",
+                )?;
+
+                run_cmd(
+                    &["dotnet", "add", "package", "SpacetimeDB.ClientSDK"],
+                    client_path,
+                    None,
+                )?;
+            }
+            _ => {}
+        }
+        Ok(())
+    }
+
+    /// Run the client with input and check output.
+    fn check(&self, input: &str, client_path: &Path, contains: &str) -> Result<()> {
+        let output = run_cmd(self.config.run_cmd, client_path, Some(input))?;
+        eprintln!("Output for {} client:\n{}", self.config.lang, output);
+
+        if !output.contains(contains) {
+            bail!("Expected output to contain '{}', but got:\n{}", contains, output);
+        }
+        Ok(())
+    }
+
+    /// Publish the module and return the client path.
+    fn publish(&mut self) -> Result<PathBuf> {
+        let temp_dir = tempfile::tempdir()?;
+        let base_path = temp_dir.path().to_path_buf();
+        self._temp_dir = Some(temp_dir);
+        let server_path = base_path.join("server");
+
+        self.generate_server(&server_path)?;
+
+        // Publish the module
+        let project_path_str = self.project_path.to_str().unwrap().to_string();
+        let publish_output = self.test.spacetime(&[
+            "publish",
+            "--server",
+            &self.test.server_url,
+            "--project-path",
+            &project_path_str,
+            "--yes",
+            "--clear-database",
+            &self.module_name(),
+        ])?;
+
+        // Parse the identity from publish output
+        let re = regex::Regex::new(r"identity: ([0-9a-fA-F]+)").unwrap();
+        if let Some(caps) = re.captures(&publish_output) {
+            let identity = caps.get(1).unwrap().as_str().to_string();
+            self.test.database_identity = Some(identity);
+        } else {
+            bail!(
+                "Failed to parse database identity from publish output: {}",
+                publish_output
+            );
+        }
+
+        Ok(base_path.join("client"))
+    }
+
+    /// Run the full quickstart test.
+ fn run_quickstart(&mut self) -> Result<()> { + let client_path = self.publish()?; + + self.project_init(&client_path)?; + self.sdk_setup(&client_path)?; + + // Build the client + run_cmd(self.config.build_cmd, &client_path, None)?; + + // Generate bindings (local operation) + let bindings_path = client_path.join(self.config.module_bindings); + let project_path_str = self.project_path.to_str().unwrap().to_string(); + self.test.spacetime(&[ + "generate", + "--lang", + self.config.client_lang, + "--out-dir", + bindings_path.to_str().unwrap(), + "--project-path", + &project_path_str, + ])?; + + // Read and parse client code from documentation + let doc_content = fs::read_to_string(self.doc_path())?; + let mut main_code = parse_quickstart(&doc_content, self.config.client_lang, &self.module_name(), false); + + // Apply replacements + for (src, dst) in self.config.replacements { + main_code = main_code.replace(src, dst); + } + + // Add extra code + main_code.push('\n'); + main_code.push_str(self.config.extra_code); + + // Replace server address + let host = self.test.server_host(); + let protocol = "http"; // The smoketest server uses http + main_code = main_code.replace("http://localhost:3000", &format!("{}://{}", protocol, host)); + + // Write the client code + write_file(&client_path.join(self.config.client_file), &main_code)?; + + // Run the three test interactions + self.check("", &client_path, self.config.connected_str)?; + self.check("/name Alice", &client_path, "Alice")?; + self.check("Hello World", &client_path, "Hello World")?; + + Ok(()) + } +} + +/// Run the Rust quickstart guides for server and client. +#[test] +fn test_quickstart_rust() { + let mut qt = QuickstartTest::new(QuickstartConfig::rust()); + qt.run_quickstart().expect("Rust quickstart test failed"); +} + +/// Run the C# quickstart guides for server and client. +#[test] +fn test_quickstart_csharp() { + if !have_dotnet() { + eprintln!("Skipping test_quickstart_csharp: dotnet 8.0+ not available"); + return; + } + + let mut qt = QuickstartTest::new(QuickstartConfig::csharp()); + qt.run_quickstart().expect("C# quickstart test failed"); +} + +/// Run the TypeScript quickstart for server (with Rust client). +#[test] +fn test_quickstart_typescript() { + if !have_pnpm() { + eprintln!("Skipping test_quickstart_typescript: pnpm not available"); + return; + } + + let mut qt = QuickstartTest::new(QuickstartConfig::typescript()); + qt.run_quickstart().expect("TypeScript quickstart test failed"); +} diff --git a/crates/smoketests/tests/restart.rs b/crates/smoketests/tests/restart.rs new file mode 100644 index 00000000000..ab412604736 --- /dev/null +++ b/crates/smoketests/tests/restart.rs @@ -0,0 +1,259 @@ +//! Tests for server restart behavior. +//! 
Translated from smoketests/tests/zz_docker.py + +use spacetimedb_smoketests::Smoketest; + +const PERSON_MODULE: &str = r#" +use spacetimedb::{log, ReducerContext, Table}; + +#[spacetimedb::table(name = person, index(name = name_idx, btree(columns = [name])))] +pub struct Person { + #[primary_key] + #[auto_inc] + id: u32, + name: String, +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { id: 0, name }); +} + +#[spacetimedb::reducer] +pub fn say_hello(ctx: &ReducerContext) { + for person in ctx.db.person().iter() { + log::info!("Hello, {}!", person.name); + } + log::info!("Hello, World!"); +} +"#; + +const CONNECTED_CLIENT_MODULE: &str = r#" +use log::info; +use spacetimedb::{ConnectionId, Identity, ReducerContext, Table}; + +#[spacetimedb::table(name = connected_client)] +pub struct ConnectedClient { + identity: Identity, + connection_id: ConnectionId, +} + +#[spacetimedb::reducer(client_connected)] +fn on_connect(ctx: &ReducerContext) { + ctx.db.connected_client().insert(ConnectedClient { + identity: ctx.sender, + connection_id: ctx.connection_id.expect("sender connection id unset"), + }); +} + +#[spacetimedb::reducer(client_disconnected)] +fn on_disconnect(ctx: &ReducerContext) { + let sender_identity = &ctx.sender; + let sender_connection_id = ctx.connection_id.as_ref().expect("sender connection id unset"); + let match_client = |row: &ConnectedClient| { + &row.identity == sender_identity && &row.connection_id == sender_connection_id + }; + if let Some(client) = ctx.db.connected_client().iter().find(match_client) { + ctx.db.connected_client().delete(client); + } +} + +#[spacetimedb::reducer] +fn print_num_connected(ctx: &ReducerContext) { + let n = ctx.db.connected_client().count(); + info!("CONNECTED CLIENTS: {n}") +} +"#; + +/// Test data persistence across server restart. +/// +/// This tests to see if SpacetimeDB can be queried after a restart. +#[test] +fn test_restart_module() { + let mut test = Smoketest::builder().module_code(PERSON_MODULE).build(); + + test.call("add", &["Robert"]).unwrap(); + + test.restart_server(); + + test.call("add", &["Julie"]).unwrap(); + test.call("add", &["Samantha"]).unwrap(); + test.call("say_hello", &[]).unwrap(); + + let logs = test.logs(100).unwrap(); + assert!( + logs.iter().any(|l| l.contains("Hello, Robert!")), + "Missing 'Hello, Robert!' in logs" + ); + assert!( + logs.iter().any(|l| l.contains("Hello, Julie!")), + "Missing 'Hello, Julie!' in logs" + ); + assert!( + logs.iter().any(|l| l.contains("Hello, Samantha!")), + "Missing 'Hello, Samantha!' in logs" + ); + assert!( + logs.iter().any(|l| l.contains("Hello, World!")), + "Missing 'Hello, World!' in logs" + ); +} + +/// Test SQL queries work after restart. +#[test] +fn test_restart_sql() { + let mut test = Smoketest::builder().module_code(PERSON_MODULE).build(); + + test.call("add", &["Robert"]).unwrap(); + test.call("add", &["Julie"]).unwrap(); + test.call("add", &["Samantha"]).unwrap(); + + test.restart_server(); + + let output = test.sql("SELECT name FROM person WHERE id = 3").unwrap(); + assert!( + output.contains("Samantha"), + "Expected 'Samantha' in SQL output: {}", + output + ); +} + +/// Test clients are auto-disconnected on restart. 
+#[test] +fn test_restart_auto_disconnect() { + let mut test = Smoketest::builder().module_code(CONNECTED_CLIENT_MODULE).build(); + + // Start two subscribers in the background + let sub1 = test + .subscribe_background(&["SELECT * FROM connected_client"], 2) + .unwrap(); + let sub2 = test + .subscribe_background(&["SELECT * FROM connected_client"], 2) + .unwrap(); + + // Call print_num_connected and check we have 3 clients (2 subscribers + the call) + test.call("print_num_connected", &[]).unwrap(); + let logs = test.logs(10).unwrap(); + assert!( + logs.iter().any(|l| l.contains("CONNECTED CLIENTS: 3")), + "Expected 3 connected clients before restart, logs: {:?}", + logs + ); + + // Restart the server - this should disconnect all clients + test.restart_server(); + + // The subscriptions should fail/complete since the server restarted + // We don't wait for them, just drop the handles + drop(sub1); + drop(sub2); + + // After restart, only the current call should be connected + test.call("print_num_connected", &[]).unwrap(); + let logs = test.logs(10).unwrap(); + assert!( + logs.iter().any(|l| l.contains("CONNECTED CLIENTS: 1")), + "Expected 1 connected client after restart, logs: {:?}", + logs + ); +} + +// Module code for add_remove_index test (without indices) +const ADD_REMOVE_MODULE: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = t1)] +pub struct T1 { id: u64 } + +#[spacetimedb::table(name = t2)] +pub struct T2 { id: u64 } + +#[spacetimedb::reducer(init)] +pub fn init(ctx: &ReducerContext) { + for id in 0..1_000 { + ctx.db.t1().insert(T1 { id }); + ctx.db.t2().insert(T2 { id }); + } +} +"#; + +// Module code for add_remove_index test (with indices) +const ADD_REMOVE_MODULE_INDEXED: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = t1)] +pub struct T1 { #[index(btree)] id: u64 } + +#[spacetimedb::table(name = t2)] +pub struct T2 { #[index(btree)] id: u64 } + +#[spacetimedb::reducer(init)] +pub fn init(ctx: &ReducerContext) { + for id in 0..1_000 { + ctx.db.t1().insert(T1 { id }); + ctx.db.t2().insert(T2 { id }); + } +} + +#[spacetimedb::reducer] +pub fn add(ctx: &ReducerContext) { + let id = 1_001; + ctx.db.t1().insert(T1 { id }); + ctx.db.t2().insert(T2 { id }); +} +"#; + +const JOIN_QUERY: &str = "select t1.* from t1 join t2 on t1.id = t2.id where t2.id = 1001"; + +/// Test autoinc sequences work correctly after restart. +/// +/// This is the `AddRemoveIndex` test from add_remove_index.py, +/// but restarts the server between each publish. +/// +/// This detects a bug we once had where the system autoinc sequences +/// were borked after restart, leading newly-created database objects +/// to re-use IDs. +#[test] +fn test_add_remove_index_after_restart() { + let mut test = Smoketest::builder() + .module_code(ADD_REMOVE_MODULE) + .autopublish(false) + .build(); + + let name = format!("test-db-{}", std::process::id()); + + // Publish and attempt subscribing to a join query. + // There are no indices, resulting in an unsupported unindexed join. + test.publish_module_named(&name, false).unwrap(); + let result = test.subscribe(&[JOIN_QUERY], 0); + assert!(result.is_err(), "Expected subscription to fail without indices"); + + // Restart before adding indices + test.restart_server(); + + // Publish the indexed version. + // Now we have indices, so the query should be accepted. 
+ test.write_module_code(ADD_REMOVE_MODULE_INDEXED).unwrap(); + test.publish_module_named(&name, false).unwrap(); + + // Subscription should work now + let result = test.subscribe(&[JOIN_QUERY], 0); + assert!( + result.is_ok(), + "Expected subscription to succeed with indices, got: {:?}", + result.err() + ); + + // Verify call works too + test.call("add", &[]).unwrap(); + + // Restart before removing indices + test.restart_server(); + + // Publish the unindexed version again, removing the index. + // The initial subscription should be rejected again. + test.write_module_code(ADD_REMOVE_MODULE).unwrap(); + test.publish_module_named(&name, false).unwrap(); + let result = test.subscribe(&[JOIN_QUERY], 0); + assert!(result.is_err(), "Expected subscription to fail after removing indices"); +} diff --git a/crates/smoketests/tests/rls.rs b/crates/smoketests/tests/rls.rs new file mode 100644 index 00000000000..d60f774159d --- /dev/null +++ b/crates/smoketests/tests/rls.rs @@ -0,0 +1,52 @@ +//! Tests translated from smoketests/tests/rls.py + +use spacetimedb_smoketests::Smoketest; + +const MODULE_CODE: &str = r#" +use spacetimedb::{Identity, ReducerContext, Table}; + +#[spacetimedb::table(name = users, public)] +pub struct Users { + name: String, + identity: Identity, +} + +#[spacetimedb::client_visibility_filter] +const USER_FILTER: spacetimedb::Filter = spacetimedb::Filter::Sql( + "SELECT * FROM users WHERE identity = :sender" +); + +#[spacetimedb::reducer] +pub fn add_user(ctx: &ReducerContext, name: String) { + ctx.db.users().insert(Users { name, identity: ctx.sender }); +} +"#; + +/// Tests for querying tables with RLS rules +#[test] +fn test_rls_rules() { + let test = Smoketest::builder().module_code(MODULE_CODE).build(); + + // Insert a user for Alice (current identity) + test.call("add_user", &["Alice"]).unwrap(); + + // Create a new identity for Bob + test.new_identity().unwrap(); + test.call("add_user", &["Bob"]).unwrap(); + + // Query the users table using Bob's identity - should only see Bob + test.assert_sql( + "SELECT name FROM users", + r#" name +------- + "Bob""#, + ); + + // Create another new identity - should see no users + test.new_identity().unwrap(); + test.assert_sql( + "SELECT name FROM users", + r#" name +------"#, + ); +} diff --git a/crates/smoketests/tests/schedule_reducer.rs b/crates/smoketests/tests/schedule_reducer.rs new file mode 100644 index 00000000000..9c602867f38 --- /dev/null +++ b/crates/smoketests/tests/schedule_reducer.rs @@ -0,0 +1,178 @@ +//! 
Scheduled reducer tests translated from smoketests/tests/schedule_reducer.py + +use spacetimedb_smoketests::Smoketest; +use std::thread; +use std::time::Duration; + +const CANCEL_REDUCER_MODULE_CODE: &str = r#" +use spacetimedb::{duration, log, ReducerContext, Table}; + +#[spacetimedb::reducer(init)] +fn init(ctx: &ReducerContext) { + let schedule = ctx.db.scheduled_reducer_args().insert(ScheduledReducerArgs { + num: 1, + scheduled_id: 0, + scheduled_at: duration!(100ms).into(), + }); + ctx.db.scheduled_reducer_args().scheduled_id().delete(&schedule.scheduled_id); + + let schedule = ctx.db.scheduled_reducer_args().insert(ScheduledReducerArgs { + num: 2, + scheduled_id: 0, + scheduled_at: duration!(1000ms).into(), + }); + do_cancel(ctx, schedule.scheduled_id); +} + +#[spacetimedb::table(name = scheduled_reducer_args, public, scheduled(reducer))] +pub struct ScheduledReducerArgs { + #[primary_key] + #[auto_inc] + scheduled_id: u64, + scheduled_at: spacetimedb::ScheduleAt, + num: i32, +} + +#[spacetimedb::reducer] +fn do_cancel(ctx: &ReducerContext, schedule_id: u64) { + ctx.db.scheduled_reducer_args().scheduled_id().delete(&schedule_id); +} + +#[spacetimedb::reducer] +fn reducer(_ctx: &ReducerContext, args: ScheduledReducerArgs) { + log::info!("the reducer ran: {}", args.num); +} +"#; + +/// Ensure cancelling a reducer works +#[test] +fn test_cancel_reducer() { + let test = Smoketest::builder().module_code(CANCEL_REDUCER_MODULE_CODE).build(); + + // Wait for any scheduled reducers to potentially run + thread::sleep(Duration::from_secs(2)); + + let logs = test.logs(5).unwrap(); + let logs_str = logs.join("\n"); + assert!( + !logs_str.contains("the reducer ran"), + "Expected no 'the reducer ran' in logs, got: {:?}", + logs + ); +} + +const SUBSCRIBE_SCHEDULED_TABLE_MODULE_CODE: &str = r#" +use spacetimedb::{log, duration, ReducerContext, Table, Timestamp}; + +#[spacetimedb::table(name = scheduled_table, public, scheduled(my_reducer, at = sched_at))] +pub struct ScheduledTable { + #[primary_key] + #[auto_inc] + scheduled_id: u64, + sched_at: spacetimedb::ScheduleAt, + prev: Timestamp, +} + +#[spacetimedb::reducer] +fn schedule_reducer(ctx: &ReducerContext) { + ctx.db.scheduled_table().insert(ScheduledTable { prev: Timestamp::from_micros_since_unix_epoch(0), scheduled_id: 2, sched_at: Timestamp::from_micros_since_unix_epoch(0).into(), }); +} + +#[spacetimedb::reducer] +fn schedule_repeated_reducer(ctx: &ReducerContext) { + ctx.db.scheduled_table().insert(ScheduledTable { prev: Timestamp::from_micros_since_unix_epoch(0), scheduled_id: 1, sched_at: duration!(100ms).into(), }); +} + +#[spacetimedb::reducer] +pub fn my_reducer(ctx: &ReducerContext, arg: ScheduledTable) { + log::info!("Invoked: ts={:?}, delta={:?}", ctx.timestamp, ctx.timestamp.duration_since(arg.prev)); +} +"#; + +/// Test deploying a module with a scheduled reducer and check if client receives +/// subscription update for scheduled table entry and deletion of reducer once it ran +#[test] +fn test_scheduled_table_subscription() { + let test = Smoketest::builder() + .module_code(SUBSCRIBE_SCHEDULED_TABLE_MODULE_CODE) + .build(); + + // Call a reducer to schedule a reducer (runs immediately since timestamp is 0) + test.call("schedule_reducer", &[]).unwrap(); + + // Wait for the scheduled reducer to run + thread::sleep(Duration::from_secs(2)); + + let logs = test.logs(100).unwrap(); + let invoked_count = logs.iter().filter(|line| line.contains("Invoked:")).count(); + assert_eq!( + invoked_count, 1, + "Expected scheduled reducer to run 
exactly once, but it ran {} times. Logs: {:?}", + invoked_count, logs + ); +} + +/// Test that repeated reducers run multiple times +#[test] +fn test_scheduled_table_subscription_repeated_reducer() { + let test = Smoketest::builder() + .module_code(SUBSCRIBE_SCHEDULED_TABLE_MODULE_CODE) + .build(); + + // Call a reducer to schedule a repeated reducer + test.call("schedule_repeated_reducer", &[]).unwrap(); + + // Wait for the scheduled reducer to run multiple times + thread::sleep(Duration::from_secs(2)); + + let logs = test.logs(100).unwrap(); + let invoked_count = logs.iter().filter(|line| line.contains("Invoked:")).count(); + assert!( + invoked_count > 2, + "Expected repeated reducer to run more than twice, but it ran {} times. Logs: {:?}", + invoked_count, + logs + ); +} + +const VOLATILE_NONATOMIC_MODULE_CODE: &str = r#" +use spacetimedb::{ReducerContext, Table}; + +#[spacetimedb::table(name = my_table, public)] +pub struct MyTable { + x: String, +} + +#[spacetimedb::reducer] +fn do_schedule(_ctx: &ReducerContext) { + spacetimedb::volatile_nonatomic_schedule_immediate!(do_insert("hello".to_owned())); +} + +#[spacetimedb::reducer] +fn do_insert(ctx: &ReducerContext, x: String) { + ctx.db.my_table().insert(MyTable { x }); +} +"#; + +/// Check that volatile_nonatomic_schedule_immediate works +#[test] +fn test_volatile_nonatomic_schedule_immediate() { + let test = Smoketest::builder().module_code(VOLATILE_NONATOMIC_MODULE_CODE).build(); + + // Insert directly first + test.call("do_insert", &[r#""yay!""#]).unwrap(); + + // Schedule another insert + test.call("do_schedule", &[]).unwrap(); + + // Wait a moment for the scheduled insert to complete + thread::sleep(Duration::from_millis(500)); + + // Query the table to verify both inserts happened + let result = test.sql("SELECT * FROM my_table").unwrap(); + assert!( + result.contains("yay!") && result.contains("hello"), + "Expected both 'yay!' and 'hello' in table, got: {}", + result + ); +} diff --git a/crates/smoketests/tests/servers.rs b/crates/smoketests/tests/servers.rs new file mode 100644 index 00000000000..2c10b8bc471 --- /dev/null +++ b/crates/smoketests/tests/servers.rs @@ -0,0 +1,91 @@ +//! 
Tests translated from smoketests/tests/servers.py + +use regex::Regex; +use spacetimedb_smoketests::Smoketest; + +/// Verify that we can add and list server configurations +#[test] +fn test_servers() { + let test = Smoketest::builder().autopublish(false).build(); + + // Add a test server (local-only command, no --server flag needed) + let output = test + .spacetime(&[ + "server", + "add", + "--url", + "https://testnet.spacetimedb.com", + "testnet", + "--no-fingerprint", + ]) + .unwrap(); + + assert!( + output.contains("testnet.spacetimedb.com"), + "Expected host in output: {}", + output + ); + + // List servers (local-only command) + let servers = test.spacetime(&["server", "list"]).unwrap(); + + let testnet_re = Regex::new(r"(?m)^\s*testnet\.spacetimedb\.com\s+https\s+testnet\s*$").unwrap(); + assert!( + testnet_re.is_match(&servers), + "Expected testnet in server list: {}", + servers + ); + + // Add the local test server to the config so we can check its fingerprint + test.spacetime(&[ + "server", + "add", + "--url", + &test.server_url, + "test-local", + "--no-fingerprint", + ]) + .unwrap(); + + // Check fingerprint commands (local-only command) + let output = test.spacetime(&["server", "fingerprint", "test-local", "-y"]).unwrap(); + // The exact message may vary, just check it doesn't error + assert!( + output.contains("fingerprint") || output.contains("Fingerprint"), + "Expected fingerprint message: {}", + output + ); +} + +/// Verify that we can edit server configurations +#[test] +fn test_edit_server() { + let test = Smoketest::builder().autopublish(false).build(); + + // Add a server to edit (local-only command) + test.spacetime(&["server", "add", "--url", "https://foo.com", "foo", "--no-fingerprint"]) + .unwrap(); + + // Edit the server (local-only command) + test.spacetime(&[ + "server", + "edit", + "foo", + "--url", + "https://edited-testnet.spacetimedb.com", + "--new-name", + "edited-testnet", + "--no-fingerprint", + "--yes", + ]) + .unwrap(); + + // Verify the edit (local-only command) + let servers = test.spacetime(&["server", "list"]).unwrap(); + let edited_re = Regex::new(r"(?m)^\s*edited-testnet\.spacetimedb\.com\s+https\s+edited-testnet\s*$").unwrap(); + assert!( + edited_re.is_match(&servers), + "Expected edited server in list: {}", + servers + ); +} diff --git a/crates/smoketests/tests/sql.rs b/crates/smoketests/tests/sql.rs new file mode 100644 index 00000000000..a908196aeab --- /dev/null +++ b/crates/smoketests/tests/sql.rs @@ -0,0 +1,192 @@ +//! 
+//! SQL format tests translated from smoketests/tests/sql.py
+
+use spacetimedb_smoketests::Smoketest;
+
+const SQL_FORMAT_MODULE_CODE: &str = r#"
+use spacetimedb::sats::{i256, u256};
+use spacetimedb::{ConnectionId, Identity, ReducerContext, Table, Timestamp, TimeDuration, SpacetimeType, Uuid};
+
+#[derive(Copy, Clone)]
+#[spacetimedb::table(name = t_ints)]
+pub struct TInts {
+    i8: i8,
+    i16: i16,
+    i32: i32,
+    i64: i64,
+    i128: i128,
+    i256: i256,
+}
+
+#[spacetimedb::table(name = t_ints_tuple)]
+pub struct TIntsTuple {
+    tuple: TInts,
+}
+
+#[derive(Copy, Clone)]
+#[spacetimedb::table(name = t_uints)]
+pub struct TUints {
+    u8: u8,
+    u16: u16,
+    u32: u32,
+    u64: u64,
+    u128: u128,
+    u256: u256,
+}
+
+#[spacetimedb::table(name = t_uints_tuple)]
+pub struct TUintsTuple {
+    tuple: TUints,
+}
+
+#[derive(Clone)]
+#[spacetimedb::table(name = t_others)]
+pub struct TOthers {
+    bool: bool,
+    f32: f32,
+    f64: f64,
+    str: String,
+    bytes: Vec<u8>,
+    identity: Identity,
+    connection_id: ConnectionId,
+    timestamp: Timestamp,
+    duration: TimeDuration,
+    uuid: Uuid,
+}
+
+#[spacetimedb::table(name = t_others_tuple)]
+pub struct TOthersTuple {
+    tuple: TOthers
+}
+
+#[derive(SpacetimeType, Debug, Clone, Copy)]
+pub enum Action {
+    Inactive,
+    Active,
+}
+
+#[derive(Clone)]
+#[spacetimedb::table(name = t_enums)]
+pub struct TEnums {
+    bool_opt: Option<bool>,
+    bool_result: Result<bool, bool>,
+    action: Action,
+}
+
+#[spacetimedb::table(name = t_enums_tuple)]
+pub struct TEnumsTuple {
+    tuple: TEnums,
+}
+
+#[spacetimedb::reducer]
+pub fn test(ctx: &ReducerContext) {
+    let tuple = TInts {
+        i8: -25,
+        i16: -3224,
+        i32: -23443,
+        i64: -2344353,
+        i128: -234434897853,
+        i256: (-234434897853i128).into(),
+    };
+    ctx.db.t_ints().insert(tuple);
+    ctx.db.t_ints_tuple().insert(TIntsTuple { tuple });
+
+    let tuple = TUints {
+        u8: 105,
+        u16: 1050,
+        u32: 83892,
+        u64: 48937498,
+        u128: 4378528978889,
+        u256: 4378528978889u128.into(),
+    };
+    ctx.db.t_uints().insert(tuple);
+    ctx.db.t_uints_tuple().insert(TUintsTuple { tuple });
+
+    let tuple = TOthers {
+        bool: true,
+        f32: 594806.58906,
+        f64: -3454353.345389043278459,
+        str: "This is spacetimedb".to_string(),
+        bytes: vec!(1, 2, 3, 4, 5, 6, 7),
+        identity: Identity::ONE,
+        connection_id: ConnectionId::ZERO,
+        timestamp: Timestamp::UNIX_EPOCH,
+        duration: TimeDuration::ZERO,
+        uuid: Uuid::NIL,
+    };
+    ctx.db.t_others().insert(tuple.clone());
+    ctx.db.t_others_tuple().insert(TOthersTuple { tuple });
+
+    let tuple = TEnums {
+        bool_opt: Some(true),
+        bool_result: Ok(false),
+        action: Action::Active,
+    };
+
+    ctx.db.t_enums().insert(tuple.clone());
+    ctx.db.t_enums_tuple().insert(TEnumsTuple { tuple });
+}
+"#;
+
+/// Tests the text format of the output of SQL queries
+#[test]
+fn test_sql_format() {
+    let test = Smoketest::builder().module_code(SQL_FORMAT_MODULE_CODE).build();
+
+    test.call("test", &[]).unwrap();
+
+    test.assert_sql(
+        "SELECT * FROM t_ints",
+        r#" i8 | i16 | i32 | i64 | i128 | i256
+-----+-------+--------+----------+---------------+---------------
+ -25 | -3224 | -23443 | -2344353 | -234434897853 | -234434897853"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_ints_tuple",
+        r#" tuple
+---------------------------------------------------------------------------------------------------
+ (i8 = -25, i16 = -3224, i32 = -23443, i64 = -2344353, i128 = -234434897853, i256 = -234434897853)"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_uints",
+        r#" u8 | u16 | u32 | u64 | u128 | u256
+-----+------+-------+----------+---------------+---------------
+ 105 | 1050 | 83892 | 48937498 | 4378528978889 | 4378528978889"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_uints_tuple",
+        r#" tuple
+-------------------------------------------------------------------------------------------------
+ (u8 = 105, u16 = 1050, u32 = 83892, u64 = 48937498, u128 = 4378528978889, u256 = 4378528978889)"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_others",
+        r#" bool | f32 | f64 | str | bytes | identity | connection_id | timestamp | duration | uuid
+------+-----------+--------------------+-----------------------+------------------+--------------------------------------------------------------------+------------------------------------+---------------------------+-----------+----------------------------------------
+ true | 594806.56 | -3454353.345389043 | "This is spacetimedb" | 0x01020304050607 | 0x0000000000000000000000000000000000000000000000000000000000000001 | 0x00000000000000000000000000000000 | 1970-01-01T00:00:00+00:00 | +0.000000 | "00000000-0000-0000-0000-000000000000""#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_others_tuple",
+        r#" tuple
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ (bool = true, f32 = 594806.56, f64 = -3454353.345389043, str = "This is spacetimedb", bytes = 0x01020304050607, identity = 0x0000000000000000000000000000000000000000000000000000000000000001, connection_id = 0x00000000000000000000000000000000, timestamp = 1970-01-01T00:00:00+00:00, duration = +0.000000, uuid = "00000000-0000-0000-0000-000000000000")"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_enums",
+        r#" bool_opt | bool_result | action
+---------------+--------------+---------------
+ (some = true) | (ok = false) | (Active = ())"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM t_enums_tuple",
+        r#" tuple
+--------------------------------------------------------------------------------
+ (bool_opt = (some = true), bool_result = (ok = false), action = (Active = ()))"#,
+    );
+}
diff --git a/crates/smoketests/tests/timestamp_route.rs b/crates/smoketests/tests/timestamp_route.rs
new file mode 100644
index 00000000000..a177d1e4444
--- /dev/null
+++ b/crates/smoketests/tests/timestamp_route.rs
@@ -0,0 +1,55 @@
+//! Tests translated from smoketests/tests/timestamp_route.py
+
+use spacetimedb_smoketests::{random_string, Smoketest};
+
+const TIMESTAMP_TAG: &str = "__timestamp_micros_since_unix_epoch__";
+
+/// Test the /v1/database/{name}/unstable/timestamp endpoint
+#[test]
+fn test_timestamp_route() {
+    let mut test = Smoketest::builder().autopublish(false).build();
+
+    let name = random_string();
+
+    // A request for the timestamp at a non-existent database is an error with code 404
+    let resp = test
+        .api_call("GET", &format!("/v1/database/{}/unstable/timestamp", name))
+        .unwrap();
+    assert_eq!(
+        resp.status_code, 404,
+        "Expected 404 for non-existent database, got {}",
+        resp.status_code
+    );
+
+    // Publish a module with the random name
+    test.publish_module_named(&name, false).unwrap();
+
+    // A request for the timestamp at an extant database is a success
+    let resp = test
+        .api_call("GET", &format!("/v1/database/{}/unstable/timestamp", name))
+        .unwrap();
+    assert!(
+        resp.is_success(),
+        "Expected success for existing database, got {}",
+        resp.status_code
+    );
+
+    // The response body is a SATS-JSON encoded `Timestamp`
+    let timestamp = resp.json().unwrap();
+    assert!(
+        timestamp.is_object(),
+        "Expected timestamp to be an object, got {:?}",
+        timestamp
+    );
+    assert!(
+        timestamp.get(TIMESTAMP_TAG).is_some(),
+        "Expected timestamp to have '{}' field, got {:?}",
+        TIMESTAMP_TAG,
+        timestamp
+    );
+    assert!(
+        timestamp[TIMESTAMP_TAG].is_i64() || timestamp[TIMESTAMP_TAG].is_u64(),
+        "Expected timestamp value to be an integer, got {:?}",
+        timestamp[TIMESTAMP_TAG]
+    );
+}
diff --git a/crates/smoketests/tests/views.rs b/crates/smoketests/tests/views.rs
new file mode 100644
index 00000000000..d3cf7c4eadf
--- /dev/null
+++ b/crates/smoketests/tests/views.rs
@@ -0,0 +1,234 @@
+//! Tests translated from smoketests/tests/views.py
+
+use spacetimedb_smoketests::Smoketest;
+
+const MODULE_CODE_VIEWS: &str = r#"
+use spacetimedb::ViewContext;
+
+#[derive(Copy, Clone)]
+#[spacetimedb::table(name = player_state)]
+pub struct PlayerState {
+    #[primary_key]
+    id: u64,
+    #[index(btree)]
+    level: u64,
+}
+
+#[spacetimedb::view(name = player, public)]
+pub fn player(ctx: &ViewContext) -> Option<PlayerState> {
+    ctx.db.player_state().id().find(0u64)
+}
+"#;
+
+/// Tests that views populate the st_view_* system tables
+#[test]
+fn test_st_view_tables() {
+    let test = Smoketest::builder().module_code(MODULE_CODE_VIEWS).build();
+
+    test.assert_sql(
+        "SELECT * FROM st_view",
+        r#" view_id | view_name | table_id | is_public | is_anonymous
+---------+-----------+---------------+-----------+--------------
+ 4096 | "player" | (some = 4097) | true | false"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM st_view_column",
+        r#" view_id | col_pos | col_name | col_type
+---------+---------+----------+----------
+ 4096 | 0 | "id" | 0x0d
+ 4096 | 1 | "level" | 0x0d"#,
+    );
+}
+
+const MODULE_CODE_BROKEN_NAMESPACE: &str = r#"
+use spacetimedb::ViewContext;
+
+#[spacetimedb::table(name = person, public)]
+pub struct Person {
+    name: String,
+}
+
+#[spacetimedb::view(name = person, public)]
+pub fn person(ctx: &ViewContext) -> Option<Person> {
+    None
+}
+"#;
+
+const MODULE_CODE_BROKEN_RETURN_TYPE: &str = r#"
+use spacetimedb::{SpacetimeType, ViewContext};
+
+#[derive(SpacetimeType)]
+pub enum ABC {
+    A,
+    B,
+    C,
+}
+
+#[spacetimedb::view(name = person, public)]
+pub fn person(ctx: &ViewContext) -> Option<ABC> {
+    None
+}
+"#;
+
+/// Publishing a module should fail if a table and view have the same name
+#[test]
+fn test_fail_publish_namespace_collision() {
+    let mut test = Smoketest::builder()
+        .module_code(MODULE_CODE_BROKEN_NAMESPACE)
+        .autopublish(false)
+        .build();
+
+    let result = test.publish_module();
+    assert!(
+        result.is_err(),
+        "Expected publish to fail when table and view have same name"
+    );
+}
+
+/// Publishing a module should fail if the inner return type is not a product type
+#[test]
+fn test_fail_publish_wrong_return_type() {
+    let mut test = Smoketest::builder()
+        .module_code(MODULE_CODE_BROKEN_RETURN_TYPE)
+        .autopublish(false)
+        .build();
+
+    let result = test.publish_module();
+    assert!(
+        result.is_err(),
+        "Expected publish to fail when view return type is not a product type"
+    );
+}
+
+const MODULE_CODE_SQL_VIEWS: &str = r#"
+use spacetimedb::{AnonymousViewContext, ReducerContext, Table, ViewContext};
+
+#[derive(Copy, Clone)]
+#[spacetimedb::table(name = player_state)]
+#[spacetimedb::table(name = player_level)]
+pub struct PlayerState {
+    #[primary_key]
+    id: u64,
+    #[index(btree)]
+    level: u64,
+}
+
+#[derive(Clone)]
+#[spacetimedb::table(name = player_info, index(name=age_level_index, btree(columns = [age, level])))]
+pub struct PlayerInfo {
+    #[primary_key]
+    id: u64,
+    age: u64,
+    level: u64,
+}
+
+#[spacetimedb::reducer]
+pub fn add_player_level(ctx: &ReducerContext, id: u64, level: u64) {
+    ctx.db.player_level().insert(PlayerState { id, level });
+}
+
+#[spacetimedb::view(name = my_player_and_level, public)]
+pub fn my_player_and_level(ctx: &AnonymousViewContext) -> Option<PlayerState> {
+    ctx.db.player_level().id().find(0)
+}
+
+#[spacetimedb::view(name = player_and_level, public)]
+pub fn player_and_level(ctx: &AnonymousViewContext) -> Vec<PlayerState> {
+    ctx.db.player_level().level().filter(2u64).collect()
+}
+
+#[spacetimedb::view(name = player, public)]
+pub fn player(ctx: &ViewContext) -> Option<PlayerState> {
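+    // Emit a log line on every call; the view then returns the player_state row with id 42, if present.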
+    log::info!("player view called");
+    ctx.db.player_state().id().find(42)
+}
+
+#[spacetimedb::view(name = player_none, public)]
+pub fn player_none(_ctx: &ViewContext) -> Option<PlayerState> {
+    None
+}
+
+#[spacetimedb::view(name = player_vec, public)]
+pub fn player_vec(ctx: &ViewContext) -> Vec<PlayerState> {
+    let first = ctx.db.player_state().id().find(42).unwrap();
+    let second = PlayerState { id: 7, level: 3 };
+    vec![first, second]
+}
+
+#[spacetimedb::view(name = player_info_multi_index, public)]
+pub fn player_info_view(ctx: &ViewContext) -> Option<PlayerInfo> {
+    log::info!("player_info called");
+    ctx.db.player_info().age_level_index().filter((25u64, 7u64)).next()
+}
+"#;
+
+/// Tests that views can be queried over HTTP SQL
+#[test]
+fn test_http_sql_views() {
+    let test = Smoketest::builder().module_code(MODULE_CODE_SQL_VIEWS).build();
+
+    // Insert initial data
+    test.sql("INSERT INTO player_state (id, level) VALUES (42, 7)").unwrap();
+
+    test.assert_sql(
+        "SELECT * FROM player",
+        r#" id | level
+----+-------
+ 42 | 7"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM player_none",
+        r#" id | level
+----+-------"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM player_vec",
+        r#" id | level
+----+-------
+ 42 | 7
+ 7 | 3"#,
+    );
+}
+
+/// Tests that anonymous views reflect updates made by reducers
+#[test]
+fn test_query_anonymous_view_reducer() {
+    let test = Smoketest::builder().module_code(MODULE_CODE_SQL_VIEWS).build();
+
+    test.call("add_player_level", &["0", "1"]).unwrap();
+    test.call("add_player_level", &["1", "2"]).unwrap();
+
+    test.assert_sql(
+        "SELECT * FROM my_player_and_level",
+        r#" id | level
+----+-------
+ 0 | 1"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM player_and_level",
+        r#" id | level
+----+-------
+ 1 | 2"#,
+    );
+
+    test.call("add_player_level", &["2", "2"]).unwrap();
+
+    test.assert_sql(
+        "SELECT * FROM player_and_level",
+        r#" id | level
+----+-------
+ 1 | 2
+ 2 | 2"#,
+    );
+
+    test.assert_sql(
+        "SELECT * FROM player_and_level WHERE id = 2",
+        r#" id | level
+----+-------
+ 2 | 2"#,
+    );
+}
diff --git a/tools/ci/src/main.rs b/tools/ci/src/main.rs
index d0eeb7a4ace..bce50903e4b 100644
--- a/tools/ci/src/main.rs
+++ b/tools/ci/src/main.rs
@@ -219,15 +219,6 @@ fn run_all_clap_subcommands(skips: &[String]) -> Result<()> {
     Ok(())
 }
 
-fn infer_python() -> String {
-    let py3_available = cmd!("python3", "--version").run().is_ok();
-    if py3_available {
-        "python3".to_string()
-    } else {
-        "python".to_string()
-    }
-}
-
 fn main() -> Result<()> {
     env_logger::init();
 
@@ -399,13 +390,11 @@ fn main() -> Result<()> {
         }
 
         Some(CiCmd::Smoketests { args: smoketest_args }) => {
-            let python = infer_python();
             cmd(
-                python,
-                ["-m", "smoketests"]
+                "cargo",
+                ["test", "-p", "spacetimedb-smoketests"]
                     .into_iter()
-                    .map(|s| s.to_string())
-                    .chain(smoketest_args),
+                    .chain(smoketest_args.iter().map(|s| s.as_str())),
             )
             .run()?;
         }
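
The rewritten `smoketests` subcommand above simply appends any extra arguments to `cargo test -p spacetimedb-smoketests`. A rough local-usage sketch, assuming the clap-declared `args` field forwards hyphenated flags unchanged (this diff does not show that declaration):

    # Run the full translated suite, the same invocation CI now uses:
    cargo ci smoketests
    # Forwarded args land on the `cargo test` command line, so standard cargo
    # filters should pass through, e.g. only the servers test binary:
    cargo ci smoketests --test servers
    # Or skip the wrapper and filter by test name directly:
    cargo test -p spacetimedb-smoketests test_sql_format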