diff --git a/Cargo.lock b/Cargo.lock
index b2a73b9..35ee82b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -366,6 +366,7 @@ dependencies = [
  "fuser",
  "libc",
  "mesa-dev",
+ "nix",
  "rand",
  "rustc-hash",
  "scc",
diff --git a/crates/git-fs/Cargo.toml b/crates/git-fs/Cargo.toml
index 8c42a99..271c1c4 100644
--- a/crates/git-fs/Cargo.toml
+++ b/crates/git-fs/Cargo.toml
@@ -18,3 +18,4 @@ rand = "0.9.2"
 rustc-hash = "2.1.1"
 scc = "3.4.16"
 base64 = "0.22"
+nix = "0.29.0"
diff --git a/crates/git-fs/src/commit_worker.rs b/crates/git-fs/src/commit_worker.rs
new file mode 100644
index 0000000..8d64c6b
--- /dev/null
+++ b/crates/git-fs/src/commit_worker.rs
@@ -0,0 +1,130 @@
+//! Background worker for processing commit requests.
+//!
+//! This module handles asynchronous commits to the remote repository via the Mesa API.
+
+use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
+use mesa_dev::Mesa;
+use mesa_dev::models::{Author, CommitEncoding, CommitFile, CommitFileAction, CreateCommitRequest};
+use tokio::sync::mpsc;
+use tracing::{error, info};
+
+/// A request to create a commit, sent to the background worker.
+pub enum CommitRequest {
+    /// Create a new file with the given content.
+    Create {
+        /// Path to the file to create.
+        path: String,
+        /// Content of the file.
+        content: Vec<u8>,
+    },
+    /// Update an existing file with new content.
+    Update {
+        /// Path to the file to update.
+        path: String,
+        /// New content of the file.
+        content: Vec<u8>,
+    },
+    /// Delete a file.
+    Delete {
+        /// Path to the file to delete.
+        path: String,
+    },
+}
+
+/// Configuration for the commit worker.
+pub struct CommitWorkerConfig {
+    /// Mesa API client.
+    pub mesa: Mesa,
+    /// Repository organization/owner.
+    pub org: String,
+    /// Repository name.
+    pub repo: String,
+    /// Branch to commit to.
+    pub branch: String,
+    /// Author information for commits.
+    pub author: Author,
+}
+
+/// Spawns a background task that processes commit requests from the given receiver.
+///
+/// The task will run until the channel is closed (all senders are dropped).
+pub fn spawn_commit_worker(
+    rt: &tokio::runtime::Runtime,
+    config: CommitWorkerConfig,
+    mut commit_rx: mpsc::UnboundedReceiver<CommitRequest>,
+) {
+    let mesa = config.mesa;
+    let org = config.org;
+    let repo = config.repo;
+    let branch = config.branch;
+    let author = config.author;
+    rt.spawn(async move {
+        while let Some(request) = commit_rx.recv().await {
+            let (message, files) = match request {
+                CommitRequest::Create { path, content } => {
+                    // Use "." for empty files to work around Mesa API bug with empty content
+                    let content_bytes = if content.is_empty() {
+                        b".".as_slice()
+                    } else {
+                        &content
+                    };
+                    (
+                        format!("Create {path}"),
+                        vec![CommitFile {
+                            action: CommitFileAction::Upsert,
+                            path,
+                            encoding: CommitEncoding::Base64,
+                            content: Some(BASE64.encode(content_bytes)),
+                        }],
+                    )
+                }
+                CommitRequest::Update { path, content } => {
+                    // Use "." for empty files to work around Mesa API bug with empty content
+                    let content_bytes = if content.is_empty() {
+                        b".".as_slice()
+                    } else {
+                        &content
+                    };
+                    (
+                        format!("Update {path}"),
+                        vec![CommitFile {
+                            action: CommitFileAction::Upsert,
+                            path,
+                            encoding: CommitEncoding::Base64,
+                            content: Some(BASE64.encode(content_bytes)),
+                        }],
+                    )
+                }
+                CommitRequest::Delete { path } => (
+                    format!("Delete {path}"),
+                    vec![CommitFile {
+                        action: CommitFileAction::Delete,
+                        path,
+                        encoding: CommitEncoding::Base64,
+                        content: None,
+                    }],
+                ),
+            };
+
+            let create_commit_request = CreateCommitRequest {
+                branch: branch.clone(),
+                message: message.clone(),
+                author: author.clone(),
+                files,
+                base_sha: None,
+            };
+
+            info!("about to commit the following: {:?}", create_commit_request);
+
+            let result = mesa
+                .commits(&org, &repo)
+                .create(&create_commit_request)
+                .await;
+
+            match result {
+                Ok(_) => info!(message = %message, "commit pushed"),
+                Err(e) => error!(message = %message, error = %e, "commit failed"),
+            }
+        }
+    });
+}
diff --git a/crates/git-fs/src/main.rs b/crates/git-fs/src/main.rs
index f2a9b76..665278b 100644
--- a/crates/git-fs/src/main.rs
+++ b/crates/git-fs/src/main.rs
@@ -1,11 +1,13 @@
 //! Mount a GitHub repository as a filesystem, without ever cloning.
 use std::path::PathBuf;
+use std::process::Command;
 
 use clap::Parser;
 use fuser::MountOption;
 use tracing::error;
 use tracing_subscriber::{EnvFilter, fmt};
 
+mod commit_worker;
 mod domain;
 mod mesafuse;
 mod ssfs;
@@ -32,6 +34,43 @@ struct Args {
     /// repository's default branch.
     #[arg(long)]
     r#ref: Option<String>,
+
+    /// Enable write mode. When enabled, file modifications are immediately committed to the
+    /// remote repository. Requires git config user.name and user.email to be set.
+    #[arg(long)]
+    writable: bool,
+}
+
+/// Author information for commits.
+#[derive(Debug, Clone)]
+pub struct Author {
+    /// Author name (from git config user.name).
+    pub name: String,
+    /// Author email (from git config user.email).
+    pub email: String,
+}
+
+/// Read a git config value.
+fn git_config_get(key: &str) -> Option<String> {
+    let output = Command::new("git")
+        .args(["config", "--get", key])
+        .output()
+        .ok()?;
+
+    output
+        .status
+        .success()
+        .then(|| String::from_utf8_lossy(&output.stdout).trim().to_owned())
+}
+
+/// Read author from git config, returning an error message if not found.
+fn get_author_from_git_config() -> Result<Author, &'static str> {
+    let name = git_config_get("user.name").ok_or(
+        "git config user.name is not set. Please run: git config --global user.name \"Your Name\"",
+    )?;
+    let email = git_config_get("user.email")
+        .ok_or("git config user.email is not set. 
Please run: git config --global user.email \"your@email.com\"")?; + Ok(Author { name, email }) } fn main() { @@ -41,13 +80,29 @@ fn main() { .with_span_events(fmt::format::FmtSpan::EXIT) .init(); - let options = vec![ - MountOption::RO, + // Read author from git config if writable mode is enabled + let author = if args.writable { + match get_author_from_git_config() { + Ok(author) => Some(author), + Err(msg) => { + error!("{msg}"); + std::process::exit(1); + } + } + } else { + None + }; + + let mut options = vec![ MountOption::AutoUnmount, MountOption::FSName("mesafs".to_owned()), ]; - let mesa_fs = MesaFS::new(&args.mesa_api_key, args.repo, args.r#ref.as_deref()); + if !args.writable { + options.push(MountOption::RO); + } + + let mesa_fs = MesaFS::new(&args.mesa_api_key, args.repo, args.r#ref.as_deref(), author); if let Err(e) = fuser::mount2(mesa_fs, &args.mount_point, &options) { error!("Failed to mount filesystem: {e}"); } diff --git a/crates/git-fs/src/mesafuse.rs b/crates/git-fs/src/mesafuse.rs index 9e5b245..ceeb623 100644 --- a/crates/git-fs/src/mesafuse.rs +++ b/crates/git-fs/src/mesafuse.rs @@ -5,6 +5,7 @@ use std::{ }; use crate::{ + commit_worker::{CommitRequest, CommitWorkerConfig, spawn_commit_worker}, domain::GhRepoInfo, ssfs::{ GetINodeError, INodeHandle, INodeKind, SsFs, SsfsBackend, SsfsBackendError, SsfsDirEntry, @@ -12,40 +13,46 @@ use crate::{ }, }; use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64}; -use fuser::{Filesystem, ReplyAttr, ReplyData, ReplyDirectory, ReplyEntry, Request}; +use fuser::{ + Filesystem, ReplyAttr, ReplyCreate, ReplyData, ReplyDirectory, ReplyEmpty, ReplyEntry, + ReplyWrite, Request, +}; use mesa_dev::Mesa; -use mesa_dev::models::{Content, DirEntryType}; -use tracing::instrument; - -impl From for fuser::FileAttr { - fn from(val: INodeHandle) -> Self { - let (kind, perm) = match val.kind { - INodeKind::File => (fuser::FileType::RegularFile, 0o444), - INodeKind::Directory => (fuser::FileType::Directory, 0o755), - }; - - // TODO(markovejnovic): A lot of these falues are placeholders. - Self { - ino: u64::from(val.ino), - size: val.size, - blocks: 0, - atime: std::time::SystemTime::now(), - mtime: std::time::SystemTime::now(), - ctime: std::time::SystemTime::now(), - crtime: std::time::SystemTime::now(), - kind, - perm, - nlink: if matches!(val.kind, INodeKind::Directory) { - 2 - } else { - 1 - }, - uid: 1000, - gid: 1000, - rdev: 0, - flags: 0, - blksize: 0, +use mesa_dev::models::{Author as MesaAuthor, Content, DirEntryType}; +use tokio::sync::mpsc; +use tracing::{info, instrument}; + +/// Convert an inode handle to FUSE file attributes. +fn inode_to_file_attr(handle: INodeHandle, writable: bool) -> fuser::FileAttr { + let (kind, perm) = match handle.kind { + INodeKind::File => { + let perm = if writable { 0o644 } else { 0o444 }; + (fuser::FileType::RegularFile, perm) } + INodeKind::Directory => (fuser::FileType::Directory, 0o755), + }; + + // TODO(markovejnovic): A lot of these falues are placeholders. 
+ fuser::FileAttr { + ino: u64::from(handle.ino), + size: handle.size, + blocks: 0, + atime: std::time::SystemTime::now(), + mtime: std::time::SystemTime::now(), + ctime: std::time::SystemTime::now(), + crtime: std::time::SystemTime::now(), + kind, + perm, + nlink: if matches!(handle.kind, INodeKind::Directory) { + 2 + } else { + 1 + }, + uid: nix::unistd::getuid().as_raw(), + gid: nix::unistd::getgid().as_raw(), + rdev: 0, + flags: 0, + blksize: 0, } } @@ -71,94 +78,111 @@ fn get_inode_err_to_errno(err: &GetINodeError) -> i32 { } } +fn backend_err_to_errno(err: &SsfsBackendError) -> i32 { + match err { + SsfsBackendError::NotFound => libc::ENOENT, + SsfsBackendError::ReadOnly => libc::EROFS, + SsfsBackendError::Io(_) => libc::EIO, + } +} + +#[derive(Clone)] pub struct MesaBackend { mesa: Mesa, org: String, repo: String, git_ref: Option, + commit_tx: Arc>, } impl SsfsBackend for MesaBackend { - fn readdir( - &self, - path: &str, - ) -> impl Future, SsfsBackendError>> + Send { - let path_arg: Option = if path.is_empty() { - None - } else { - Some(path.to_owned()) - }; - let org = self.org.clone(); - let repo = self.repo.clone(); - let git_ref = self.git_ref.clone(); - let mesa = self.mesa.clone(); - - async move { - let result = mesa - .content(&org, &repo) - .get(path_arg.as_deref(), git_ref.as_deref()) - .await; - - match result { - Ok(Content::Dir { entries, .. }) => { - let dir_entries = entries - .into_iter() - .map(|e| SsfsDirEntry { - name: OsString::from(e.name), - kind: match e.entry_type { - DirEntryType::Dir => INodeKind::Directory, - DirEntryType::File => INodeKind::File, - }, - size: e.size.unwrap_or(0), - }) - .collect(); - Ok(dir_entries) - } - Ok(Content::File { .. }) => Err(SsfsBackendError::NotFound), - Err(e) => Err(SsfsBackendError::Io(Box::new(e))), + async fn readdir(&self, path: &str) -> Result, SsfsBackendError> { + let path_arg = if path.is_empty() { None } else { Some(path) }; + + let result = self + .mesa + .content(&self.org, &self.repo) + .get(path_arg, self.git_ref.as_deref()) + .await; + + match result { + Ok(Content::Dir { entries, .. }) => { + let dir_entries = entries + .into_iter() + .map(|e| SsfsDirEntry { + name: OsString::from(e.name), + kind: match e.entry_type { + DirEntryType::Dir => INodeKind::Directory, + DirEntryType::File => INodeKind::File, + }, + size: e.size.unwrap_or(0), + }) + .collect(); + Ok(dir_entries) } + Ok(Content::File { .. }) => Err(SsfsBackendError::NotFound), + Err(e) => Err(SsfsBackendError::Io(Box::new(e))), } } - fn read_file( - &self, - path: &str, - ) -> impl Future, SsfsBackendError>> + Send { - let org = self.org.clone(); - let repo = self.repo.clone(); - let git_ref = self.git_ref.clone(); - let mesa = self.mesa.clone(); - let path = path.to_owned(); - - async move { - let result = mesa - .content(&org, &repo) - .get(Some(&path), git_ref.as_deref()) - .await; - - match result { - Ok(Content::File { - content, encoding, .. - }) => { - if encoding != "base64" { - return Err(SsfsBackendError::Io( - format!("unsupported encoding: {encoding}").into(), - )); - } - // Mesa/GitHub line-wraps base64 at 76 chars; strip whitespace before decoding. 
- let cleaned: String = content - .chars() - .filter(|c| !c.is_ascii_whitespace()) - .collect(); - BASE64 - .decode(&cleaned) - .map_err(|e| SsfsBackendError::Io(Box::new(e))) + async fn read_file(&self, path: &str) -> Result, SsfsBackendError> { + let result = self + .mesa + .content(&self.org, &self.repo) + .get(Some(path), self.git_ref.as_deref()) + .await; + + match result { + Ok(Content::File { + content, encoding, .. + }) => { + if encoding != "base64" { + return Err(SsfsBackendError::Io( + format!("unsupported encoding: {encoding}").into(), + )); } - Ok(Content::Dir { .. }) => Err(SsfsBackendError::NotFound), - Err(e) => Err(SsfsBackendError::Io(Box::new(e))), + // Mesa/GitHub line-wraps base64 at 76 chars; strip whitespace before decoding. + let cleaned: String = content + .chars() + .filter(|c| !c.is_ascii_whitespace()) + .collect(); + BASE64 + .decode(&cleaned) + .map_err(|e| SsfsBackendError::Io(Box::new(e))) } + Ok(Content::Dir { .. }) => Err(SsfsBackendError::NotFound), + Err(e) => Err(SsfsBackendError::Io(Box::new(e))), } } + + async fn create_file(&self, path: &str, content: &[u8]) -> Result<(), SsfsBackendError> { + let request = CommitRequest::Create { + path: path.to_owned(), + content: content.to_vec(), + }; + self.commit_tx + .send(request) + .map_err(|_| SsfsBackendError::Io("channel closed".into())) + } + + async fn update_file(&self, path: &str, content: &[u8]) -> Result<(), SsfsBackendError> { + let request = CommitRequest::Update { + path: path.to_owned(), + content: content.to_vec(), + }; + self.commit_tx + .send(request) + .map_err(|_| SsfsBackendError::Io("channel closed".into())) + } + + async fn delete_file(&self, path: &str) -> Result<(), SsfsBackendError> { + let request = CommitRequest::Delete { + path: path.to_owned(), + }; + self.commit_tx + .send(request) + .map_err(|_| SsfsBackendError::Io("channel closed".into())) + } } pub struct MesaFS { @@ -174,6 +198,9 @@ pub struct MesaFS { /// serializes to disk and how it loads from disk. We are responsible for giving it the true /// state of reality. ssfs: SsFs, + + /// Whether the filesystem is mounted in writable mode. + writable: bool, } /// Mesa's FUSE filesystem implementation. @@ -183,19 +210,51 @@ impl MesaFS { const KERNEL_TTL: Duration = Duration::from_mins(1); #[expect(clippy::expect_used)] // Runtime creation is infallible in practice; no recovery path. 
- pub fn new(api_key: &str, gh_repo: GhRepoInfo, git_ref: Option<&str>) -> Self { + pub fn new( + api_key: &str, + gh_repo: GhRepoInfo, + git_ref: Option<&str>, + author: Option, + ) -> Self { let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + // Create channel for commit requests + let (commit_tx, commit_rx) = mpsc::unbounded_channel(); + + // Extract data from inputs + let mesa = Mesa::builder(api_key).build(); + let org = gh_repo.org; + let repo = gh_repo.repo; + let branch = git_ref.map_or_else(|| "main".to_owned(), ToOwned::to_owned); + let writable = author.is_some(); + + // Spawn background commit worker if we have an author + if let Some(author) = author { + let config = CommitWorkerConfig { + mesa: mesa.clone(), + org: org.clone(), + repo: repo.clone(), + branch, + author: MesaAuthor { + name: author.name, + email: author.email, + date: None, + }, + }; + spawn_commit_worker(&rt, config, commit_rx); + } + let backend = Arc::new(MesaBackend { - mesa: Mesa::builder(api_key).build(), - org: gh_repo.org, - repo: gh_repo.repo, + mesa, + org, + repo, git_ref: git_ref.map(ToOwned::to_owned), + commit_tx: Arc::new(commit_tx), }); let ssfs = SsFs::new(backend, rt.handle().clone()); - Self { rt, ssfs } + Self { rt, ssfs, writable } } } @@ -210,12 +269,12 @@ impl Filesystem for MesaFS { match self.ssfs.lookup(parent as u32, name) { Ok(entry) => match entry { SsfsOk::Resolved(inode_handle) => { - let attr: fuser::FileAttr = inode_handle.into(); + let attr = inode_to_file_attr(inode_handle, self.writable); reply.entry(&Self::KERNEL_TTL, &attr, 0); } SsfsOk::Future(fut) => match self.rt.block_on(fut) { Ok(inode_handle) => { - let attr: fuser::FileAttr = inode_handle.into(); + let attr = inode_to_file_attr(inode_handle, self.writable); reply.entry(&Self::KERNEL_TTL, &attr, 0); } Err(err) => reply.error(ssfs_err_to_errno(&err)), @@ -230,12 +289,12 @@ impl Filesystem for MesaFS { match self.ssfs.get_inode(ino as u32) { Ok(entry) => match entry { SsfsOk::Resolved(inode_handle) => { - let attr: fuser::FileAttr = inode_handle.into(); + let attr = inode_to_file_attr(inode_handle, self.writable); reply.attr(&Self::KERNEL_TTL, &attr); } SsfsOk::Future(fut) => match self.rt.block_on(fut) { Ok(inode_handle) => { - let attr: fuser::FileAttr = inode_handle.into(); + let attr = inode_to_file_attr(inode_handle, self.writable); reply.attr(&Self::KERNEL_TTL, &attr); } Err(err) => reply.error(ssfs_err_to_errno(&err)), @@ -373,4 +432,268 @@ impl Filesystem for MesaFS { reply.ok(); } + + #[instrument(skip(self, _req, name, _mode, _umask, _flags, reply))] + fn create( + &mut self, + _req: &Request<'_>, + parent: u64, + name: &OsStr, + _mode: u32, + _umask: u32, + _flags: i32, + reply: ReplyCreate, + ) { + let parent_ino = parent as u32; + + // Build the full path + let Some(parent_path) = self.ssfs.get_path(parent_ino) else { + reply.error(libc::ENOENT); + return; + }; + + let name_str = name.to_string_lossy(); + let full_path = if parent_path.is_empty() { + name_str.to_string() + } else { + format!("{parent_path}/{name_str}") + }; + + // Create the file via backend (empty content) + info!(path = %full_path, "creating file"); + let backend = self.ssfs.backend(); + let result = self.rt.block_on(backend.create_file(&full_path, &[])); + + match result { + Ok(()) => { + info!(path = %full_path, "file created, queued for commit"); + // Update cache + if let Some(handle) = self.ssfs.insert_file(parent_ino, name, 0) { + let attr = inode_to_file_attr(handle, self.writable); + 
reply.created(&Self::KERNEL_TTL, &attr, 0, 0, 0); + } else { + reply.error(libc::EIO); + } + } + Err(ref e) => { + info!(path = %full_path, error = ?e, "file creation failed (queue error)"); + reply.error(backend_err_to_errno(e)); + } + } + } + + #[instrument(skip( + self, + _req, + ino, + _fh, + offset, + data, + _write_flags, + _flags, + _lock_owner, + reply + ))] + fn write( + &mut self, + _req: &Request<'_>, + ino: u64, + _fh: u64, + offset: i64, + data: &[u8], + _write_flags: u32, + _flags: i32, + _lock_owner: Option, + reply: ReplyWrite, + ) { + let ino = ino as u32; + + // Get the file path + let Some(path) = self.ssfs.get_path(ino) else { + reply.error(libc::ENOENT); + return; + }; + + // Read current content + let current_content = match self.ssfs.read(ino) { + Ok(SsfsOk::Future(fut)) => match self.rt.block_on(fut) { + Ok(data) => data, + Err(ref e) => { + reply.error(ssfs_err_to_errno(e)); + return; + } + }, + Ok(SsfsOk::Resolved(data)) => data, + Err(ref e) => { + // If file doesn't exist yet (new file), start with empty content + if matches!(e, SsfsResolutionError::DoesNotExist) { + Vec::new() + } else { + reply.error(ssfs_err_to_errno(e)); + return; + } + } + }; + + // Apply the write at offset + let offset = offset as usize; + let mut new_content = current_content; + + // Extend if needed + if offset + data.len() > new_content.len() { + new_content.resize(offset + data.len(), 0); + } + + // Copy data at offset + new_content[offset..offset + data.len()].copy_from_slice(data); + + // Update via backend + info!( + path = %path, + offset, + write_size = data.len(), + new_total_size = new_content.len(), + "writing to file" + ); + let backend = self.ssfs.backend(); + let result = self.rt.block_on(backend.update_file(&path, &new_content)); + + match result { + Ok(()) => { + info!(path = %path, size = new_content.len(), "file updated, queued for commit"); + // Update cache with new size + self.ssfs.update_file_size(ino, new_content.len() as u64); + reply.written(data.len() as u32); + } + Err(ref e) => { + info!(path = %path, error = ?e, "file write failed (queue error)"); + reply.error(backend_err_to_errno(e)); + } + } + } + + #[instrument(skip(self, _req, name, reply))] + fn unlink(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: ReplyEmpty) { + let parent_ino = parent as u32; + + // Build the full path + let Some(parent_path) = self.ssfs.get_path(parent_ino) else { + reply.error(libc::ENOENT); + return; + }; + + let name_str = name.to_string_lossy(); + let full_path = if parent_path.is_empty() { + name_str.to_string() + } else { + format!("{parent_path}/{name_str}") + }; + + // Delete via backend + info!(path = %full_path, "deleting file"); + let backend = self.ssfs.backend(); + let result = self.rt.block_on(backend.delete_file(&full_path)); + + match result { + Ok(()) => { + info!(path = %full_path, "file deleted, queued for commit"); + // Update cache + self.ssfs.remove_file(parent_ino, name); + reply.ok(); + } + Err(ref e) => { + info!(path = %full_path, error = ?e, "file deletion failed (queue error)"); + reply.error(backend_err_to_errno(e)); + } + } + } + + #[instrument(skip( + self, _req, ino, _mode, _uid, _gid, size, _atime, _mtime, _ctime, _fh, _crtime, _chgtime, + _bkuptime, _flags, reply + ))] + fn setattr( + &mut self, + _req: &Request<'_>, + ino: u64, + _mode: Option, + _uid: Option, + _gid: Option, + size: Option, + _atime: Option, + _mtime: Option, + _ctime: Option, + _fh: Option, + _crtime: Option, + _chgtime: Option, + _bkuptime: Option, + _flags: 
Option, + reply: ReplyAttr, + ) { + let ino = ino as u32; + + // Handle truncate + if let Some(new_size) = size { + let Some(path) = self.ssfs.get_path(ino) else { + reply.error(libc::ENOENT); + return; + }; + + // Read current content + let current_content = match self.ssfs.read(ino) { + Ok(SsfsOk::Future(fut)) => self.rt.block_on(fut).unwrap_or_default(), + Ok(SsfsOk::Resolved(data)) => data, + Err(_) => Vec::new(), + }; + + let old_size = current_content.len(); + + // Truncate or extend + let mut new_content = current_content; + new_content.resize(new_size as usize, 0); + + // Update via backend + info!(path = %path, old_size, new_size, "truncating file"); + let backend = self.ssfs.backend(); + let result = self.rt.block_on(backend.update_file(&path, &new_content)); + + if let Err(ref e) = result { + info!(path = %path, error = ?e, "file truncate failed (queue error)"); + reply.error(backend_err_to_errno(e)); + return; + } + + info!(path = %path, new_size, "file truncated, queued for commit"); + // Update cache + self.ssfs.update_file_size(ino, new_size); + } + + // Return current attributes + match self.ssfs.get_inode(ino) { + Ok(SsfsOk::Resolved(handle)) => { + let attr = inode_to_file_attr(handle, self.writable); + reply.attr(&Self::KERNEL_TTL, &attr); + } + Ok(SsfsOk::Future(fut)) => match self.rt.block_on(fut) { + Ok(handle) => { + let attr = inode_to_file_attr(handle, self.writable); + reply.attr(&Self::KERNEL_TTL, &attr); + } + Err(ref e) => reply.error(ssfs_err_to_errno(e)), + }, + Err(ref e) => reply.error(get_inode_err_to_errno(e)), + } + } + + fn fsync( + &mut self, + _req: &Request<'_>, + _ino: u64, + _fh: u64, + _datasync: bool, + reply: ReplyEmpty, + ) { + // No-op: commits are pushed asynchronously via the channel + reply.ok(); + } } diff --git a/crates/git-fs/src/ssfs.rs b/crates/git-fs/src/ssfs.rs index 61726d0..2451b2d 100644 --- a/crates/git-fs/src/ssfs.rs +++ b/crates/git-fs/src/ssfs.rs @@ -144,8 +144,8 @@ pub struct SsfsDirEntry { #[derive(Debug)] pub enum SsfsBackendError { NotFound, - #[expect(dead_code)] - Io(Box), + ReadOnly, + Io(#[expect(dead_code)] Box), } /// Trait for the backend that provides directory listings and file content. @@ -159,6 +159,32 @@ pub trait SsfsBackend: Send + Sync + 'static { &self, path: &str, ) -> impl Future, SsfsBackendError>> + Send; + + /// Create a new file with the given content. + fn create_file( + &self, + _path: &str, + _content: &[u8], + ) -> impl Future> + Send { + async { Err(SsfsBackendError::ReadOnly) } + } + + /// Update an existing file with new content. + fn update_file( + &self, + _path: &str, + _content: &[u8], + ) -> impl Future> + Send { + async { Err(SsfsBackendError::ReadOnly) } + } + + /// Delete a file. + fn delete_file( + &self, + _path: &str, + ) -> impl Future> + Send { + async { Err(SsfsBackendError::ReadOnly) } + } } /// TODO(markovejnovic): In the future, we'll have to figure out how ssfs will serialize to disk. @@ -616,8 +642,89 @@ impl SsFs { Ok(SsfsOk::Future(Box::pin(async move { backend.read_file(&path).await.map_err(|e| match e { SsfsBackendError::NotFound => SsfsResolutionError::DoesNotExist, - SsfsBackendError::Io(_) => SsfsResolutionError::IoError, + SsfsBackendError::ReadOnly | SsfsBackendError::Io(_) => { + SsfsResolutionError::IoError + } }) }))) } + + /// Insert a new file inode into the cache. + /// Returns the new inode number and handle. 
+    pub fn insert_file(&self, parent: INo, name: &PathView, size: u64) -> Option<INodeHandle> {
+        // Verify parent exists and is a directory
+        let parent_children = self.nodes.read_sync(&parent, |_, n| n.children.clone())?;
+
+        let DirChildren::Populated(map) = parent_children else {
+            return None;
+        };
+
+        // Allocate new inode
+        let ino = self.next_ino.fetch_add(1, Ordering::Relaxed);
+
+        let inode = INode {
+            ino,
+            parent,
+            name: name.to_owned(),
+            children: DirChildren::NotADirectory,
+            size,
+        };
+        let handle = inode.handle();
+
+        // Insert the new inode
+        if self.nodes.insert_sync(ino, inode).is_err() {
+            return None;
+        }
+
+        // Update parent's children map
+        let mut new_map = (*map).clone();
+        new_map.insert(name.to_owned(), ino);
+        self.nodes.update_sync(&parent, |_, n| {
+            n.children = DirChildren::Populated(Arc::new(new_map));
+        });
+
+        Some(handle)
+    }
+
+    /// Update a file's size in the cache.
+    pub fn update_file_size(&self, ino: INo, new_size: u64) {
+        self.nodes.update_sync(&ino, |_, n| {
+            n.size = new_size;
+        });
+    }
+
+    /// Remove a file inode from the cache.
+    pub fn remove_file(&self, parent: INo, name: &PathView) -> Option<INo> {
+        // Get parent's children
+        let parent_children = self.nodes.read_sync(&parent, |_, n| n.children.clone())?;
+
+        let DirChildren::Populated(map) = parent_children else {
+            return None;
+        };
+
+        // Find the child inode
+        let child_ino = *map.get(name)?;
+
+        // Remove from parent's children map
+        let mut new_map = (*map).clone();
+        new_map.remove(name);
+        self.nodes.update_sync(&parent, |_, n| {
+            n.children = DirChildren::Populated(Arc::new(new_map));
+        });
+
+        // Remove the inode itself
+        self.nodes.remove_sync(&child_ino);
+
+        Some(child_ino)
+    }
+
+    /// Get the absolute path for an inode (public accessor for abspath).
+    pub fn get_path(&self, ino: INo) -> Option<String> {
+        self.abspath(ino)
+    }
+
+    /// Get a reference to the backend.
+    pub fn backend(&self) -> &Arc<B> {
+        &self.backend
+    }
 }
diff --git a/crates/mesa-dev/src/models/commit.rs b/crates/mesa-dev/src/models/commit.rs
index 1770122..799a9cc 100644
--- a/crates/mesa-dev/src/models/commit.rs
+++ b/crates/mesa-dev/src/models/commit.rs
@@ -24,6 +24,16 @@ pub enum CommitFileAction {
     Delete,
 }
 
+/// Encoding used for file content in a commit.
+#[derive(Debug, Clone, Serialize)]
+#[serde(rename_all = "lowercase")]
+pub enum CommitEncoding {
+    /// Base64-encoded content.
+    Base64,
+    /// UTF-8 text content.
+    Utf8,
+}
+
 /// A file change within a commit.
 #[derive(Debug, Clone, Serialize)]
 pub struct CommitFile {
@@ -31,7 +41,9 @@ pub struct CommitFile {
     pub action: CommitFileAction,
     /// File path.
     pub path: String,
-    /// Base64-encoded file content (for upsert).
+    /// Content encoding.
+    pub encoding: CommitEncoding,
+    /// File content (for upsert).
     #[serde(skip_serializing_if = "Option::is_none")]
     pub content: Option<String>,
 }
diff --git a/crates/mesa-dev/src/models/mod.rs b/crates/mesa-dev/src/models/mod.rs
index d91f148..b9fe4d3 100644
--- a/crates/mesa-dev/src/models/mod.rs
+++ b/crates/mesa-dev/src/models/mod.rs
@@ -12,8 +12,8 @@ mod repo;
 pub use admin::{ApiKey, ApiKeyCreated, ApiKeyScope, CreateApiKeyRequest, ListApiKeysResponse};
 pub use branch::{Branch, CreateBranchRequest, ListBranchesResponse};
 pub use commit::{
-    Author, Commit, CommitFile, CommitFileAction, CommitSummary, CreateCommitRequest,
-    ListCommitsResponse,
+    Author, Commit, CommitEncoding, CommitFile, CommitFileAction, CommitSummary,
+    CreateCommitRequest, ListCommitsResponse,
 };
 pub use common::SuccessResponse;
 pub use content::{Content, DirEntry, DirEntryType};
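
Note for reviewers: the sketch below shows how the new commit pipeline could be exercised directly from inside the git-fs crate, without going through FUSE. It is a minimal sketch, not part of the change: it only uses items this diff introduces (`CommitRequest`, `CommitWorkerConfig`, `spawn_commit_worker`, and `mesa_dev`'s `Mesa` and `Author`), and the API key, org/repo names, file path, and author details are all placeholders.

```rust
use mesa_dev::{models::Author, Mesa};
use tokio::sync::mpsc;

use crate::commit_worker::{spawn_commit_worker, CommitRequest, CommitWorkerConfig};

fn demo_commit_pipeline() {
    // Dedicated runtime, mirroring how MesaFS::new owns one for its FUSE callbacks.
    let rt = tokio::runtime::Runtime::new().expect("failed to create Tokio runtime");

    // Unbounded channel: callers enqueue requests without blocking on the network.
    let (commit_tx, commit_rx) = mpsc::unbounded_channel();

    spawn_commit_worker(
        &rt,
        CommitWorkerConfig {
            mesa: Mesa::builder("MESA_API_KEY").build(), // placeholder API key
            org: "example-org".to_owned(),               // placeholder org
            repo: "example-repo".to_owned(),             // placeholder repo
            branch: "main".to_owned(),
            author: Author {
                name: "Jane Doe".to_owned(),             // placeholder author
                email: "jane@example.com".to_owned(),
                date: None,
            },
        },
        commit_rx,
    );

    // Each request becomes one commit (here "Create notes/hello.txt") pushed in the background.
    if commit_tx
        .send(CommitRequest::Create {
            path: "notes/hello.txt".to_owned(),
            content: b"hello".to_vec(),
        })
        .is_err()
    {
        eprintln!("commit worker has shut down");
    }

    // Dropping every sender closes the channel, which lets the worker loop exit.
    drop(commit_tx);
}
```

This mirrors what the FUSE handlers in mesafuse.rs do: they enqueue a request, optimistically update the in-memory inode cache, and reply to the kernel immediately, while the actual push to the remote happens later on the background worker.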