Skip to content

Build Script Setup

Your build.rs is where the Ontogen pipeline runs. There are two ways to wire it up: the Pipeline builder (recommended for almost everything) and direct calls to the underlying generator functions (when you need maximum control).

Pipeline is a fluent builder over the generator functions. It applies sensible defaults, threads each stage’s typed output into the next, and — importantly — forwards schema.entities into the admin-registry generator automatically. Method order on the builder is irrelevant; build() always runs stages in the canonical dependency order.

The canonical reference for a Pipeline-based build.rs is examples/iron-log/src-tauri/build.rs in the repo. Here is essentially that file:

build.rs
use std::path::PathBuf;
use ontogen::servers::{ClientGenerator, NamingConfig, ServerGenerator};
use ontogen::{Pipeline, ServersConfig};
/// Build-script entry point: runs the Ontogen `Pipeline` for a
/// Tauri + Nuxt project (SeaORM, DTOs, store, API, HTTP/IPC transports,
/// TypeScript split client, admin registry). Stage ordering is decided
/// by `build()`, not by the order of the builder calls below.
fn main() {
    // Re-run codegen whenever this build script itself changes.
    println!("cargo:rerun-if-changed=build.rs");
    // Watch the handwritten API sources, excluding the `generated`
    // subdirectory so freshly written output cannot retrigger builds.
    ontogen::emit_rerun_directives_excluding(
        &PathBuf::from("src/api/v1"),
        &["generated"],
    );
    let servers_config = ServersConfig {
        // Directory scanned for handwritten service functions.
        api_dir: "src/api/v1".into(),
        state_type: "AppState".into(),
        service_import_path: "crate::api::v1".into(),
        types_import_path: "crate::schema".into(),
        state_import: "crate::AppState".into(),
        naming: NamingConfig::default(),
        // Rust-side transports: Axum HTTP routes and Tauri IPC commands.
        generators: vec![
            ServerGenerator::HttpAxum {
                output: "src/api/transport/http/generated.rs".into(),
            },
            ServerGenerator::TauriIpc {
                output: "src/api/transport/ipc/generated.rs".into(),
            },
        ],
        // TypeScript artifacts emitted into the Nuxt side of the project.
        client_generators: vec![
            ClientGenerator::HttpTauriIpcSplit {
                output: "../src-nuxt/app/generated/transport.ts".into(),
                bindings_path: "../src-nuxt/app/generated/types.ts".into(),
            },
            ClientGenerator::AdminRegistry {
                output: "../src-nuxt/app/admin/generated/admin-registry.ts".into(),
            },
        ],
        rustfmt_edition: "2024".into(),
        sse_route_overrides: Default::default(),
        ts_skip_commands: vec![],
        route_prefix: None,
        store_type: Some("Store".into()),
        store_import: Some("crate::store::Store".into()),
        pagination: None,
        // Pipeline auto-fills this from parse_schema; leave empty.
        schema_entities: Vec::new(),
    };
    // build() threads each stage's typed output into the next and runs
    // the stages in canonical dependency order. Omit a method to skip
    // that stage.
    Pipeline::new("src/schema")
        .seaorm(
            "src/persistence/db/entities/generated",
            "src/persistence/db/conversions/generated",
        )
        .dtos("src/schema/dto")
        .store("src/store/generated", Some::<PathBuf>("src/store/hooks".into()))
        .api("src/api/v1/generated", "AppState")
        .servers(servers_config)
        .build()
        .unwrap_or_else(|e| {
            // Surface the failure as a cargo:warning, then abort the build.
            e.emit_cargo_warning();
            panic!("ontogen pipeline failed: {e}");
        });
    // For Tauri projects, run tauri_build::build() after Ontogen's pipeline.
}

That’s roughly 70 lines for a full Tauri + Nuxt pipeline — HTTP, IPC, TypeScript split client, admin registry. To skip a stage, omit its method call.

If you’d rather call each generator function directly — to inspect intermediate IRs, to insert custom logic between stages, or just to see what Pipeline is doing — an equivalent build looks like this (note that the TypeScript output paths in this example differ slightly from the Pipeline example above):

build.rs
use std::collections::HashMap;
use ontogen::CodegenError;
/// Unwrap a codegen result, emitting a cargo:warning before panicking.
///
/// `stage` names the generator that produced `result` so the panic
/// message identifies which pipeline step failed.
fn unwrap_codegen<T>(result: Result<T, CodegenError>, stage: &str) -> T {
    match result {
        Ok(value) => value,
        Err(e) => {
            // Make the error visible in cargo's build output first;
            // the panic message alone is easy to lose in backtraces.
            e.emit_cargo_warning();
            panic!("{stage}: {e}");
        }
    }
}
/// Build-script entry point that calls each Ontogen generator function
/// directly instead of using `Pipeline`, so intermediate outputs
/// (`schema`, `seaorm`, `api`) can be inspected or post-processed
/// between stages.
fn main() {
    println!("cargo:rerun-if-changed=build.rs");
    // ── 1. Parse schema ──────────────────────────────────────────
    // Must run first: every later stage consumes `schema.entities`.
    let schema = unwrap_codegen(
        ontogen::parse_schema(&ontogen::SchemaConfig {
            schema_dir: "src/schema".into(),
        }),
        "parse schema",
    );
    // ── 2. Persistence layer (independent generators) ────────────
    // gen_seaorm returns a SeaOrmOutput that gen_store can reuse below.
    let seaorm = unwrap_codegen(
        ontogen::gen_seaorm(
            &schema.entities,
            &ontogen::SeaOrmConfig {
                entity_output: "src/persistence/db/entities/generated".into(),
                conversion_output: "src/persistence/db/conversions/generated".into(),
                skip_conversions: vec![],
            },
        ),
        "gen_seaorm",
    );
    unwrap_codegen(
        ontogen::gen_markdown_io(
            &schema.entities,
            &ontogen::MarkdownIoConfig {
                output_dir: "src/persistence/fs_markdown/writers".into(),
            },
        ),
        "gen_markdown_io",
    );
    unwrap_codegen(
        ontogen::gen_dtos(
            &schema.entities,
            &ontogen::DtoConfig {
                output_dir: "src/schema/dto".into(),
            },
        ),
        "gen_dtos",
    );
    // ── 3. Store layer ───────────────────────────────────────────
    // The SeaOrmOutput argument is optional; gen_store works without it.
    let _store = unwrap_codegen(
        ontogen::gen_store(
            &schema.entities,
            Some(&seaorm),
            &ontogen::StoreConfig {
                output_dir: "src/store/generated".into(),
                hooks_dir: Some("src/store/hooks".into()),
                schema_module_path: ontogen::DEFAULT_SCHEMA_MODULE_PATH.into(),
            },
        ),
        "gen_store",
    );
    // ── 4. API layer ─────────────────────────────────────────────
    // ApiOutput feeds gen_servers below.
    let api = unwrap_codegen(
        ontogen::gen_api(
            &schema.entities,
            &ontogen::ApiConfig {
                output_dir: "src/api/v1/generated".into(),
                exclude: vec![],
                scan_dirs: vec!["src/api/v1".into()],
                state_type: "AppState".to_string(),
                store_type: Some("Store".to_string()),
                schema_module_path: ontogen::DEFAULT_SCHEMA_MODULE_PATH.into(),
            },
        ),
        "gen_api",
    );
    // ── 5. Server transports + client generators ─────────────────
    // There is no separate gen_clients: client TypeScript and the
    // admin registry are driven by `client_generators` here.
    let _servers = unwrap_codegen(
        ontogen::gen_servers(
            Some(&api),
            &["src/api/v1".into()],
            &ontogen::ServersConfig {
                api_dir: "src/api/v1".into(),
                state_type: "AppState".to_string(),
                service_import_path: "crate::api::v1".to_string(),
                types_import_path: "crate::schema".to_string(),
                state_import: "crate::AppState".to_string(),
                naming: ontogen::servers::NamingConfig::default(),
                generators: vec![
                    ontogen::servers::ServerGenerator::HttpAxum {
                        output: "src/api/transport/http/generated.rs".into(),
                    },
                    ontogen::servers::ServerGenerator::TauriIpc {
                        output: "src/api/transport/ipc/generated.rs".into(),
                    },
                ],
                client_generators: vec![
                    ontogen::servers::ClientGenerator::HttpTauriIpcSplit {
                        output: "../src-nuxt/app/transport/generated.ts".into(),
                        bindings_path: "../src-nuxt/app/types/bindings.ts".into(),
                    },
                    ontogen::servers::ClientGenerator::AdminRegistry {
                        output: "../src-nuxt/layers/admin/generated/admin-registry.ts".into(),
                    },
                ],
                rustfmt_edition: "2024".to_string(),
                sse_route_overrides: HashMap::new(),
                ts_skip_commands: vec![],
                route_prefix: None,
                store_type: Some("Store".to_string()),
                store_import: Some("crate::store::Store".to_string()),
                pagination: None,
                // Required for the admin-registry generator. Pipeline does
                // this for you; the explicit form must pass it manually,
                // otherwise admin-registry.ts ships with empty `fields: []`.
                schema_entities: schema.entities.clone(),
            },
        ),
        "gen_servers",
    );
}

The supporting types — NamingConfig, ServerGenerator, ClientGenerator, RoutePrefix, PaginationConfig — all live in ontogen::servers::* (re-exported at the module level). The internal paths ontogen::servers::config::* and ontogen::servers::types::* are pub(crate); always import via ontogen::servers::Foo.

The pipeline has a natural flow, and while generators are independent functions, the data dependencies create an ordering:

parse_schema ─────────────────────────────────── required first
├── gen_seaorm ─── independent, returns SeaOrmOutput
├── gen_markdown_io ─── independent, returns ()
├── gen_dtos ─── independent, returns ()
└── gen_store ─── uses SeaOrmOutput (optional), returns StoreOutput
└── gen_api ─── uses entities, returns ApiOutput
└── gen_servers ─── uses ApiOutput; emits server
transports AND client artifacts
(controlled by ServersConfig
.client_generators)

parse_schema always comes first because every other generator needs SchemaOutput.entities. After that, gen_seaorm, gen_markdown_io, and gen_dtos are independent — they only need the entity list. gen_store benefits from SeaOrmOutput but works without it. gen_api and gen_servers form a chain where each stage’s output feeds the next. There is no separate gen_clients function — client TypeScript and admin-registry generation is driven by ServersConfig.client_generators inside gen_servers.

You don’t need to run everything. Pick the generators you need.

// Minimal pipeline: parse the schema (always required), then run only
// the SeaORM generator. Any other generator can be omitted the same way.
let schema = unwrap_codegen(
    ontogen::parse_schema(&ontogen::SchemaConfig {
        schema_dir: "src/schema".into(),
    }),
    "parse schema",
);
unwrap_codegen(
    ontogen::gen_seaorm(&schema.entities, &ontogen::SeaOrmConfig {
        entity_output: "src/persistence/db/entities/generated".into(),
        conversion_output: "src/persistence/db/conversions/generated".into(),
        skip_conversions: vec![],
    }),
    "gen_seaorm",
);

Each generator is a standalone function. If you’re building a CLI tool that only needs Markdown I/O, skip the database and transport generators entirely.

Every generator returns Result<T, CodegenError>. The CodegenError enum has a variant per pipeline stage:

/// Error type returned by every Ontogen generator; one variant per
/// pipeline stage, each carrying a human-readable detail message.
pub enum CodegenError {
    /// Schema parsing (parse_schema) failed.
    Schema(String),
    /// A persistence-layer generator (SeaORM, Markdown I/O, DTOs) failed.
    Persistence(String),
    /// Store generation (gen_store) failed.
    Store(String),
    /// API generation (gen_api) failed.
    Api(String),
    /// Server-transport generation failed.
    Server(String),
    /// Client-artifact generation (TypeScript client, admin registry) failed.
    Client(String),
    /// An external tool required by the codegen pipeline is missing or failed
    /// (e.g., rustfmt, prettier).
    ExternalTool { tool: &'static str, detail: String },
}

The emit_cargo_warning method prints the error as a cargo:warning line, making it visible in your build output without digging through backtraces:

result.unwrap_or_else(|e| {
    // Print the error as a cargo:warning line before aborting the build.
    e.emit_cargo_warning();
    panic!("{stage}: {e}");
})

When something goes wrong, you’ll see:

warning: ontogen: Failed to parse src/schema/task.rs: expected `;` at line 15

This pattern is consistent across all generators, so the unwrap_codegen helper works for everything.

You might worry about unnecessary recompilation. If generated files are rewritten on every build, won’t Cargo see changed mtimes and recompile everything?

Ontogen handles this internally. All generated file writes go through write_if_changed, which compares the new content with the existing file. If they’re identical, the file isn’t touched — its mtime stays the same, and Cargo doesn’t trigger a recompile.

// Internal to Ontogen -- you don't call this directly
//
// Writes `content` to `path` only when the on-disk bytes differ. An
// untouched file keeps its mtime, so Cargo sees no change and skips
// downstream recompilation. Read errors on an existing file propagate.
pub fn write_if_changed(path: &Path, content: &[u8]) -> io::Result<()> {
    if path.exists() {
        match fs::read(path)? == content {
            // Identical bytes: leave the file (and its mtime) alone.
            true => Ok(()),
            false => fs::write(path, content),
        }
    } else {
        fs::write(path, content)
    }
}

This means you can run the full pipeline on every build without worrying about compile-time overhead. Only files that actually change get written, and only changed files trigger downstream recompilation.

Ontogen uses generated/ subdirectories by convention:

| Generator | Typical output path |
| --- | --- |
| gen_seaorm (entities) | src/persistence/db/entities/generated/ |
| gen_seaorm (conversions) | src/persistence/db/conversions/generated/ |
| gen_dtos | src/schema/dto/ |
| gen_store | src/store/generated/ |
| gen_store (hooks) | src/store/hooks/ |
| gen_api | src/api/v1/generated/ |
| gen_markdown_io | src/persistence/fs_markdown/writers/ |
| gen_servers (HTTP) | src/api/transport/http/generated.rs |
| gen_servers (IPC) | src/api/transport/ipc/generated.rs |
| gen_servers (MCP) | src/api/transport/mcp/generated.rs |

The generated/ convention makes it clear which files are machine-written and should never be edited by hand. You can either commit these directories or list them in .gitignore — most projects commit them so CI builds don’t need to run code generation.

As your entity count grows, the build script can get long. A few strategies help:

/// Config constructor for the schema-parsing stage; factoring configs
/// into helpers keeps main() short as the entity count grows.
fn schema_config() -> ontogen::SchemaConfig {
    ontogen::SchemaConfig {
        schema_dir: "src/schema".into(),
    }
}
/// Config constructor for the SeaORM entity + conversion generators.
fn seaorm_config() -> ontogen::SeaOrmConfig {
    ontogen::SeaOrmConfig {
        entity_output: "src/persistence/db/entities/generated".into(),
        conversion_output: "src/persistence/db/conversions/generated".into(),
        skip_conversions: vec![],
    }
}
/// Config constructor for gen_servers; fields elided here — see the
/// full ServersConfig literal in the Pipeline example earlier on this page.
fn servers_config() -> ontogen::ServersConfig {
    ontogen::ServersConfig {
        // ... all the fields
    }
}

If some generators are only needed for certain build targets (e.g., Tauri IPC only for desktop builds):

// Base transport set: the Axum HTTP server is generated for every target.
let mut server_generators = vec![
    ontogen::servers::ServerGenerator::HttpAxum {
        output: "src/api/transport/http/generated.rs".into(),
    },
];
// Add the Tauri IPC transport only when the desktop feature is enabled.
#[cfg(feature = "desktop")]
server_generators.push(ontogen::servers::ServerGenerator::TauriIpc {
    output: "src/api/transport/ipc/generated.rs".into(),
});

After running the full pipeline, you can inspect the returned metadata to know what was generated:

let store = unwrap_codegen(ontogen::gen_store(/* ... */), "gen_store");
// StoreOutput tells you what was generated
// Newly scaffolded hook files need hand-written bodies; surface each one
// as a cargo:warning so it shows up in the build output.
for hook in &store.scaffolded_hooks {
    println!("cargo:warning=Scaffolded new hook file: {}", hook.file_path.display());
}

This is useful for first-time setup: you’ll see which hook files were created and need your attention.