remove treehouse-format crate and collapse everything into src

commit b792688776
parent ca127a9411

66 changed files with 145 additions and 112 deletions
src/cli/fix.rs (new file, 235 lines)
@@ -0,0 +1,235 @@
use std::ops::{ControlFlow, Range};

use anyhow::{anyhow, Context};
use codespan_reporting::diagnostic::Diagnostic;
use tracing::{error, info};

use crate::{
    parse::{self, parse_toml_with_diagnostics, parse_tree_with_diagnostics},
    state::{report_diagnostics, FileId, Source, Treehouse},
    tree::ast::Branch,
    vfs::{self, Content, Dir, Edit, EditPath, VPath},
};

use super::{FixAllArgs, FixArgs};

struct Fix {
    range: Range<usize>,
    replacement: String,
}

#[derive(Default)]
struct State {
    fixes: Vec<Fix>,
}

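/// Recursively walks a branch and its children, queueing up a `Fix` for every branch that does
/// not yet have an `id` attribute. Each fix splices a freshly generated ULID into the branch's
/// attributes.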
fn dfs_fix_branch(treehouse: &mut Treehouse, file_id: FileId, state: &mut State, branch: &Branch) {
    let mut rng = rand::thread_rng();
    let ulid = ulid::Generator::new()
        .generate_with_source(&mut rng)
        .expect("failed to generate ulid for block"); // (wtf moment. you know how big the 80-bit combination space is?)

    let indent = " ".repeat(branch.indent_level);
    if let Some(attributes) = branch.attributes.clone() {
        // Scenario: Attributes need to be parsed as TOML and the id attribute has to be added into
        // the top-level table. Then we also need to pretty-print everything to match the right
        // indentation level.
        if let Ok(mut toml) =
            parse_toml_with_diagnostics(treehouse, file_id, attributes.data.clone())
        {
            if !toml.contains_key("id") {
                toml["id"] = toml_edit::value(ulid.to_string());
                toml.key_decor_mut("id")
                    .unwrap()
                    .set_prefix(" ".repeat(branch.indent_level + 2));
            }
            let mut toml_string = toml.to_string();

            // This is incredibly janky and barely works.
            let leading_spaces: usize = toml_string.chars().take_while(|&c| c == ' ').count();
            match leading_spaces {
                0 => toml_string.insert(0, ' '),
                1 => (),
                _ => toml_string.replace_range(0..leading_spaces - 1, ""),
            }

            let toml_string = fix_indent_in_generated_toml(&toml_string, branch.indent_level);

            state.fixes.push(Fix {
                range: attributes.data.clone(),
                replacement: toml_string,
            })
        }
    } else {
        // Scenario: No attributes at all.
        // In this case we can do a fast path where we generate the `% id = "whatever"` string
        // directly, not going through toml_edit.
        state.fixes.push(Fix {
            range: branch.kind_span.start..branch.kind_span.start,
            replacement: format!("% id = \"{ulid}\"\n{indent}"),
        });
    }

    // Then we fix child branches.
    for child in &branch.children {
        dfs_fix_branch(treehouse, file_id, state, child);
    }
}

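/// Reindents the TOML emitted by `toml_edit` so that every line after the first lines up with
/// the branch's attribute block (`min_indent_level + 2` spaces), with `min_indent_level` spaces
/// appended after the final line.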
fn fix_indent_in_generated_toml(toml: &str, min_indent_level: usize) -> String {
    let toml = toml.trim_end();

    let mut result = String::with_capacity(toml.len());

    for (i, line) in toml.lines().enumerate() {
        if line.is_empty() {
            result.push('\n');
        } else {
            let desired_line_indent_level = if i == 0 { 1 } else { min_indent_level + 2 };
            let leading_spaces: usize = line.chars().take_while(|&c| c == ' ').count();
            let needed_indentation = desired_line_indent_level.saturating_sub(leading_spaces);
            for _ in 0..needed_indentation {
                result.push(' ');
            }
            result.push_str(line);
            result.push('\n');
        }
    }

    for _ in 0..min_indent_level {
        result.push(' ');
    }

    result
}

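/// Parses a single .tree file and returns its source with all fixes applied, or
/// `parse::ErrorsEmitted` after appending the parser's diagnostics to `diagnostics`.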
pub fn fix_file(
    treehouse: &mut Treehouse,
    diagnostics: &mut Vec<Diagnostic<FileId>>,
    file_id: FileId,
) -> Result<String, parse::ErrorsEmitted> {
    let source = treehouse.source(file_id).input();
    parse_tree_with_diagnostics(file_id, source)
        .map(|roots| {
            let mut source = treehouse.source(file_id).input().to_owned();
            let mut state = State::default();

            for branch in &roots.branches {
                dfs_fix_branch(treehouse, file_id, &mut state, branch);
            }

            // Doing a depth-first search of the branches yields fixes from the beginning of the file
            // to its end. The most efficient way to apply all the fixes then is to reverse their order,
            // which lets us modify the source string in place because the fix ranges always stay
            // correct.
            for fix in state.fixes.iter().rev() {
                source.replace_range(fix.range.clone(), &fix.replacement);
            }

            source
        })
        .map_err(|mut new| {
            diagnostics.append(&mut new);
            parse::ErrorsEmitted
        })
}

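/// CLI entry point for fixing a single file. Reads the file (or stdin when the path is `-`),
/// fixes it, and either returns a write `Edit` (when `apply` is set) or prints the fixed source
/// to stdout.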
pub fn fix_file_cli(fix_args: FixArgs, root: &dyn Dir) -> anyhow::Result<Edit> {
    let file = if &*fix_args.file == VPath::new("-") {
        std::io::read_to_string(std::io::stdin().lock()).context("cannot read file from stdin")?
    } else {
        vfs::query::<Content>(root, &fix_args.file)
            .ok_or_else(|| anyhow!("cannot read file to fix"))?
            .string()?
    };

    let mut treehouse = Treehouse::new();
    let mut diagnostics = vec![];
    let file_id = treehouse.add_file(fix_args.file.clone(), Source::Other(file));

    Ok(
        if let Ok(fixed) = fix_file(&mut treehouse, &mut diagnostics, file_id) {
            if fix_args.apply {
                let edit_path = vfs::query::<EditPath>(root, &fix_args.file).ok_or_else(|| {
                    anyhow!(
                        "{} is not an editable file (perhaps it is not in a persistent path?)",
                        fix_args.file
                    )
                })?;

                // Try to write the backup first. If writing that fails, bail out without overwriting
                // the source file.
                if let Some(backup_path) = fix_args.backup {
                    let backup_edit_path =
                        vfs::query::<EditPath>(root, &backup_path).ok_or_else(|| {
                            anyhow!("backup file {backup_path} is not an editable file")
                        })?;
                    Edit::Seq(vec![
                        Edit::Write(
                            backup_edit_path,
                            treehouse.source(file_id).input().to_owned().into(),
                        ),
                        Edit::Write(edit_path, fixed.into()),
                    ])
                } else {
                    Edit::Write(edit_path, fixed.into())
                }
            } else {
                println!("{fixed}");
                Edit::NoOp
            }
        } else {
            report_diagnostics(&treehouse, &diagnostics)?;
            Edit::NoOp
        },
    )
}

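/// Batch variant of `fix_file_cli`: walks `dir`, gathers an `Edit` for every `.tree` file whose
/// fixed source differs from the original, and wraps everything in `Edit::Dry` unless `--apply`
/// is passed.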
pub fn fix_all_cli(fix_all_args: FixAllArgs, dir: &dyn Dir) -> anyhow::Result<Edit> {
    let mut edits = vec![];

    fn fix_one(dir: &dyn Dir, path: &VPath) -> anyhow::Result<Edit> {
        if path.extension() == Some("tree") {
            let Some(content) = vfs::query::<Content>(dir, path).map(Content::bytes) else {
                return Ok(Edit::NoOp);
            };
            let content = String::from_utf8(content).context("file is not valid UTF-8")?;

            let mut treehouse = Treehouse::new();
            let mut diagnostics = vec![];
            let file_id = treehouse.add_file(path.to_owned(), Source::Other(content));
            let edit_path = vfs::query::<EditPath>(dir, path).context("path is not editable")?;

            if let Ok(fixed) = fix_file(&mut treehouse, &mut diagnostics, file_id) {
                if fixed != treehouse.source(file_id).input() {
                    return Ok(Edit::Write(edit_path, fixed.into()));
                }
            } else {
                report_diagnostics(&treehouse, &diagnostics)?;
            }
        }

        Ok(Edit::NoOp)
    }

    info!("gathering edits");
    vfs::walk_dir_rec(dir, VPath::ROOT, &mut |path| {
        match fix_one(dir, path) {
            Ok(Edit::NoOp) => (),
            Ok(edit) => edits.push(edit),
            Err(err) => error!("cannot fix {path}: {err:?}"),
        }

        ControlFlow::Continue(())
    });

    // NOTE: This number may be higher than you expect, because NoOp edits also count!
    info!("{} edits to apply", edits.len());

    if !fix_all_args.apply {
        info!("dry run; add `--apply` to apply changes");
        Ok(Edit::Dry(Box::new(Edit::All(edits))))
    } else {
        Ok(Edit::All(edits))
    }
}
src/cli/serve.rs (new file, 175 lines)
@@ -0,0 +1,175 @@
#[cfg(debug_assertions)]
mod live_reload;
mod picture_upload;

use std::{net::Ipv4Addr, sync::Arc};

use axum::http::header::LOCATION;
use axum::{
    extract::{Path, Query, RawQuery, State},
    http::{
        header::{CACHE_CONTROL, CONTENT_TYPE},
        HeaderValue, StatusCode,
    },
    response::{Html, IntoResponse, Response},
    routing::get,
    Router,
};
use serde::Deserialize;
use tokio::net::TcpListener;
use tracing::{error, info, instrument};

use crate::dirs::Dirs;
use crate::sources::Sources;
use crate::state::Source;
use crate::vfs::asynch::AsyncDir;
use crate::vfs::{self, VPath};

mod system {
    use crate::vfs::VPath;

    pub const INDEX: &VPath = VPath::new_const("index");
    pub const FOUR_OH_FOUR: &VPath = VPath::new_const("_treehouse/404");
    pub const B_DOCS: &VPath = VPath::new_const("_treehouse/b");
}

struct Server {
    sources: Arc<Sources>,
    target: AsyncDir,
}

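/// Starts the HTTP server on 0.0.0.0:{port}, serving the generated site out of `target`.
/// The /dev/* routes (live reload, picture upload) exist only on debug builds.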
#[instrument(skip(sources, dirs, target))]
pub async fn serve(
    sources: Arc<Sources>,
    dirs: Arc<Dirs>,
    target: AsyncDir,
    port: u16,
) -> anyhow::Result<()> {
    let app = Router::new()
        .route("/", get(index)) // needed explicitly because * does not match empty paths
        .route("/*path", get(vfs_entry))
        .route("/b", get(branch))
        .route("/treehouse/quit", get(quit))
        .fallback(get(four_oh_four))
        .with_state(Arc::new(Server {
            sources: sources.clone(),
            target,
        }));

    #[cfg(debug_assertions)]
    let app = app
        .nest("/dev/live-reload", live_reload::router())
        .nest("/dev/picture-upload", picture_upload::router(dirs));

    info!("serving on port {port}");
    let listener = TcpListener::bind((Ipv4Addr::from([0u8, 0, 0, 0]), port)).await?;
    Ok(axum::serve(listener, app).await?)
}

#[derive(Debug, Deserialize)]
struct VfsQuery {
    #[serde(rename = "v")]
    content_version: Option<String>,
}

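/// Resolves a request path against the VFS: configured redirects get a 301, everything else is
/// served with its content type, plus an immutable cache header when a content version (`?v=`)
/// is present in the query.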
#[instrument(skip(state))]
async fn get_static_file(path: &str, query: &VfsQuery, state: &Server) -> Option<Response> {
    let vpath = VPath::try_new(path).ok()?;

    if let Some(target) = state.sources.config.redirects.path.get(vpath) {
        let url = vfs::url(&state.sources.config.site, state.target.sync(), target)?;
        return Some((StatusCode::MOVED_PERMANENTLY, [(LOCATION, url)]).into_response());
    }

    let content = state.target.content(vpath).await?;
    let content_type = HeaderValue::from_str(content.kind()).inspect_err(
        |err| error!(?err, content_type = ?content.kind(), "content type cannot be used as an HTTP header"),
    ).ok()?;
    let mut response = content.bytes().into_response();
    response.headers_mut().insert(CONTENT_TYPE, content_type);

    if query.content_version.is_some() {
        response.headers_mut().insert(
            CACHE_CONTROL,
            HeaderValue::from_static("public, max-age=31536000, immutable"),
        );
    }

    Some(response)
}

async fn vfs_entry(
    Path(path): Path<String>,
    Query(query): Query<VfsQuery>,
    State(state): State<Arc<Server>>,
) -> Response {
    if let Some(response) = get_static_file(&path, &query, &state).await {
        response
    } else {
        four_oh_four(State(state)).await
    }
}

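/// Serves one of the `system` pages above; if the page is missing from the target directory,
/// responds with a 500 instead.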
async fn system_page(target: &AsyncDir, path: &VPath, status_code: StatusCode) -> Response {
    if let Some(content) = target.content(path).await {
        (status_code, Html(content.bytes())).into_response()
    } else {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("500 Internal Server Error: system page {path} is not available"),
        )
            .into_response()
    }
}

async fn index(State(state): State<Arc<Server>>) -> Response {
    system_page(&state.target, system::INDEX, StatusCode::OK).await
}

async fn four_oh_four(State(state): State<Arc<Server>>) -> Response {
    system_page(&state.target, system::FOUR_OH_FOUR, StatusCode::NOT_FOUND).await
}

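/// Permalink endpoint: `/b?<named_id>` resolves a branch by its named ID (or a redirect for it)
/// and 302s to the branch's page and HTML anchor. Unknown IDs get the 404 page, and a bare `/b`
/// serves the branch docs page.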
#[instrument(skip(state))]
async fn branch(RawQuery(named_id): RawQuery, State(state): State<Arc<Server>>) -> Response {
    if let Some(named_id) = named_id {
        let branch_id = state
            .sources
            .treehouse
            .branches_by_named_id
            .get(&named_id)
            .copied()
            .or_else(|| {
                state
                    .sources
                    .treehouse
                    .branch_redirects
                    .get(&named_id)
                    .copied()
            });
        if let Some(branch_id) = branch_id {
            let branch = state.sources.treehouse.tree.branch(branch_id);
            if let Source::Tree { tree_path, .. } = state.sources.treehouse.source(branch.file_id) {
                if let Some(url) =
                    vfs::url(&state.sources.config.site, &state.target.sync(), tree_path)
                {
                    let url = format!("{url}#{}", branch.html_id);
                    return (StatusCode::FOUND, [(LOCATION, url)]).into_response();
                }
            }
        }

        system_page(&state.target, system::FOUR_OH_FOUR, StatusCode::NOT_FOUND).await
    } else {
        system_page(&state.target, system::B_DOCS, StatusCode::OK).await
    }
}

async fn quit() -> impl IntoResponse {
    info!("somebody just quit the treehouse. congration to them!");

    (
        StatusCode::FOUND,
        [(LOCATION, "https://www.youtube.com/watch?v=dQw4w9WgXcQ")],
    )
}
src/cli/serve/live_reload.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
use std::time::Duration;

use axum::{routing::get, Router};
use tokio::time::sleep;

pub fn router<S>() -> Router<S> {
    let router = Router::new().route("/back-up", get(back_up));

    // The endpoint for immediate reload is only enabled on debug builds.
    // Release builds use the exponential backoff system that detects if the WebSocket is closed.
    #[cfg(debug_assertions)]
    let router = router.route("/stall", get(stall));

    router.with_state(())
}

#[cfg(debug_assertions)]
async fn stall() -> String {
    loop {
        // Sleep for a day, I guess. Just to uphold the connection forever without really using any
        // significant resources.
        sleep(Duration::from_secs(60 * 60 * 24)).await;
    }
}

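/// Responds immediately with an empty body; presumably polled by the client's live reload
/// script to detect that the server has come back up after a restart.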
async fn back_up() -> String {
    "".into()
}
src/cli/serve/picture_upload.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
use std::{io::Cursor, sync::Arc};

use axum::{
    body::Bytes,
    debug_handler,
    extract::{DefaultBodyLimit, Query, State},
    response::IntoResponse,
    routing::post,
    Json, Router,
};
use serde::{Deserialize, Serialize};
use tracing::info;

use crate::{
    dirs::Dirs,
    vfs::{self, Edit, EditPath, VPathBuf},
};

pub fn router<S>(dirs: Arc<Dirs>) -> Router<S> {
    Router::new()
        .route("/", post(picture_upload))
        .layer(DefaultBodyLimit::disable())
        .with_state(dirs)
}

#[derive(Debug, Deserialize)]
struct PictureUpload {
    label: String,
    format: String,
    compression: Compression,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
enum Compression {
    Lossless,
    GameScreenshot,
}

impl Compression {
    pub fn output_format(self) -> Option<&'static str> {
        match self {
            Compression::Lossless => None,
            Compression::GameScreenshot => Some("image/webp"),
        }
    }
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
enum Response {
    Ulid(String),
    Error(String),
}

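/// Compresses an uploaded image according to the requested `Compression`: `Lossless` passes the
/// original bytes through unchanged, while `GameScreenshot` re-encodes to lossy WebP at
/// quality 85.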
fn compress(image_data: &[u8], compression: Compression) -> anyhow::Result<Vec<u8>> {
    match compression {
        Compression::Lossless => Ok(image_data.to_vec()),
        Compression::GameScreenshot => {
            info!("decompressing original image");
            let decompressed = image::ImageReader::new(Cursor::new(image_data))
                .with_guessed_format()?
                .decode()?
                .to_rgba8();

            info!("compressing to webp");
            let compressed = webp::Encoder::new(
                &decompressed,
                webp::PixelLayout::Rgba,
                decompressed.width(),
                decompressed.height(),
            )
            .encode(85.0)
            .to_vec();
            Ok(compressed)
        }
    }
}

async fn write_compressed(
    image_data: &[u8],
    compression: Compression,
    edit_path: EditPath,
) -> anyhow::Result<()> {
    let compressed = compress(image_data, compression)?;
    Edit::Write(edit_path, compressed).apply().await?;
    Ok(())
}

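/// POST handler for picture uploads: generates a ULID-prefixed file name, compresses the request
/// body according to the query parameters, writes it into the pictures directory, and responds
/// with the ULID (or an error) as JSON.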
#[debug_handler]
async fn picture_upload(
    State(dirs): State<Arc<Dirs>>,
    Query(mut params): Query<PictureUpload>,
    image: Bytes,
) -> impl IntoResponse {
    info!(?params, "uploading picture");

    let ulid = ulid::Generator::new()
        .generate_with_source(&mut rand::thread_rng())
        .expect("failed to generate ulid");

    if params.label.is_empty() {
        params.label = "untitled".into();
    }

    let file_name = VPathBuf::new(format!(
        "{ulid}-{}.{}",
        params.label,
        get_extension(params.compression.output_format().unwrap_or(&params.format))
            .unwrap_or("unknown")
    ));
    let Some(edit_path) = vfs::query::<EditPath>(&dirs.pic, &file_name) else {
        return Json(Response::Error(format!("{file_name} is not editable")));
    };

    let result = write_compressed(&image, params.compression, edit_path).await;
    info!(?result, "done processing");
    Json(match result {
        Ok(()) => Response::Ulid(ulid.to_string()),
        Err(error) => Response::Error(error.to_string()),
    })
}

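/// Maps a MIME content type to the file extension used for uploaded pictures.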
fn get_extension(content_type: &str) -> Option<&'static str> {
    match content_type {
        "image/png" => Some("png"),
        "image/jpeg" => Some("jpg"),
        "image/svg+xml" => Some("svg"),
        "image/webp" => Some("webp"),
        _ => None,
    }
}
src/cli/wc.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
use std::ops::ControlFlow;

use crate::{
    parse::parse_tree_with_diagnostics,
    state::{report_diagnostics, Source, Treehouse},
    tree::ast::{Branch, Roots},
    vfs::{self, Content, Dir, VPath},
};

use super::WcArgs;

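/// Counts the whitespace-separated words in a branch's content, plus all of its children,
/// recursively.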
fn wc_branch(source: &str, branch: &Branch) -> usize {
    let word_count = source[branch.content.clone()].split_whitespace().count();
    word_count
        + branch
            .children
            .iter()
            .map(|branch| wc_branch(source, branch))
            .sum::<usize>()
}

fn wc_roots(source: &str, roots: &Roots) -> usize {
    roots
        .branches
        .iter()
        .map(|branch| wc_branch(source, branch))
        .sum()
}

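/// CLI entry point for `wc`: prints a word count for each given .tree file (or for every .tree
/// file under the content directory when no paths are given), followed by the grand total,
/// wc(1)-style.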
pub fn wc_cli(content_dir: &dyn Dir, mut wc_args: WcArgs) -> anyhow::Result<()> {
    if wc_args.paths.is_empty() {
        vfs::walk_dir_rec(content_dir, VPath::ROOT, &mut |path| {
            if path.extension() == Some("tree") {
                wc_args.paths.push(path.to_owned());
            }
            ControlFlow::Continue(())
        });
    }

    let mut treehouse = Treehouse::new();

    let mut total = 0;

    for path in &wc_args.paths {
        if let Some(content) =
            vfs::query::<Content>(content_dir, path).and_then(|b| b.string().ok())
        {
            let file_id = treehouse.add_file(path.clone(), Source::Other(content.clone()));
            match parse_tree_with_diagnostics(file_id, &content) {
                Ok(parsed) => {
                    let source = treehouse.source(file_id);
                    let word_count = wc_roots(source.input(), &parsed);
                    println!("{word_count:>8} {}", treehouse.path(file_id));
                    total += word_count;
                }
                Err(diagnostics) => {
                    report_diagnostics(&treehouse, &diagnostics)?;
                }
            }
        }
    }

    println!("{total:>8} total");

    Ok(())
}