Skip to content

Commit

Permalink
FAT fixes, Merlin fixes, TD v1.x
Browse files Browse the repository at this point in the history
  • Loading branch information
dfgordon committed Sep 8, 2024
1 parent 89426d8 commit 7d015b6
Show file tree
Hide file tree
Showing 19 changed files with 197 additions and 80 deletions.
15 changes: 15 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,21 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [3.1.0] - 2024-09-08

### New Features

* Handles Teledisk 1.x in addition to 2.x
* Responds to a client's workspace symbols request
* Use backup FAT(s) to verify and repair the working FAT when a disk is mounted

### Fixes

* Formatting preserves blank lines
* Correct a bug in rename symbol
* Correct a bug in address hovers
* Always zero high word of cluster1 for FAT12/16

## [3.0.2] - 2024-08-24

### Fixes
Expand Down
4 changes: 2 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "a2kit"
version = "3.0.2"
version = "3.1.0"
edition = "2021"
readme = "README.md"
license = "MIT"
Expand Down Expand Up @@ -46,4 +46,4 @@ num-traits = "0.2.14"
num-derive = "0.3.3"
a2kit_macro = "1.0.0"
a2kit_macro_derive = "1.0.0"
retrocompressor = "0.1.1"
retrocompressor = "1.0.0"
1 change: 1 addition & 0 deletions src/bin/server-merlin/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,7 @@ fn main() -> Result<(), Box<dyn Error + Sync + Send>> {
..lsp::CompletionOptions::default()
}),
document_symbol_provider: Some(lsp::OneOf::Left(true)),
workspace_symbol_provider: Some(lsp::OneOf::Left(true)),
rename_provider: Some(lsp::OneOf::Left(true)),
document_range_formatting_provider: Some(lsp::OneOf::Left(true)),
document_on_type_formatting_provider: Some(lsp::DocumentOnTypeFormattingOptions {
Expand Down
6 changes: 6 additions & 0 deletions src/bin/server-merlin/request.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,12 @@ pub fn handle_request(
lsp::request::Completion::METHOD => Checkpoint::completion_response(chkpts, &mut tools.completion_provider, req.clone(), &mut resp),
lsp::request::FoldingRangeRequest::METHOD => Checkpoint::folding_range_response(chkpts, req.clone(), &mut resp),
lsp::request::SemanticTokensFullRequest::METHOD => Checkpoint::sem_tok_response(chkpts, &mut tools.highlighter, req.clone(), &mut resp),
lsp::request::WorkspaceSymbolRequest::METHOD => {
if let Ok(_params) = serde_json::from_value::<lsp::WorkspaceSymbolParams>(req.params) {
let ws_syms = tools.workspace.get_ws_symbols();
resp = lsp_server::Response::new_ok(req.id,ws_syms);
}
}

lsp::request::Shutdown::METHOD => {
logger(&connection,"shutdown request");
Expand Down
23 changes: 23 additions & 0 deletions src/bios/fat.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,29 @@ const BAD_CLUSTER32: u32 = 0xffffff7;
const FREE_CLUSTER: u32 = 0;
pub const FIRST_DATA_CLUSTER: u32 = 2;

/// Use a backup FAT to improve the working FAT.
/// A possible pattern is to call this once for each backup FAT
/// upon mounting a disk in hopes of repairing any issues with
/// the first FAT, which will then become the working FAT.
/// This only makes local checks in the data cluster area at present.
///
/// * `typ` - bits per FAT entry (12, 16, 32)
/// * `working` - buffer containing the entire working FAT; repaired in place
/// * `bak` - buffer containing the entire backup FAT; read only
/// * `cluster_end` - one past the last usable data cluster
pub fn repair(typ: usize, working: &mut [u8], bak: &[u8], cluster_end: u32) {
    // an entry "looks plausible" if it points into the data area and is not marked damaged;
    // NOTE(review): end-of-chain markers and FREE_CLUSTER fail this test, so a valid
    // chain terminator in the working FAT could be replaced by an in-range backup value —
    // confirm this is the intended repair policy.
    let plausible = |v: u32, dmg: bool| v >= FIRST_DATA_CLUSTER && v < cluster_end && !dmg;
    for n in FIRST_DATA_CLUSTER as usize..cluster_end as usize {
        let v1 = get_cluster(n, typ, working);
        let v2 = get_cluster(n, typ, bak);
        if v1 != v2 {
            log::trace!("cluster {}: working FAT has {}, backup has {}", n, v1, v2);
        }
        // only touch the working FAT when it is the one that looks wrong;
        // writing back an unchanged entry is at best wasted work
        if !plausible(v1, is_damaged(n, typ, working)) && plausible(v2, is_damaged(n, typ, bak)) {
            set_cluster(n, v2, typ, working);
        }
    }
}

/// get the value of cluster `n`.
/// `typ` = bits per FAT entry (12,16,32)
/// `buf` = buffer containing the entire FAT
Expand Down
78 changes: 42 additions & 36 deletions src/fs/fat/directory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -420,8 +420,47 @@ impl Directory {
}
None
}
/// Add the directory entry at `entry_idx` to the map `ans`, keyed by "NAME.EXT".
/// Returns `Ok(true)` if the file name is valid, `Ok(false)` otherwise,
/// and `Err(DuplicateFile)` if the key is already present.
/// `fat_typ` = bits per FAT entry (12, 16, 32); the high word of the first
/// cluster is only honored for FAT32, since FAT12/16 entries may carry
/// wrongly set bits there.
fn add_file(&self, ans: &mut BTreeMap<String,FileInfo>, fat_typ: usize, entry_idx: usize) -> Result<bool,DYNERR> {
    let entry = self.get_entry(&Ptr::Entry(entry_idx));
    let (name, typ) = super::pack::file_name_to_split_string(entry.name, entry.ext);
    let key = format!("{}.{}", name, typ);
    trace!("entry in use: {}", key);
    if ans.contains_key(&key) {
        debug!("duplicate file {} in directory", key);
        return Err(Box::new(Error::DuplicateFile));
    }
    let mut cluster1 = u16::from_le_bytes(entry.cluster1_low) as usize;
    if fat_typ == 32 {
        // FAT spec: first cluster = (cluster1_high << 16) | cluster1_low.
        // The previous code multiplied by u16::MAX (0xFFFF) instead of
        // shifting by 16 (0x10000), yielding the wrong cluster whenever
        // the high word was nonzero.
        cluster1 += (u16::from_le_bytes(entry.cluster1_high) as usize) << 16;
    }
    let finfo: FileInfo = FileInfo {
        is_root: false,
        wildcard: String::new(),
        idx: entry_idx,
        name,
        typ,
        read_only: (entry.attr & READ_ONLY) > 0,
        hidden: (entry.attr & HIDDEN) > 0,
        system: (entry.attr & SYSTEM) > 0,
        volume_id: (entry.attr & VOLUME_ID) > 0,
        directory: (entry.attr & DIRECTORY) > 0,
        archived: (entry.attr & ARCHIVE) > 0,
        long_name: (entry.attr & LONG_NAME) > 0,
        long_name_sub: (entry.attr & LONG_NAME_SUB) > 0,
        write_date: super::pack::unpack_date(entry.write_date),
        write_time: super::pack::unpack_time(entry.write_time, 0),
        create_date: super::pack::unpack_date(entry.creation_date),
        create_time: super::pack::unpack_time(entry.creation_time, entry.creation_tenth),
        access_date: super::pack::unpack_date(entry.access_date),
        eof: u32::from_le_bytes(entry.file_size) as usize,
        cluster1: Some(Ptr::Cluster(cluster1))
    };
    // determine validity before the key is moved into the map (avoids a clone)
    let name_ok = super::pack::is_name_valid(&key);
    ans.insert(key, finfo);
    Ok(name_ok)
}
/// Build an alphabetized map of file names to file info.
pub fn build_files(&self) -> Result<BTreeMap<String,FileInfo>,DYNERR> {
pub fn build_files(&self,fat_typ: usize) -> Result<BTreeMap<String,FileInfo>,DYNERR> {
let mut bad_names = 0;
let mut ans = BTreeMap::new();
// first pass collects everything except passwords
Expand All @@ -433,46 +472,13 @@ impl Directory {
if etyp==EntryType::FreeAndNoMore {
break;
}
let entry = self.get_entry(&Ptr::Entry(i));
let (name,typ) = super::pack::file_name_to_split_string(entry.name, entry.ext);
let key = [name.clone(),".".to_string(),typ.clone()].concat();
if !super::pack::is_name_valid(&key) {
bad_names += 1;
}
if bad_names > 2 {
debug!("after {} bad file names rejecting disk",bad_names);
return Err(Box::new(Error::Syntax));
}
trace!("entry in use: {}",key);
if ans.contains_key(&key) {
debug!("duplicate file {} in directory",key);
return Err(Box::new(Error::DuplicateFile));
if !self.add_file(&mut ans,fat_typ,i)? {
bad_names += 1;
}
let cluster1 = Ptr::Cluster(u16::from_le_bytes(entry.cluster1_low) as usize +
(u16::MAX as usize) * (u16::from_le_bytes(entry.cluster1_high) as usize));
let finfo: FileInfo = FileInfo {
is_root: false,
wildcard: String::new(),
idx: i,
name,
typ,
read_only: (entry.attr & READ_ONLY) > 0,
hidden: (entry.attr & HIDDEN) > 0,
system: (entry.attr & SYSTEM) > 0,
volume_id: (entry.attr & VOLUME_ID) > 0,
directory: (entry.attr & DIRECTORY) > 0,
archived: (entry.attr & ARCHIVE) > 0,
long_name: (entry.attr & LONG_NAME) > 0,
long_name_sub: (entry.attr & LONG_NAME_SUB) > 0,
write_date: super::pack::unpack_date(entry.write_date),
write_time: super::pack::unpack_time(entry.write_time,0),
create_date: super::pack::unpack_date(entry.creation_date),
create_time: super::pack::unpack_time(entry.creation_time,entry.creation_tenth),
access_date: super::pack::unpack_date(entry.access_date),
eof: u32::from_le_bytes(entry.file_size) as usize,
cluster1: Some(cluster1)
};
ans.insert(key.clone(),finfo);
}
Ok(ans)
}
Expand Down
4 changes: 2 additions & 2 deletions src/fs/fat/display.rs
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ fn is_displayed(finfo: &directory::FileInfo,pattern: &str) -> bool {

/// Display FAT directory, either in normal or `wide` mode.
/// This will behave like MS-DOS 3.3, except for color highlights.
pub fn dir(path: &str,vol_lab: &str, dir: &directory::Directory,pattern: &str,wide: bool,free: u64) -> STDRESULT {
pub fn dir(path: &str,vol_lab: &str, dir: &directory::Directory,pattern: &str,wide: bool,free: u64,fat_typ: usize) -> STDRESULT {
if vol_lab!="NO NAME" {
println!();
println!(" Volume in drive A is {}",vol_lab.blue().bold());
Expand All @@ -145,7 +145,7 @@ pub fn dir(path: &str,vol_lab: &str, dir: &directory::Directory,pattern: &str,wi
path.to_string()
}.replace("/","\\").to_uppercase();
println!(" Directory of A:{}",&displ_path);
if let Ok(sorted) = dir.build_files() {
if let Ok(sorted) = dir.build_files(fat_typ) {
// `build_files` sorts on the name automatically, so we have to "re-sort" in order
// to get the "unsorted" list.
let unsorted = dir.sort_on_entry_index(&sorted);
Expand Down
41 changes: 29 additions & 12 deletions src/fs/fat/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,18 +210,32 @@ impl Disk {
block >= fat::FIRST_DATA_CLUSTER as usize && block < fat::FIRST_DATA_CLUSTER as usize + self.boot_sector.cluster_count_usable() as usize
}
/// Open buffer if not already present. Will usually be called indirectly.
/// The backup FAT will be written when the buffer is written back.
/// Uses backup FAT(s) to try and improve the first FAT.
/// The backup FAT(s) will be written when the buffer is written back.
fn open_fat_buffer(&mut self) -> STDRESULT {
if self.maybe_fat==None {
trace!("buffering first FAT");
let num_fats = self.boot_sector.num_fats() as usize;
let fat_secs = self.boot_sector.fat_secs();
let mut sec1 = self.boot_sector.res_secs() as u64;
let mut ans = Vec::new();
let sec1 = self.boot_sector.res_secs() as u64;
for isec in sec1..sec1+fat_secs {
let [cyl,head,sec] = self.get_chs(&Ptr::LogicalSector(isec as usize))?;
let mut buf = self.img.read_sector(cyl,head,sec)?;
ans.append(&mut buf);
}
for fat in 1..num_fats {
sec1 += fat_secs;
trace!("verify and repair against backup FAT {}",fat);
let mut bak = Vec::new();
for isec in sec1..sec1+fat_secs {
let [cyl,head,sec] = self.get_chs(&Ptr::LogicalSector(isec as usize))?;
let mut buf = self.img.read_sector(cyl,head,sec)?;
bak.append(&mut buf);
}
let cluster_end = fat::FIRST_DATA_CLUSTER as u32 + self.boot_sector.cluster_count_usable() as u32;
fat::repair(self.typ,&mut ans,&bak,cluster_end);
}
self.maybe_fat = Some(ans);
}
Ok(())
Expand Down Expand Up @@ -465,6 +479,7 @@ impl Disk {
0 => return Ok(ans), // empty chain
c => {
if !self.clus_in_rng(c) {
log::error!("invalid cluster {} while getting data",c);
return Err(Box::new(Error::FirstClusterInvalid));
}
}
Expand All @@ -489,6 +504,7 @@ impl Disk {
0 => return Ok(ans), // empty chain
c => {
if !self.clus_in_rng(c as usize) {
log::error!("invalid cluster {} while getting length",c);
return Err(Box::new(Error::FirstClusterInvalid));
}
}
Expand Down Expand Up @@ -723,7 +739,7 @@ impl Disk {
return Ok((parent_info,root_info));
}
// walk the tree
let mut files = root.build_files()?;
let mut files = root.build_files(self.typ)?;
parent_info = Some(root_info);
for level in 0..n {
debug!("searching level {}: {}",level,parent_info.clone().unwrap().name);
Expand All @@ -742,7 +758,7 @@ impl Disk {
return Ok((parent_info,curr));
}
let new_dir = self.get_directory(&curr.cluster1)?;
files = new_dir.build_files()?;
files = new_dir.build_files(self.typ)?;
parent_info = Some(curr);
}
return Err(Box::new(Error::FileNotFound));
Expand All @@ -768,7 +784,7 @@ impl Disk {
if let Ok((maybe_parent,file_info)) = self.goto_path(old_path) {
if let Some(parent) = maybe_parent {
let search_dir = self.get_directory(&parent.cluster1)?;
let files = search_dir.build_files()?;
let files = search_dir.build_files(self.typ)?;
return match directory::get_file(new_name, &files) {
Some(_) => Err(Box::new(Error::DuplicateFile)),
None => Ok(EntryLocation { cluster1: parent.cluster1, entry: Ptr::Entry(file_info.idx), dir: search_dir })
Expand All @@ -791,7 +807,7 @@ impl Disk {
debug!("write {} to {}",new_name,parent_path);
if let Ok((_maybe_grandparent,parent)) = self.goto_path(&parent_path) {
let mut search_dir = self.get_directory(&parent.cluster1)?;
let files = search_dir.build_files()?;
let files = search_dir.build_files(self.typ)?;
return match directory::get_file(&new_name, &files) {
Some(_) => Err(Box::new(Error::DuplicateFile)),
None => match self.get_available_entry(&mut search_dir, &parent.cluster1) {
Expand Down Expand Up @@ -861,7 +877,7 @@ impl Disk {
true => globset::GlobBuilder::new(&pattern).literal_separator(true).build()?.compile_matcher(),
false => globset::GlobBuilder::new(&pattern.to_uppercase()).literal_separator(true).build()?.compile_matcher()
};
if let Ok(sorted) = dir.build_files() {
if let Ok(sorted) = dir.build_files(self.typ) {
for finfo in sorted.values() {
if finfo.volume_id {
continue;
Expand Down Expand Up @@ -900,7 +916,7 @@ impl Disk {
const DATE_FMT: &str = "%Y/%m/%d";
const TIME_FMT: &str = "%H:%M";
let mut files = json::JsonValue::new_object();
if let Ok(sorted) = dir.build_files() {
if let Ok(sorted) = dir.build_files(self.typ) {
for finfo in sorted.values() {
if finfo.volume_id {
continue;
Expand All @@ -921,6 +937,7 @@ impl Disk {
}
}
if include_meta {
log::trace!("get metadata for {}",key);
files[&key]["meta"] = json::JsonValue::new_object();
let meta = &mut files[&key]["meta"];
if finfo.directory {
Expand Down Expand Up @@ -1019,8 +1036,8 @@ impl super::DiskFS for Disk {
let (vol_lab,_) = self.get_root_dir()?;
let free = self.num_free_blocks()? as u64 * self.boot_sector.sec_size() * self.boot_sector.secs_per_clus() as u64;
match opt.to_lowercase().as_str() {
"" => display::dir(&path,&vol_lab,&dir,&pattern,false,free),
"/w" => display::dir(&path,&vol_lab,&dir,&pattern,true,free),
"" => display::dir(&path,&vol_lab,&dir,&pattern,false,free,self.typ),
"/w" => display::dir(&path,&vol_lab,&dir,&pattern,true,free,self.typ),
_ => Err(Box::new(Error::InvalidSwitch))
}
}
Expand Down Expand Up @@ -1110,7 +1127,7 @@ impl super::DiskFS for Disk {
return Err(Box::new(Error::WriteProtect));
}
let dir = self.get_directory(&finfo.cluster1)?;
let files = dir.build_files()?;
let files = dir.build_files(self.typ)?;
if files.len() > 2 {
error!("cannot delete directory with {} files",files.len()-2);
return Err(Box::new(Error::DirectoryNotEmpty));
Expand Down Expand Up @@ -1302,7 +1319,7 @@ impl super::DiskFS for Disk {
_ => Some(Ptr::Cluster(ref_con as usize))
};
let dir = self.get_directory(&cluster1).expect("disk error");
let files = dir.build_files().expect("could not build files");
let files = dir.build_files(self.typ).expect("could not build files");
for finfo in files.values() {
// recursion into subdirectory
if finfo.directory && finfo.name!="." && finfo.name!=".." {
Expand Down
22 changes: 12 additions & 10 deletions src/lang/applesoft/checkpoint.rs
Original file line number Diff line number Diff line change
Expand Up @@ -180,17 +180,19 @@ impl Checkpoint for CheckpointManager {
vec![]
}
fn get_renamables(&self,loc: &lsp_types::Location) -> Vec<lsp_types::Location> {
let sym = &self.symbols;
if let Some(ans) = goto_refs(&sym.scalars, loc) {
return ans;
}
if let Some(ans) = goto_refs(&sym.arrays, loc) {
return ans;
}
if let Some(ans) = goto_refs(&sym.functions, loc) {
return ans;
for line in self.symbols.lines.values() {
let combined = [line.gotos.clone(),line.gosubs.clone(),vec![line.primary]].concat();
for rng in &combined {
if range_contains_pos(rng, &loc.range.start) {
return vec![];
}
}
}
vec![]
let mut ans = Vec::new();
ans.append(&mut self.get_refs(loc));
ans.append(&mut self.get_defs(loc));
ans.append(&mut self.get_decs(loc));
ans
}
}

Expand Down
2 changes: 1 addition & 1 deletion src/lang/applesoft/hovers/hovers_addresses.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ impl AddressHovers {
};
for offset in 0..bytes {
let hov_str = create_hover(*addr, offset, info);
hov.amap.insert(*addr,hov_str);
hov.amap.insert(*addr + offset,hov_str);
}
}
hov
Expand Down
Loading

0 comments on commit 7d015b6

Please sign in to comment.