tui: refine text area word separator handling (#5541)
## Summary

- replace the word part enum with a simple `is_word_separator` helper
- keep word-boundary logic aligned with the helper and punctuation-aware behavior
- extend forward/backward deletion tests to cover whitespace around separators

## Testing

- `just fix -p codex-tui`
- `cargo test -p codex-tui`

------

https://chatgpt.com/codex/tasks/task_i_68f91c71d838832ca2a3c4f0ec1b55d4
This commit is contained in:
@@ -14,6 +14,12 @@ use textwrap::Options;
|
|||||||
use unicode_segmentation::UnicodeSegmentation;
|
use unicode_segmentation::UnicodeSegmentation;
|
||||||
use unicode_width::UnicodeWidthStr;
|
use unicode_width::UnicodeWidthStr;
|
||||||
|
|
||||||
|
/// Punctuation characters treated as word separators by word motions
/// (delete-word, move-by-word). ASCII punctuation minus `_`, so snake_case
/// identifiers stay a single word while `path/to/file` splits at each `/`.
const WORD_SEPARATORS: &str = "`~!@#$%^&*()-=+[{]}\\|;:'\",.<>/?";

/// Returns `true` when `ch` is one of the characters in [`WORD_SEPARATORS`].
///
/// Whitespace is deliberately NOT a separator here — callers handle
/// whitespace as its own category when scanning for word boundaries.
fn is_word_separator(ch: char) -> bool {
    WORD_SEPARATORS.contains(ch)
}
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
struct TextElement {
|
struct TextElement {
|
||||||
range: Range<usize>,
|
range: Range<usize>,
|
||||||
@@ -841,21 +847,23 @@ impl TextArea {
|
|||||||
|
|
||||||
pub(crate) fn beginning_of_previous_word(&self) -> usize {
|
pub(crate) fn beginning_of_previous_word(&self) -> usize {
|
||||||
let prefix = &self.text[..self.cursor_pos];
|
let prefix = &self.text[..self.cursor_pos];
|
||||||
let Some((first_non_ws_idx, _)) = prefix
|
let Some((first_non_ws_idx, ch)) = prefix
|
||||||
.char_indices()
|
.char_indices()
|
||||||
.rev()
|
.rev()
|
||||||
.find(|&(_, ch)| !ch.is_whitespace())
|
.find(|&(_, ch)| !ch.is_whitespace())
|
||||||
else {
|
else {
|
||||||
return 0;
|
return 0;
|
||||||
};
|
};
|
||||||
let before = &prefix[..first_non_ws_idx];
|
let is_separator = is_word_separator(ch);
|
||||||
let candidate = before
|
let mut start = first_non_ws_idx;
|
||||||
.char_indices()
|
for (idx, ch) in prefix[..first_non_ws_idx].char_indices().rev() {
|
||||||
.rev()
|
if ch.is_whitespace() || is_word_separator(ch) != is_separator {
|
||||||
.find(|&(_, ch)| ch.is_whitespace())
|
start = idx + ch.len_utf8();
|
||||||
.map(|(idx, ch)| idx + ch.len_utf8())
|
break;
|
||||||
.unwrap_or(0);
|
}
|
||||||
self.adjust_pos_out_of_elements(candidate, true)
|
start = idx;
|
||||||
|
}
|
||||||
|
self.adjust_pos_out_of_elements(start, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn end_of_next_word(&self) -> usize {
|
pub(crate) fn end_of_next_word(&self) -> usize {
|
||||||
@@ -864,11 +872,19 @@ impl TextArea {
|
|||||||
return self.text.len();
|
return self.text.len();
|
||||||
};
|
};
|
||||||
let word_start = self.cursor_pos + first_non_ws;
|
let word_start = self.cursor_pos + first_non_ws;
|
||||||
let candidate = match self.text[word_start..].find(|c: char| c.is_whitespace()) {
|
let mut iter = self.text[word_start..].char_indices();
|
||||||
Some(rel_idx) => word_start + rel_idx,
|
let Some((_, first_ch)) = iter.next() else {
|
||||||
None => self.text.len(),
|
return word_start;
|
||||||
};
|
};
|
||||||
self.adjust_pos_out_of_elements(candidate, false)
|
let is_separator = is_word_separator(first_ch);
|
||||||
|
let mut end = self.text.len();
|
||||||
|
for (idx, ch) in iter {
|
||||||
|
if ch.is_whitespace() || is_word_separator(ch) != is_separator {
|
||||||
|
end = word_start + idx;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.adjust_pos_out_of_elements(end, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn adjust_pos_out_of_elements(&self, pos: usize, prefer_start: bool) -> usize {
|
fn adjust_pos_out_of_elements(&self, pos: usize, prefer_start: bool) -> usize {
|
||||||
@@ -1239,6 +1255,56 @@ mod tests {
|
|||||||
assert_eq!(t.cursor(), elem_range.start);
|
assert_eq!(t.cursor(), elem_range.start);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
fn delete_backward_word_respects_word_separators() {
    // Word chars and separators are distinct word classes: deleting
    // backward from the end of "path/to/file" removes only "file"...
    let mut t = ta_with("path/to/file");
    t.set_cursor(t.text().len());
    t.delete_backward_word();
    assert_eq!(t.text(), "path/to/");
    assert_eq!(t.cursor(), t.text().len());

    // ...and the next deletion removes only the separator run "/".
    t.delete_backward_word();
    assert_eq!(t.text(), "path/to");
    assert_eq!(t.cursor(), t.text().len());

    // Trailing whitespace is skipped before classifying: "/ " deletes
    // through the separator, leaving just the word chars.
    let mut t = ta_with("foo/ ");
    t.set_cursor(t.text().len());
    t.delete_backward_word();
    assert_eq!(t.text(), "foo");
    assert_eq!(t.cursor(), 3);

    // A separator preceded by whitespace deletes only the separator.
    let mut t = ta_with("foo /");
    t.set_cursor(t.text().len());
    t.delete_backward_word();
    assert_eq!(t.text(), "foo ");
    assert_eq!(t.cursor(), 4);
}
|
||||||
|
|
||||||
|
#[test]
fn delete_forward_word_respects_word_separators() {
    // Forward deletion mirrors backward: from the start of
    // "path/to/file" only the word-char run "path" is removed...
    let mut t = ta_with("path/to/file");
    t.set_cursor(0);
    t.delete_forward_word();
    assert_eq!(t.text(), "/to/file");
    assert_eq!(t.cursor(), 0);

    // ...and the next call removes only the separator run "/".
    t.delete_forward_word();
    assert_eq!(t.text(), "to/file");
    assert_eq!(t.cursor(), 0);

    // A leading separator is deleted on its own; the following
    // whitespace and word remain.
    let mut t = ta_with("/ foo");
    t.set_cursor(0);
    t.delete_forward_word();
    assert_eq!(t.text(), " foo");
    assert_eq!(t.cursor(), 0);

    // Leading whitespace is skipped before classifying, so " /foo"
    // deletes through the separator in one motion.
    let mut t = ta_with(" /foo");
    t.set_cursor(0);
    t.delete_forward_word();
    assert_eq!(t.text(), "foo");
    assert_eq!(t.cursor(), 0);
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn yank_restores_last_kill() {
|
fn yank_restores_last_kill() {
|
||||||
let mut t = ta_with("hello");
|
let mut t = ta_with("hello");
|
||||||
|
|||||||
Reference in New Issue
Block a user