diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 50a9e61d38..f6725a202f 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -1667,32 +1667,33 @@ impl BufferSnapshot { .and_then(|language| language.grammar.as_ref()) } - pub fn range_for_word_token_at<T: ToOffset>( - &self, - position: T, - ) -> Option<Range<usize>> { - let offset = position.to_offset(self); + pub fn surrounding_word<T: ToOffset>(&self, start: T) -> (Range<usize>, Option<CharKind>) { + let mut start = start.to_offset(self); + let mut end = start; + let mut next_chars = self.chars_at(start).peekable(); + let mut prev_chars = self.reversed_chars_at(start).peekable(); + let word_kind = cmp::max( + prev_chars.peek().copied().map(char_kind), + next_chars.peek().copied().map(char_kind), + ); - // Find the first leaf node that touches the position. - let tree = self.tree.as_ref()?; - let mut cursor = tree.root_node().walk(); - while cursor.goto_first_child_for_byte(offset).is_some() {} - let node = cursor.node(); - if node.child_count() > 0 { - return None; + for ch in prev_chars { + if Some(char_kind(ch)) == word_kind && ch != '\n' { + start -= ch.len_utf8(); + } else { + break; + } } - // Check that the leaf node contains word characters. 
- let range = node.byte_range(); - if self - .text_for_range(range.clone()) - .flat_map(str::chars) - .any(|c| c.is_alphanumeric()) - { - return Some(range); - } else { - None + for ch in next_chars { + if Some(char_kind(ch)) == word_kind && ch != '\n' { + end += ch.len_utf8(); + } else { + break; + } } + + (start..end, word_kind) } pub fn range_for_syntax_ancestor<T: ToOffset>(&self, range: Range<T>) -> Option<Range<usize>> { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 7c291cade7..07a53fb892 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -18,10 +18,10 @@ use gpui::{ use language::{ point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, - range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CodeAction, CodeLabel, Completion, - Diagnostic, DiagnosticEntry, DiagnosticSet, Event as BufferEvent, File as _, Language, - LanguageRegistry, LanguageServerName, LocalFile, LspAdapter, OffsetRangeExt, Operation, Patch, - PointUtf16, TextBufferSnapshot, ToOffset, ToPointUtf16, Transaction, + range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CharKind, CodeAction, CodeLabel, + Completion, Diagnostic, DiagnosticEntry, DiagnosticSet, Event as BufferEvent, File as _, + Language, LanguageRegistry, LanguageServerName, LocalFile, LspAdapter, OffsetRangeExt, + Operation, Patch, PointUtf16, TextBufferSnapshot, ToOffset, ToPointUtf16, Transaction, }; use lsp::{ DiagnosticSeverity, DiagnosticTag, DocumentHighlightKind, LanguageServer, LanguageString, @@ -3182,9 +3182,12 @@ impl Project { let Range { start, end } = range_for_token .get_or_insert_with(|| { let offset = position.to_offset(&snapshot); - snapshot - .range_for_word_token_at(offset) - .unwrap_or_else(|| offset..offset) + let (range, kind) = snapshot.surrounding_word(offset); + if kind == Some(CharKind::Word) { + range + } else { + offset..offset + } }) .clone(); let text = lsp_completion @@ -7633,6 +7636,32 @@ mod tests { 
completions[0].old_range.to_offset(&snapshot), text.len() - 3..text.len() ); + + let text = "let a = \"atoms/cmp\""; + buffer.update(cx, |buffer, cx| buffer.set_text(text, cx)); + let completions = project.update(cx, |project, cx| { + project.completions(&buffer, text.len() - 1, cx) + }); + + fake_server + .handle_request::<lsp::request::Completion, _, _>(|_, _| async move { + Ok(Some(lsp::CompletionResponse::Array(vec![ + lsp::CompletionItem { + label: "component".into(), + ..Default::default() + }, + ]))) + }) + .next() + .await; + let completions = completions.await.unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot()); + assert_eq!(completions.len(), 1); + assert_eq!(completions[0].new_text, "component"); + assert_eq!( + completions[0].old_range.to_offset(&snapshot), + text.len() - 4..text.len() - 1 + ); } #[gpui::test(iterations = 10)]