Fixed vim and zsh
@@ -0,0 +1 @@
"""Code related to snippets."""
@@ -0,0 +1,4 @@
"""In-memory representation of snippet definitions."""

from UltiSnips.snippet.definition.ultisnips import UltiSnipsSnippetDefinition
from UltiSnips.snippet.definition.snipmate import SnipMateSnippetDefinition
@@ -0,0 +1,443 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""Snippet representation after parsing."""
|
||||
|
||||
import re
|
||||
|
||||
import vim
|
||||
import textwrap
|
||||
|
||||
from UltiSnips import _vim
|
||||
from UltiSnips.compatibility import as_unicode
|
||||
from UltiSnips.indent_util import IndentUtil
|
||||
from UltiSnips.text import escape
|
||||
from UltiSnips.text_objects import SnippetInstance
|
||||
from UltiSnips.text_objects._python_code import \
|
||||
SnippetUtilCursor, SnippetUtilForAction
|
||||
|
||||
__WHITESPACE_SPLIT = re.compile(r"\s")
|
||||
|
||||
|
||||
def split_at_whitespace(string):
|
||||
"""Like string.split(), but keeps empty words as empty words."""
|
||||
return re.split(__WHITESPACE_SPLIT, string)
|
||||
|
||||
|
||||
def _words_for_line(trigger, before, num_words=None):
|
||||
"""Gets the final 'num_words' words from 'before'.
|
||||
|
||||
If num_words is None, then use the number of words in 'trigger'.
|
||||
|
||||
"""
|
||||
if num_words is None:
|
||||
num_words = len(split_at_whitespace(trigger))
|
||||
|
||||
word_list = split_at_whitespace(before)
|
||||
if len(word_list) <= num_words:
|
||||
return before.strip()
|
||||
else:
|
||||
before_words = before
|
||||
for i in range(-1, -(num_words + 1), -1):
|
||||
left = before_words.rfind(word_list[i])
|
||||
before_words = before_words[:left]
|
||||
return before[len(before_words):].strip()
|
||||
|
||||
|
||||
class SnippetDefinition(object):
|
||||
|
||||
"""Represents a snippet as parsed from a file."""
|
||||
|
||||
_INDENT = re.compile(r"^[ \t]*")
|
||||
_TABS = re.compile(r"^\t*")
|
||||
|
||||
def __init__(self, priority, trigger, value, description,
|
||||
options, globals, location, context, actions):
|
||||
self._priority = int(priority)
|
||||
self._trigger = as_unicode(trigger)
|
||||
self._value = as_unicode(value)
|
||||
self._description = as_unicode(description)
|
||||
self._opts = options
|
||||
self._matched = ''
|
||||
self._last_re = None
|
||||
self._globals = globals
|
||||
self._location = location
|
||||
self._context_code = context
|
||||
self._context = None
|
||||
self._actions = actions
|
||||
|
||||
# Make sure that we actually match our trigger in case we are
|
||||
# immediately expanded.
|
||||
self.matches(self._trigger)
|
||||
|
||||
def __repr__(self):
|
||||
return '_SnippetDefinition(%r,%s,%s,%s)' % (
|
||||
self._priority, self._trigger, self._description, self._opts)
|
||||
|
||||
def _re_match(self, trigger):
|
||||
"""Test if a the current regex trigger matches `trigger`.
|
||||
|
||||
If so, set _last_re and _matched.
|
||||
|
||||
"""
|
||||
for match in re.finditer(self._trigger, trigger):
|
||||
if match.end() != len(trigger):
|
||||
continue
|
||||
else:
|
||||
self._matched = trigger[match.start():match.end()]
|
||||
|
||||
self._last_re = match
|
||||
return match
|
||||
return False
|
||||
|
||||
def _context_match(self, visual_content):
|
||||
# skip on empty buffer
|
||||
if len(vim.current.buffer) == 1 and vim.current.buffer[0] == "":
|
||||
return
|
||||
|
||||
locals = {
|
||||
'context': None,
|
||||
'visual_mode': '',
|
||||
'visual_text': '',
|
||||
'last_placeholder': None
|
||||
}
|
||||
|
||||
if visual_content:
|
||||
locals['visual_mode'] = visual_content.mode
|
||||
locals['visual_text'] = visual_content.text
|
||||
locals['last_placeholder'] = visual_content.placeholder
|
||||
|
||||
return self._eval_code('snip.context = ' + self._context_code,
|
||||
locals).context
|
||||
|
||||
def _eval_code(self, code, additional_locals={}):
|
||||
code = "\n".join([
|
||||
'import re, os, vim, string, random',
|
||||
'\n'.join(self._globals.get('!p', [])).replace('\r\n', '\n'),
|
||||
code
|
||||
])
|
||||
|
||||
current = vim.current
|
||||
|
||||
locals = {
|
||||
'window': current.window,
|
||||
'buffer': current.buffer,
|
||||
'line': current.window.cursor[0]-1,
|
||||
'column': current.window.cursor[1]-1,
|
||||
'cursor': SnippetUtilCursor(current.window.cursor),
|
||||
}
|
||||
|
||||
locals.update(additional_locals)
|
||||
|
||||
snip = SnippetUtilForAction(locals)
|
||||
|
||||
try:
|
||||
exec(code, {'snip': snip})
|
||||
except Exception as e:
|
||||
self._make_debug_exception(e, code)
|
||||
raise
|
||||
|
||||
return snip
|
||||
|
||||
def _execute_action(
|
||||
self,
|
||||
action,
|
||||
context,
|
||||
additional_locals={}
|
||||
):
|
||||
mark_to_use = '`'
|
||||
with _vim.save_mark(mark_to_use):
|
||||
_vim.set_mark_from_pos(mark_to_use, _vim.get_cursor_pos())
|
||||
|
||||
cursor_line_before = _vim.buf.line_till_cursor
|
||||
|
||||
locals = {
|
||||
'context': context,
|
||||
}
|
||||
|
||||
locals.update(additional_locals)
|
||||
|
||||
snip = self._eval_code(action, locals)
|
||||
|
||||
if snip.cursor.is_set():
|
||||
vim.current.window.cursor = snip.cursor.to_vim_cursor()
|
||||
else:
|
||||
new_mark_pos = _vim.get_mark_pos(mark_to_use)
|
||||
|
||||
cursor_invalid = False
|
||||
|
||||
if _vim._is_pos_zero(new_mark_pos):
|
||||
cursor_invalid = True
|
||||
else:
|
||||
_vim.set_cursor_from_pos(new_mark_pos)
|
||||
if cursor_line_before != _vim.buf.line_till_cursor:
|
||||
cursor_invalid = True
|
||||
|
||||
if cursor_invalid:
|
||||
raise RuntimeError(
|
||||
'line under the cursor was modified, but ' +
|
||||
'"snip.cursor" variable is not set; either set set ' +
|
||||
'"snip.cursor" to new cursor position, or do not ' +
|
||||
'modify cursor line'
|
||||
)
|
||||
|
||||
return snip
|
||||
|
||||
def _make_debug_exception(self, e, code=''):
|
||||
e.snippet_info = textwrap.dedent("""
|
||||
Defined in: {}
|
||||
Trigger: {}
|
||||
Description: {}
|
||||
Context: {}
|
||||
Pre-expand: {}
|
||||
Post-expand: {}
|
||||
""").format(
|
||||
self._location,
|
||||
self._trigger,
|
||||
self._description,
|
||||
self._context_code if self._context_code else '<none>',
|
||||
self._actions['pre_expand'] if 'pre_expand' in self._actions
|
||||
else '<none>',
|
||||
self._actions['post_expand'] if 'post_expand' in self._actions
|
||||
else '<none>',
|
||||
code,
|
||||
)
|
||||
|
||||
e.snippet_code = code
|
||||
|
||||
def has_option(self, opt):
|
||||
"""Check if the named option is set."""
|
||||
return opt in self._opts
|
||||
|
||||
@property
|
||||
def description(self):
|
||||
"""Descriptive text for this snippet."""
|
||||
return ('(%s) %s' % (self._trigger, self._description)).strip()
|
||||
|
||||
@property
|
||||
def priority(self):
|
||||
"""The snippets priority, which defines which snippet will be preferred
|
||||
over others with the same trigger."""
|
||||
return self._priority
|
||||
|
||||
@property
|
||||
def trigger(self):
|
||||
"""The trigger text for the snippet."""
|
||||
return self._trigger
|
||||
|
||||
@property
|
||||
def matched(self):
|
||||
"""The last text that matched this snippet in match() or
|
||||
could_match()."""
|
||||
return self._matched
|
||||
|
||||
@property
|
||||
def location(self):
|
||||
"""Where this snippet was defined."""
|
||||
return self._location
|
||||
|
||||
@property
|
||||
def context(self):
|
||||
"""The matched context."""
|
||||
return self._context
|
||||
|
||||
def matches(self, before, visual_content=None):
|
||||
"""Returns True if this snippet matches 'before'."""
|
||||
# If user supplies both "w" and "i", it should perhaps be an
|
||||
# error, but if permitted it seems that "w" should take precedence
|
||||
# (since matching at word boundary and within a word == matching at word
|
||||
# boundary).
|
||||
self._matched = ''
|
||||
|
||||
words = _words_for_line(self._trigger, before)
|
||||
|
||||
if 'r' in self._opts:
|
||||
try:
|
||||
match = self._re_match(before)
|
||||
except Exception as e:
|
||||
self._make_debug_exception(e)
|
||||
raise
|
||||
|
||||
elif 'w' in self._opts:
|
||||
words_len = len(self._trigger)
|
||||
words_prefix = words[:-words_len]
|
||||
words_suffix = words[-words_len:]
|
||||
match = (words_suffix == self._trigger)
|
||||
if match and words_prefix:
|
||||
# Require a word boundary between prefix and suffix.
|
||||
boundary_chars = escape(words_prefix[-1:] +
|
||||
words_suffix[:1], r'\"')
|
||||
match = _vim.eval(
|
||||
'"%s" =~# "\\\\v.<."' %
|
||||
boundary_chars) != '0'
|
||||
elif 'i' in self._opts:
|
||||
match = words.endswith(self._trigger)
|
||||
else:
|
||||
match = (words == self._trigger)
|
||||
|
||||
# By default, we match the whole trigger
|
||||
if match and not self._matched:
|
||||
self._matched = self._trigger
|
||||
|
||||
# Ensure the match was on a word boundary if needed
|
||||
if 'b' in self._opts and match:
|
||||
text_before = before.rstrip()[:-len(self._matched)]
|
||||
if text_before.strip(' \t') != '':
|
||||
self._matched = ''
|
||||
return False
|
||||
|
||||
self._context = None
|
||||
if match and self._context_code:
|
||||
self._context = self._context_match(visual_content)
|
||||
if not self.context:
|
||||
match = False
|
||||
|
||||
return match
|
||||
|
||||
def could_match(self, before):
|
||||
"""Return True if this snippet could match the (partial) 'before'."""
|
||||
self._matched = ''
|
||||
|
||||
# List all on whitespace.
|
||||
if before and before[-1] in (' ', '\t'):
|
||||
before = ''
|
||||
if before and before.rstrip() is not before:
|
||||
return False
|
||||
|
||||
words = _words_for_line(self._trigger, before)
|
||||
|
||||
if 'r' in self._opts:
|
||||
# Test for full match only
|
||||
match = self._re_match(before)
|
||||
elif 'w' in self._opts:
|
||||
# Trim non-empty prefix up to word boundary, if present.
|
||||
qwords = escape(words, r'\"')
|
||||
words_suffix = _vim.eval(
|
||||
'substitute("%s", "\\\\v^.+<(.+)", "\\\\1", "")' % qwords)
|
||||
match = self._trigger.startswith(words_suffix)
|
||||
self._matched = words_suffix
|
||||
|
||||
# TODO: list_snippets() function cannot handle partial-trigger
|
||||
# matches yet, so for now fail if we trimmed the prefix.
|
||||
if words_suffix != words:
|
||||
match = False
|
||||
elif 'i' in self._opts:
|
||||
# TODO: It is hard to define when an in-word snippet could match,
|
||||
# therefore we check only for full-word trigger.
|
||||
match = self._trigger.startswith(words)
|
||||
else:
|
||||
match = self._trigger.startswith(words)
|
||||
|
||||
# By default, we match the words from the trigger
|
||||
if match and not self._matched:
|
||||
self._matched = words
|
||||
|
||||
# Ensure the match was on a word boundary if needed
|
||||
if 'b' in self._opts and match:
|
||||
text_before = before.rstrip()[:-len(self._matched)]
|
||||
if text_before.strip(' \t') != '':
|
||||
self._matched = ''
|
||||
return False
|
||||
|
||||
return match
|
||||
|
||||
def instantiate(self, snippet_instance, initial_text, indent):
|
||||
"""Parses the content of this snippet and brings the corresponding text
|
||||
objects alive inside of Vim."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def do_pre_expand(self, visual_content, snippets_stack):
|
||||
if 'pre_expand' in self._actions:
|
||||
locals = {'buffer': _vim.buf, 'visual_content': visual_content}
|
||||
|
||||
snip = self._execute_action(
|
||||
self._actions['pre_expand'], self._context, locals
|
||||
)
|
||||
|
||||
self._context = snip.context
|
||||
|
||||
return snip.cursor.is_set()
|
||||
else:
|
||||
return False
|
||||
|
||||
def do_post_expand(self, start, end, snippets_stack):
|
||||
if 'post_expand' in self._actions:
|
||||
locals = {
|
||||
'snippet_start': start,
|
||||
'snippet_end': end,
|
||||
'buffer': _vim.buf
|
||||
}
|
||||
|
||||
snip = self._execute_action(
|
||||
self._actions['post_expand'], snippets_stack[-1].context, locals
|
||||
)
|
||||
|
||||
snippets_stack[-1].context = snip.context
|
||||
|
||||
return snip.cursor.is_set()
|
||||
else:
|
||||
return False
|
||||
|
||||
def do_post_jump(
|
||||
self, tabstop_number, jump_direction, snippets_stack, current_snippet
|
||||
):
|
||||
if 'post_jump' in self._actions:
|
||||
start = current_snippet.start
|
||||
end = current_snippet.end
|
||||
|
||||
locals = {
|
||||
'tabstop': tabstop_number,
|
||||
'jump_direction': jump_direction,
|
||||
'tabstops': current_snippet.get_tabstops(),
|
||||
'snippet_start': start,
|
||||
'snippet_end': end,
|
||||
'buffer': _vim.buf
|
||||
}
|
||||
|
||||
snip = self._execute_action(
|
||||
self._actions['post_jump'], current_snippet.context, locals
|
||||
)
|
||||
|
||||
current_snippet.context = snip.context
|
||||
|
||||
return snip.cursor.is_set()
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def launch(self, text_before, visual_content, parent, start, end):
|
||||
"""Launch this snippet, overwriting the text 'start' to 'end' and
|
||||
keeping the 'text_before' on the launch line.
|
||||
|
||||
'Parent' is the parent snippet instance if any.
|
||||
|
||||
"""
|
||||
indent = self._INDENT.match(text_before).group(0)
|
||||
lines = (self._value + '\n').splitlines()
|
||||
ind_util = IndentUtil()
|
||||
|
||||
# Replace leading tabs in the snippet definition via proper indenting
|
||||
initial_text = []
|
||||
for line_num, line in enumerate(lines):
|
||||
if 't' in self._opts:
|
||||
tabs = 0
|
||||
else:
|
||||
tabs = len(self._TABS.match(line).group(0))
|
||||
line_ind = ind_util.ntabs_to_proper_indent(tabs)
|
||||
if line_num != 0:
|
||||
line_ind = indent + line_ind
|
||||
|
||||
result_line = line_ind + line[tabs:]
|
||||
if 'm' in self._opts:
|
||||
result_line = result_line.rstrip()
|
||||
initial_text.append(result_line)
|
||||
initial_text = '\n'.join(initial_text)
|
||||
|
||||
snippet_instance = SnippetInstance(
|
||||
self, parent, initial_text, start, end, visual_content,
|
||||
last_re=self._last_re, globals=self._globals,
|
||||
context=self._context)
|
||||
self.instantiate(snippet_instance, initial_text, indent)
|
||||
|
||||
snippet_instance.update_textobjects()
|
||||
return snippet_instance
|
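The option handling in matches() above is easiest to see outside of Vim. Below is a minimal standalone sketch (not part of this commit, and deliberately ignoring the 'r' and 'w' options, which need Vim's regex engine) of how the default, 'i' and 'b' behaviours differ; the helper names matches_default, matches_i and matches_b are made up for illustration:

def matches_default(before, trigger):
    # Default: the last whitespace-delimited word before the cursor must equal
    # the trigger (simplified; the real code also handles multi-word triggers).
    return bool(before.strip()) and before.split()[-1] == trigger


def matches_i(before, trigger):
    # 'i' (in-word expansion): the text before the cursor only has to end
    # with the trigger.
    return before.endswith(trigger)


def matches_b(before, matched):
    # 'b' (beginning of line): only whitespace may precede the matched text.
    text_before = before.rstrip()[:-len(matched)]
    return text_before.strip(' \t') == ''


assert matches_default('foo bar', 'bar')
assert not matches_default('foobar', 'bar')
assert matches_i('foobar', 'bar')        # fires inside a word
assert matches_b('    bar', 'bar')       # only indentation before the trigger
assert not matches_b('x = bar', 'bar')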
@@ -0,0 +1,22 @@
#!/usr/bin/env python
# encoding: utf-8

"""A snipMate snippet after parsing."""

from UltiSnips.snippet.definition._base import SnippetDefinition
from UltiSnips.snippet.parsing.snipmate import parse_and_instantiate


class SnipMateSnippetDefinition(SnippetDefinition):

    """See module doc."""

    SNIPMATE_SNIPPET_PRIORITY = -1000

    def __init__(self, trigger, value, description, location):
        SnippetDefinition.__init__(self, self.SNIPMATE_SNIPPET_PRIORITY,
                                   trigger, value, description, '', {}, location,
                                   None, {})

    def instantiate(self, snippet_instance, initial_text, indent):
        parse_and_instantiate(snippet_instance, initial_text, indent)
@@ -0,0 +1,15 @@
#!/usr/bin/env python
# encoding: utf-8

"""An UltiSnips snippet after parsing."""

from UltiSnips.snippet.definition._base import SnippetDefinition
from UltiSnips.snippet.parsing.ultisnips import parse_and_instantiate


class UltiSnipsSnippetDefinition(SnippetDefinition):

    """See module doc."""

    def instantiate(self, snippet_instance, initial_text, indent):
        return parse_and_instantiate(snippet_instance, initial_text, indent)
@@ -0,0 +1 @@
"""Code related to turning text into snippets."""
@@ -0,0 +1,68 @@
#!/usr/bin/env python
# encoding: utf-8

"""Common functionality of the snippet parsing codes."""

from UltiSnips.position import Position
from UltiSnips.snippet.parsing._lexer import tokenize, TabStopToken
from UltiSnips.text_objects import TabStop

from UltiSnips.text_objects import Mirror
from UltiSnips.snippet.parsing._lexer import MirrorToken


def resolve_ambiguity(all_tokens, seen_ts):
    """$1 could be a Mirror or a TabStop.

    This function figures that out.

    """
    for parent, token in all_tokens:
        if isinstance(token, MirrorToken):
            if token.number not in seen_ts:
                seen_ts[token.number] = TabStop(parent, token)
            else:
                Mirror(parent, seen_ts[token.number], token)


def tokenize_snippet_text(snippet_instance, text, indent,
                          allowed_tokens_in_text, allowed_tokens_in_tabstops,
                          token_to_textobject):
    """Turns 'text' into a stream of tokens and creates the text objects from
    those tokens that are mentioned in 'token_to_textobject' assuming the
    current 'indent'.

    The 'allowed_tokens_in_text' define which tokens will be recognized
    in 'text' while 'allowed_tokens_in_tabstops' are the tokens that
    will be recognized in TabStop placeholder text.

    """
    seen_ts = {}
    all_tokens = []

    def _do_parse(parent, text, allowed_tokens):
        """Recursive function that actually creates the objects."""
        tokens = list(tokenize(text, indent, parent.start, allowed_tokens))
        for token in tokens:
            all_tokens.append((parent, token))
            if isinstance(token, TabStopToken):
                ts = TabStop(parent, token)
                seen_ts[token.number] = ts
                _do_parse(ts, token.initial_text,
                          allowed_tokens_in_tabstops)
            else:
                klass = token_to_textobject.get(token.__class__, None)
                if klass is not None:
                    klass(parent, token)
    _do_parse(snippet_instance, text, allowed_tokens_in_text)
    return all_tokens, seen_ts


def finalize(all_tokens, seen_ts, snippet_instance):
    """Adds a tabstop 0 if none is in 'seen_ts' and brings the text of the
    snippet instance into Vim."""
    if 0 not in seen_ts:
        mark = all_tokens[-1][1].end  # Last token is always EndOfText
        m1 = Position(mark.line, mark.col)
        TabStop(snippet_instance, 0, mark, m1)
    snippet_instance.replace_initial_text()
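In a snippet body like '${1:name} = $1' the braced occurrence is lexed as a TabStopToken, while the bare $1 is a MirrorToken; resolve_ambiguity() above then turns a bare $N into a tabstop only if no tabstop N exists yet, and into a mirror otherwise. A tiny standalone sketch of that first-one-wins rule (plain tuples instead of the real TabStop/Mirror text objects):

def resolve(bare_tabstop_numbers, seen_ts):
    # 'seen_ts' records tabstop numbers that already have a real tabstop;
    # every later bare $N occurrence becomes a mirror of it.
    roles = []
    for number in bare_tabstop_numbers:
        if number not in seen_ts:
            seen_ts[number] = True
            roles.append(('tabstop', number))
        else:
            roles.append(('mirror', number))
    return roles


print(resolve([1, 2, 1], {}))   # [('tabstop', 1), ('tabstop', 2), ('mirror', 1)]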
@@ -0,0 +1,369 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""Not really a lexer in the classical sense, but code to convert snippet
|
||||
definitions into logical units called Tokens."""
|
||||
|
||||
import string
|
||||
import re
|
||||
|
||||
from UltiSnips.compatibility import as_unicode
|
||||
from UltiSnips.position import Position
|
||||
from UltiSnips.text import unescape
|
||||
|
||||
|
||||
class _TextIterator(object):
|
||||
|
||||
"""Helper class to make iterating over text easier."""
|
||||
|
||||
def __init__(self, text, offset):
|
||||
self._text = as_unicode(text)
|
||||
self._line = offset.line
|
||||
self._col = offset.col
|
||||
|
||||
self._idx = 0
|
||||
|
||||
def __iter__(self):
|
||||
"""Iterator interface."""
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
"""Returns the next character."""
|
||||
if self._idx >= len(self._text):
|
||||
raise StopIteration
|
||||
|
||||
rv = self._text[self._idx]
|
||||
if self._text[self._idx] in ('\n', '\r\n'):
|
||||
self._line += 1
|
||||
self._col = 0
|
||||
else:
|
||||
self._col += 1
|
||||
self._idx += 1
|
||||
return rv
|
||||
next = __next__ # for python2
|
||||
|
||||
def peek(self, count=1):
|
||||
"""Returns the next 'count' characters without advancing the stream."""
|
||||
if count > 1: # This might return '' if nothing is found
|
||||
return self._text[self._idx:self._idx + count]
|
||||
try:
|
||||
return self._text[self._idx]
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
@property
|
||||
def pos(self):
|
||||
"""Current position in the text."""
|
||||
return Position(self._line, self._col)
|
||||
|
||||
|
||||
def _parse_number(stream):
|
||||
"""Expects the stream to contain a number next, returns the number without
|
||||
consuming any more bytes."""
|
||||
rv = ''
|
||||
while stream.peek() and stream.peek() in string.digits:
|
||||
rv += next(stream)
|
||||
|
||||
return int(rv)
|
||||
|
||||
|
||||
def _parse_till_closing_brace(stream):
|
||||
"""
|
||||
Returns all chars till a non-escaped } is found. Other
|
||||
non escaped { are taken into account and skipped over.
|
||||
|
||||
Will also consume the closing }, but not return it
|
||||
"""
|
||||
rv = ''
|
||||
in_braces = 1
|
||||
while True:
|
||||
if EscapeCharToken.starts_here(stream, '{}'):
|
||||
rv += next(stream) + next(stream)
|
||||
else:
|
||||
char = next(stream)
|
||||
if char == '{':
|
||||
in_braces += 1
|
||||
elif char == '}':
|
||||
in_braces -= 1
|
||||
if in_braces == 0:
|
||||
break
|
||||
rv += char
|
||||
return rv
|
||||
|
||||
|
||||
def _parse_till_unescaped_char(stream, chars):
|
||||
"""
|
||||
Returns all chars till a non-escaped char is found.
|
||||
|
||||
Will also consume the closing char and return it as the second
|
||||
return value
|
||||
"""
|
||||
rv = ''
|
||||
while True:
|
||||
escaped = False
|
||||
for char in chars:
|
||||
if EscapeCharToken.starts_here(stream, char):
|
||||
rv += next(stream) + next(stream)
|
||||
escaped = True
|
||||
if not escaped:
|
||||
char = next(stream)
|
||||
if char in chars:
|
||||
break
|
||||
rv += char
|
||||
return rv, char
|
||||
|
||||
|
||||
class Token(object):
|
||||
|
||||
"""Represents a Token as parsed from a snippet definition."""
|
||||
|
||||
def __init__(self, gen, indent):
|
||||
self.initial_text = as_unicode('')
|
||||
self.start = gen.pos
|
||||
self._parse(gen, indent)
|
||||
self.end = gen.pos
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
"""Parses the token from 'stream' with the current 'indent'."""
|
||||
pass # Does nothing
|
||||
|
||||
|
||||
class TabStopToken(Token):
|
||||
|
||||
"""${1:blub}"""
|
||||
CHECK = re.compile(r'^\${\d+[:}]')
|
||||
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return cls.CHECK.match(stream.peek(10)) is not None
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
next(stream) # $
|
||||
next(stream) # {
|
||||
|
||||
self.number = _parse_number(stream)
|
||||
|
||||
if stream.peek() == ':':
|
||||
next(stream)
|
||||
self.initial_text = _parse_till_closing_brace(stream)
|
||||
|
||||
def __repr__(self):
|
||||
return 'TabStopToken(%r,%r,%r,%r)' % (
|
||||
self.start, self.end, self.number, self.initial_text
|
||||
)
|
||||
|
||||
|
||||
class VisualToken(Token):
|
||||
|
||||
"""${VISUAL}"""
|
||||
CHECK = re.compile(r"^\${VISUAL[:}/]")
|
||||
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return cls.CHECK.match(stream.peek(10)) is not None
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
for _ in range(8): # ${VISUAL
|
||||
next(stream)
|
||||
|
||||
if stream.peek() == ':':
|
||||
next(stream)
|
||||
self.alternative_text, char = _parse_till_unescaped_char(stream, '/}')
|
||||
self.alternative_text = unescape(self.alternative_text)
|
||||
|
||||
if char == '/': # Transformation going on
|
||||
try:
|
||||
self.search = _parse_till_unescaped_char(stream, '/')[0]
|
||||
self.replace = _parse_till_unescaped_char(stream, '/')[0]
|
||||
self.options = _parse_till_closing_brace(stream)
|
||||
except StopIteration:
|
||||
raise RuntimeError(
|
||||
"Invalid ${VISUAL} transformation! Forgot to escape a '/'?")
|
||||
else:
|
||||
self.search = None
|
||||
self.replace = None
|
||||
self.options = None
|
||||
|
||||
def __repr__(self):
|
||||
return 'VisualToken(%r,%r)' % (
|
||||
self.start, self.end
|
||||
)
|
||||
|
||||
|
||||
class TransformationToken(Token):
|
||||
|
||||
"""${1/match/replace/options}"""
|
||||
|
||||
CHECK = re.compile(r'^\${\d+\/')
|
||||
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return cls.CHECK.match(stream.peek(10)) is not None
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
next(stream) # $
|
||||
next(stream) # {
|
||||
|
||||
self.number = _parse_number(stream)
|
||||
|
||||
next(stream) # /
|
||||
|
||||
self.search = _parse_till_unescaped_char(stream, '/')[0]
|
||||
self.replace = _parse_till_unescaped_char(stream, '/')[0]
|
||||
self.options = _parse_till_closing_brace(stream)
|
||||
|
||||
def __repr__(self):
|
||||
return 'TransformationToken(%r,%r,%r,%r,%r)' % (
|
||||
self.start, self.end, self.number, self.search, self.replace
|
||||
)
|
||||
|
||||
|
||||
class MirrorToken(Token):
|
||||
|
||||
"""$1."""
|
||||
CHECK = re.compile(r'^\$\d+')
|
||||
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return cls.CHECK.match(stream.peek(10)) is not None
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
next(stream) # $
|
||||
self.number = _parse_number(stream)
|
||||
|
||||
def __repr__(self):
|
||||
return 'MirrorToken(%r,%r,%r)' % (
|
||||
self.start, self.end, self.number
|
||||
)
|
||||
|
||||
|
||||
class EscapeCharToken(Token):
|
||||
|
||||
"""\\n."""
|
||||
@classmethod
|
||||
def starts_here(cls, stream, chars=r'{}\$`'):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
cs = stream.peek(2)
|
||||
if len(cs) == 2 and cs[0] == '\\' and cs[1] in chars:
|
||||
return True
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
next(stream) # \
|
||||
self.initial_text = next(stream)
|
||||
|
||||
def __repr__(self):
|
||||
return 'EscapeCharToken(%r,%r,%r)' % (
|
||||
self.start, self.end, self.initial_text
|
||||
)
|
||||
|
||||
|
||||
class ShellCodeToken(Token):
|
||||
|
||||
"""`echo "hi"`"""
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return stream.peek(1) == '`'
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
next(stream) # `
|
||||
self.code = _parse_till_unescaped_char(stream, '`')[0]
|
||||
|
||||
def __repr__(self):
|
||||
return 'ShellCodeToken(%r,%r,%r)' % (
|
||||
self.start, self.end, self.code
|
||||
)
|
||||
|
||||
|
||||
class PythonCodeToken(Token):
|
||||
|
||||
"""`!p snip.rv = "Hi"`"""
|
||||
CHECK = re.compile(r'^`!p\s')
|
||||
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return cls.CHECK.match(stream.peek(4)) is not None
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
for _ in range(3):
|
||||
next(stream) # `!p
|
||||
if stream.peek() in '\t ':
|
||||
next(stream)
|
||||
|
||||
code = _parse_till_unescaped_char(stream, '`')[0]
|
||||
|
||||
# Strip the indent if any
|
||||
if len(indent):
|
||||
lines = code.splitlines()
|
||||
self.code = lines[0] + '\n'
|
||||
self.code += '\n'.join([l[len(indent):]
|
||||
for l in lines[1:]])
|
||||
else:
|
||||
self.code = code
|
||||
self.indent = indent
|
||||
|
||||
def __repr__(self):
|
||||
return 'PythonCodeToken(%r,%r,%r)' % (
|
||||
self.start, self.end, self.code
|
||||
)
|
||||
|
||||
|
||||
class VimLCodeToken(Token):
|
||||
|
||||
"""`!v g:hi`"""
|
||||
CHECK = re.compile(r'^`!v\s')
|
||||
|
||||
@classmethod
|
||||
def starts_here(cls, stream):
|
||||
"""Returns true if this token starts at the current position in
|
||||
'stream'."""
|
||||
return cls.CHECK.match(stream.peek(4)) is not None
|
||||
|
||||
def _parse(self, stream, indent):
|
||||
for _ in range(4):
|
||||
next(stream) # `!v
|
||||
self.code = _parse_till_unescaped_char(stream, '`')[0]
|
||||
|
||||
def __repr__(self):
|
||||
return 'VimLCodeToken(%r,%r,%r)' % (
|
||||
self.start, self.end, self.code
|
||||
)
|
||||
|
||||
|
||||
class EndOfTextToken(Token):
|
||||
|
||||
"""Appears at the end of the text."""
|
||||
|
||||
def __repr__(self):
|
||||
return 'EndOfText(%r)' % self.end
|
||||
|
||||
|
||||
def tokenize(text, indent, offset, allowed_tokens):
|
||||
"""Returns an iterator of tokens of 'text'['offset':] which is assumed to
|
||||
have 'indent' as the whitespace of the beginning of the lines. Only
|
||||
'allowed_tokens' are considered to be valid tokens."""
|
||||
stream = _TextIterator(text, offset)
|
||||
try:
|
||||
while True:
|
||||
done_something = False
|
||||
for token in allowed_tokens:
|
||||
if token.starts_here(stream):
|
||||
yield token(stream, indent)
|
||||
done_something = True
|
||||
break
|
||||
if not done_something:
|
||||
next(stream)
|
||||
except StopIteration:
|
||||
yield EndOfTextToken(stream, indent)
|
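Each starts_here() above is an anchored test against a short peek of the stream, and tokenize() simply asks every allowed token class in turn. A standalone sketch (not part of this commit) that reuses the CHECK patterns from the classes above to classify a few snippet-body prefixes:

import re

CHECKS = [
    ('TabStopToken', re.compile(r'^\${\d+[:}]')),
    ('VisualToken', re.compile(r'^\${VISUAL[:}/]')),
    ('TransformationToken', re.compile(r'^\${\d+\/')),
    ('MirrorToken', re.compile(r'^\$\d+')),
    ('PythonCodeToken', re.compile(r'^`!p\s')),
    ('VimLCodeToken', re.compile(r'^`!v\s')),
]


def classify(prefix):
    # First matching pattern wins, like the walk over 'allowed_tokens'
    # in tokenize().
    for name, check in CHECKS:
        if check.match(prefix):
            return name
    return None


for sample in ('${1:default}', '${VISUAL}', '${2/foo/bar/g}', '$3', '`!p snip.rv = 1`'):
    print(sample, '->', classify(sample))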
@@ -0,0 +1,38 @@
#!/usr/bin/env python
# encoding: utf-8

"""Parses a snipMate snippet definition and launches it into Vim."""

from UltiSnips.snippet.parsing._base import tokenize_snippet_text, finalize, resolve_ambiguity
from UltiSnips.snippet.parsing._lexer import EscapeCharToken, \
    VisualToken, TabStopToken, MirrorToken, ShellCodeToken
from UltiSnips.text_objects import EscapedChar, Mirror, VimLCode, Visual

_TOKEN_TO_TEXTOBJECT = {
    EscapeCharToken: EscapedChar,
    VisualToken: Visual,
    ShellCodeToken: VimLCode,  # `` is VimL in snipMate
}

__ALLOWED_TOKENS = [
    EscapeCharToken, VisualToken, TabStopToken, MirrorToken, ShellCodeToken
]

__ALLOWED_TOKENS_IN_TABSTOPS = [
    EscapeCharToken, VisualToken, MirrorToken, ShellCodeToken
]


def parse_and_instantiate(parent_to, text, indent):
    """Parses a snippet definition in snipMate format from 'text' assuming the
    current 'indent'.

    Will instantiate all the objects and link them as children to
    parent_to. Will also put the initial text into Vim.

    """
    all_tokens, seen_ts = tokenize_snippet_text(parent_to, text, indent,
                                                __ALLOWED_TOKENS, __ALLOWED_TOKENS_IN_TABSTOPS,
                                                _TOKEN_TO_TEXTOBJECT)
    resolve_ambiguity(all_tokens, seen_ts)
    finalize(all_tokens, seen_ts, parent_to)
@@ -0,0 +1,50 @@
#!/usr/bin/env python
# encoding: utf-8

"""Parses an UltiSnips snippet definition and launches it into Vim."""

from UltiSnips.snippet.parsing._base import tokenize_snippet_text, finalize, resolve_ambiguity
from UltiSnips.snippet.parsing._lexer import EscapeCharToken, \
    VisualToken, TransformationToken, TabStopToken, MirrorToken, \
    PythonCodeToken, VimLCodeToken, ShellCodeToken
from UltiSnips.text_objects import EscapedChar, Mirror, PythonCode, \
    ShellCode, TabStop, Transformation, VimLCode, Visual

_TOKEN_TO_TEXTOBJECT = {
    EscapeCharToken: EscapedChar,
    VisualToken: Visual,
    ShellCodeToken: ShellCode,
    PythonCodeToken: PythonCode,
    VimLCodeToken: VimLCode,
}

__ALLOWED_TOKENS = [
    EscapeCharToken, VisualToken, TransformationToken, TabStopToken,
    MirrorToken, PythonCodeToken, VimLCodeToken, ShellCodeToken
]


def _create_transformations(all_tokens, seen_ts):
    """Create the objects that need to know about tabstops."""
    for parent, token in all_tokens:
        if isinstance(token, TransformationToken):
            if token.number not in seen_ts:
                raise RuntimeError(
                    'Tabstop %i is not known but is used by a Transformation'
                    % token.number)
            Transformation(parent, seen_ts[token.number], token)


def parse_and_instantiate(parent_to, text, indent):
    """Parses a snippet definition in UltiSnips format from 'text' assuming the
    current 'indent'.

    Will instantiate all the objects and link them as children to
    parent_to. Will also put the initial text into Vim.

    """
    all_tokens, seen_ts = tokenize_snippet_text(parent_to, text, indent,
                                                __ALLOWED_TOKENS, __ALLOWED_TOKENS, _TOKEN_TO_TEXTOBJECT)
    resolve_ambiguity(all_tokens, seen_ts)
    _create_transformations(all_tokens, seen_ts)
    finalize(all_tokens, seen_ts, parent_to)
@@ -0,0 +1,10 @@
#!/usr/bin/env python
# encoding: utf-8

"""Sources of snippet definitions."""

from UltiSnips.snippet.source._base import SnippetSource
from UltiSnips.snippet.source.added import AddedSnippetsSource
from UltiSnips.snippet.source.file.snipmate import SnipMateFileSource
from UltiSnips.snippet.source.file.ultisnips import UltiSnipsFileSource, \
    find_all_snippet_files, find_snippet_files
@@ -0,0 +1,97 @@
#!/usr/bin/env python
# encoding: utf-8

"""Base class for snippet sources."""

from collections import defaultdict

from UltiSnips.snippet.source._snippet_dictionary import SnippetDictionary


class SnippetSource(object):

    """See module docstring."""

    def __init__(self):
        self._snippets = defaultdict(SnippetDictionary)
        self._extends = defaultdict(set)

    def ensure(self, filetypes, cached):
        """Update/reload the snippets in the source when needed.

        It makes sure that the snippets are not outdated.

        """

    def loaded(self, filetypes):
        return len(self._snippets) > 0

    def _get_existing_deep_extends(self, base_filetypes):
        """Helper to get all existing filetypes extended by the base filetypes."""
        deep_extends = self.get_deep_extends(base_filetypes)
        return [ft for ft in deep_extends if ft in self._snippets]

    def get_snippets(self, filetypes, before, possible, autotrigger_only,
                     visual_content):
        """Returns the snippets for all 'filetypes' (in order) and their
        parents matching the text 'before'. If 'possible' is true, a partial
        match is enough. Base classes can override this method to provide means
        of creating snippets on the fly.

        Returns a list of SnippetDefinitions.

        """
        result = []
        for ft in self._get_existing_deep_extends(filetypes):
            snips = self._snippets[ft]
            result.extend(snips.get_matching_snippets(before, possible,
                                                      autotrigger_only,
                                                      visual_content))
        return result

    def get_clear_priority(self, filetypes):
        """Get maximum clearsnippets priority without arguments for specified
        filetypes, if any.

        It returns None if there are no clearsnippets.

        """
        pri = None
        for ft in self._get_existing_deep_extends(filetypes):
            snippets = self._snippets[ft]
            if pri is None or snippets._clear_priority > pri:
                pri = snippets._clear_priority
        return pri

    def get_cleared(self, filetypes):
        """Get a set of cleared snippets marked by clearsnippets with arguments
        for specified filetypes."""
        cleared = {}
        for ft in self._get_existing_deep_extends(filetypes):
            snippets = self._snippets[ft]
            for key, value in snippets._cleared.items():
                if key not in cleared or value > cleared[key]:
                    cleared[key] = value
        return cleared

    def update_extends(self, child_ft, parent_fts):
        """Update the extends relation for the given child filetype and its
        parent filetypes."""
        self._extends[child_ft].update(parent_fts)

    def get_deep_extends(self, base_filetypes):
        """Get a list of filetypes that are either directly or indirectly
        extended by the given base filetypes.

        Note that the returned list includes the root filetypes themselves.

        """
        seen = set(base_filetypes)
        todo_fts = list(set(base_filetypes))
        while todo_fts:
            todo_ft = todo_fts.pop()
            unseen_extends = set(
                ft for ft in self._extends[todo_ft] if ft not in seen)
            seen.update(unseen_extends)
            todo_fts.extend(unseen_extends)
        return seen
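get_deep_extends() above is a plain worklist traversal of the 'extends' relation. A standalone sketch of the same walk on a hypothetical filetype graph (the 'cpp'/'c'/'common' names are made up for illustration):

from collections import defaultdict

extends = defaultdict(set)
extends['cpp'].add('c')        # cpp snippets also pull in c
extends['c'].add('common')     # c pulls in a hypothetical shared filetype


def deep_extends(base_filetypes):
    # Same loop as SnippetSource.get_deep_extends(): everything directly or
    # indirectly extended, including the base filetypes themselves.
    seen = set(base_filetypes)
    todo = list(seen)
    while todo:
        ft = todo.pop()
        unseen = {parent for parent in extends[ft] if parent not in seen}
        seen.update(unseen)
        todo.extend(unseen)
    return seen


print(sorted(deep_extends(['cpp'])))   # ['c', 'common', 'cpp']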
@@ -0,0 +1,60 @@
#!/usr/bin/env python
# encoding: utf-8

"""Implements a container for parsed snippets."""

class SnippetDictionary(object):

    """See module docstring."""

    def __init__(self):
        self._snippets = []
        self._cleared = {}
        self._clear_priority = float("-inf")

    def add_snippet(self, snippet):
        """Add 'snippet' to this dictionary."""
        self._snippets.append(snippet)

    def get_matching_snippets(self, trigger, potentially, autotrigger_only,
                              visual_content):
        """Returns all snippets matching the given trigger.

        If 'potentially' is true, returns all that could_match().

        If 'autotrigger_only' is true, the function will return only snippets
        which are marked with flag 'A' (should be automatically expanded
        without trigger key press).
        It's handled specially to avoid walking down the list of all snippets,
        which can be very slow, because the function is called on each change
        made in insert mode.

        """
        all_snippets = self._snippets
        if autotrigger_only:
            all_snippets = [s for s in all_snippets if s.has_option('A')]

        if not potentially:
            return [s for s in all_snippets if s.matches(trigger,
                                                         visual_content)]
        else:
            return [s for s in all_snippets if s.could_match(trigger)]

    def clear_snippets(self, priority, triggers):
        """Clear the snippets by marking them as cleared.

        If 'triggers' is empty, it updates the value of the clear
        priority instead.

        """
        if not triggers:
            if self._clear_priority is None or priority > self._clear_priority:
                self._clear_priority = priority
        else:
            for trigger in triggers:
                if (trigger not in self._cleared or
                        priority > self._cleared[trigger]):
                    self._cleared[trigger] = priority

    def __len__(self):
        return len(self._snippets)
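clear_snippets() above only records state: one global clear priority when called without triggers, and a per-trigger priority otherwise; filtering against those values happens in the snippet manager, outside this file. A short usage sketch of that bookkeeping (assuming the SnippetDictionary class above has been pasted into a plain Python session; the two calls mirror a 'clearsnippets' line without and with triggers):

d = SnippetDictionary()
d.clear_snippets(0, [])               # 'clearsnippets'        -> global clear priority 0
d.clear_snippets(5, ['if', 'for'])    # 'clearsnippets if for' -> per-trigger priority 5
print(d._clear_priority, d._cleared)  # 0 {'if': 5, 'for': 5}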
@@ -0,0 +1,15 @@
#!/usr/bin/env python
# encoding: utf-8

"""Handles snippets that were added manually via UltiSnips_Manager.add_snippet()."""

from UltiSnips.snippet.source._base import SnippetSource


class AddedSnippetsSource(SnippetSource):

    """See module docstring."""

    def add_snippet(self, ft, snippet):
        """Adds the given 'snippet' for 'ft'."""
        self._snippets[ft].add_snippet(snippet)
@@ -0,0 +1 @@
"""Snippet sources that are file based."""
@@ -0,0 +1,112 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""Code to provide access to UltiSnips files from disk."""
|
||||
|
||||
from collections import defaultdict
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
from UltiSnips import _vim
|
||||
from UltiSnips import compatibility
|
||||
from UltiSnips.snippet.source._base import SnippetSource
|
||||
|
||||
|
||||
def _hash_file(path):
|
||||
"""Returns a hashdigest of 'path'."""
|
||||
if not os.path.isfile(path):
|
||||
return False
|
||||
return hashlib.sha1(open(path, 'rb').read()).hexdigest()
|
||||
|
||||
|
||||
class SnippetSyntaxError(RuntimeError):
|
||||
|
||||
"""Thrown when a syntax error is found in a file."""
|
||||
|
||||
def __init__(self, filename, line_index, msg):
|
||||
RuntimeError.__init__(self, '%s in %s:%d' % (
|
||||
msg, filename, line_index))
|
||||
|
||||
|
||||
class SnippetFileSource(SnippetSource):
|
||||
|
||||
"""Base class that abstracts away 'extends' info and file hashes."""
|
||||
|
||||
def __init__(self):
|
||||
SnippetSource.__init__(self)
|
||||
self._files_for_ft = defaultdict(set)
|
||||
self._file_hashes = defaultdict(lambda: None)
|
||||
self._ensure_cached = False
|
||||
|
||||
def ensure(self, filetypes, cached):
|
||||
if cached and self._ensure_cached:
|
||||
return
|
||||
|
||||
for ft in self.get_deep_extends(filetypes):
|
||||
if self._needs_update(ft):
|
||||
self._load_snippets_for(ft)
|
||||
|
||||
self._ensure_cached = True
|
||||
|
||||
def _get_all_snippet_files_for(self, ft):
|
||||
"""Returns a set of all files that define snippets for 'ft'."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _parse_snippet_file(self, filedata, filename):
|
||||
"""Parses 'filedata' as a snippet file and yields events."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _needs_update(self, ft):
|
||||
"""Returns true if any files for 'ft' have changed and must be
|
||||
reloaded."""
|
||||
existing_files = self._get_all_snippet_files_for(ft)
|
||||
if existing_files != self._files_for_ft[ft]:
|
||||
self._files_for_ft[ft] = existing_files
|
||||
return True
|
||||
|
||||
for filename in self._files_for_ft[ft]:
|
||||
if _hash_file(filename) != self._file_hashes[filename]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _load_snippets_for(self, ft):
|
||||
"""Load all snippets for the given 'ft'."""
|
||||
if ft in self._snippets:
|
||||
del self._snippets[ft]
|
||||
del self._extends[ft]
|
||||
try:
|
||||
for fn in self._files_for_ft[ft]:
|
||||
self._parse_snippets(ft, fn)
|
||||
except:
|
||||
del self._files_for_ft[ft]
|
||||
raise
|
||||
# Now load for the parents
|
||||
for parent_ft in self.get_deep_extends([ft]):
|
||||
if parent_ft != ft and self._needs_update(parent_ft):
|
||||
self._load_snippets_for(parent_ft)
|
||||
|
||||
def _parse_snippets(self, ft, filename):
|
||||
"""Parse the 'filename' for the given 'ft' and watch it for changes in
|
||||
the future."""
|
||||
self._file_hashes[filename] = _hash_file(filename)
|
||||
file_data = compatibility.open_ascii_file(filename, 'r').read()
|
||||
for event, data in self._parse_snippet_file(file_data, filename):
|
||||
if event == 'error':
|
||||
msg, line_index = data
|
||||
filename = _vim.eval("""fnamemodify(%s, ":~:.")""" %
|
||||
_vim.escape(filename))
|
||||
raise SnippetSyntaxError(filename, line_index, msg)
|
||||
elif event == 'clearsnippets':
|
||||
priority, triggers = data
|
||||
self._snippets[ft].clear_snippets(priority, triggers)
|
||||
elif event == 'extends':
|
||||
# TODO(sirver): extends information is more global
|
||||
# than one snippet source.
|
||||
filetypes, = data
|
||||
self.update_extends(ft, filetypes)
|
||||
elif event == 'snippet':
|
||||
snippet, = data
|
||||
self._snippets[ft].add_snippet(snippet)
|
||||
else:
|
||||
assert False, 'Unhandled %s: %r' % (event, data)
|
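_needs_update() above reloads a filetype when the set of snippet files changed or when any file's content hash differs from the one recorded at parse time. A standalone sketch of that check (not part of this commit), using the same sha1-over-file-contents idea as _hash_file():

import hashlib


def hash_file(path):
    # sha1 over the raw file contents, as in _hash_file() above.
    with open(path, 'rb') as fileobj:
        return hashlib.sha1(fileobj.read()).hexdigest()


def needs_update(current_files, recorded_hashes):
    # Reload when the file set changed or any content hash changed.
    if set(current_files) != set(recorded_hashes):
        return True
    return any(hash_file(path) != recorded_hashes[path] for path in current_files)


# Hypothetical usage; 'recorded_hashes' would have been filled the last time
# the snippet files were parsed:
#   needs_update({'python.snippets'}, {'python.snippets': '<old sha1>'})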
@@ -0,0 +1,29 @@
#!/usr/bin/env python
# encoding: utf-8

"""Common code for snipMate and UltiSnips snippet files."""


def handle_extends(tail, line_index):
    """Handles an 'extends' line in a snippets file."""
    if tail:
        return 'extends', ([p.strip() for p in tail.split(',')],)
    else:
        return 'error', ("'extends' without file types", line_index)


def handle_action(head, tail, line_index):
    if tail:
        action = tail.strip('"').replace(r'\"', '"').replace(r'\\\\', r'\\')
        return head, (action,)
    else:
        return 'error', ("'{}' without specified action".format(head),
                         line_index)


def handle_context(tail, line_index):
    if tail:
        return 'context', tail.strip('"').replace(r'\"', '"')\
            .replace(r'\\\\', r'\\')
    else:
        return 'error', ("'context' without body", line_index)
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""Parses snipMate files."""
|
||||
|
||||
import os
|
||||
import glob
|
||||
|
||||
from UltiSnips import _vim
|
||||
from UltiSnips.snippet.definition import SnipMateSnippetDefinition
|
||||
from UltiSnips.snippet.source.file._base import SnippetFileSource
|
||||
from UltiSnips.snippet.source.file._common import handle_extends
|
||||
from UltiSnips.text import LineIterator, head_tail
|
||||
|
||||
|
||||
def _splitall(path):
|
||||
"""Split 'path' into all its components."""
|
||||
# From http://my.safaribooksonline.com/book/programming/
|
||||
# python/0596001673/files/pythoncook-chp-4-sect-16
|
||||
allparts = []
|
||||
while True:
|
||||
parts = os.path.split(path)
|
||||
if parts[0] == path: # sentinel for absolute paths
|
||||
allparts.insert(0, parts[0])
|
||||
break
|
||||
elif parts[1] == path: # sentinel for relative paths
|
||||
allparts.insert(0, parts[1])
|
||||
break
|
||||
else:
|
||||
path = parts[0]
|
||||
allparts.insert(0, parts[1])
|
||||
return allparts
|
||||
|
||||
|
||||
def snipmate_files_for(ft):
|
||||
"""Returns all snipMate files we need to look at for 'ft'."""
|
||||
if ft == 'all':
|
||||
ft = '_'
|
||||
patterns = [
|
||||
'%s.snippets' % ft,
|
||||
os.path.join(ft, '*.snippets'),
|
||||
os.path.join(ft, '*.snippet'),
|
||||
os.path.join(ft, '*/*.snippet'),
|
||||
]
|
||||
ret = set()
|
||||
for rtp in _vim.eval('&runtimepath').split(','):
|
||||
path = os.path.realpath(os.path.expanduser(
|
||||
os.path.join(rtp, 'snippets')))
|
||||
for pattern in patterns:
|
||||
for fn in glob.glob(os.path.join(path, pattern)):
|
||||
ret.add(fn)
|
||||
return ret
|
||||
|
||||
|
||||
def _parse_snippet_file(content, full_filename):
|
||||
"""Parses 'content' assuming it is a .snippet file and yields events."""
|
||||
filename = full_filename[:-len('.snippet')] # strip extension
|
||||
segments = _splitall(filename)
|
||||
segments = segments[segments.index('snippets') + 1:]
|
||||
assert len(segments) in (2, 3)
|
||||
|
||||
trigger = segments[1]
|
||||
description = segments[2] if 2 < len(segments) else ''
|
||||
|
||||
# Chomp \n if any.
|
||||
if content and content.endswith(os.linesep):
|
||||
content = content[:-len(os.linesep)]
|
||||
yield 'snippet', (SnipMateSnippetDefinition(trigger, content,
|
||||
description, full_filename),)
|
||||
|
||||
|
||||
def _parse_snippet(line, lines, filename):
|
||||
"""Parse a snippet defintions."""
|
||||
start_line_index = lines.line_index
|
||||
trigger, description = head_tail(line[len('snippet'):].lstrip())
|
||||
content = ''
|
||||
while True:
|
||||
next_line = lines.peek()
|
||||
if next_line is None:
|
||||
break
|
||||
if next_line.strip() and not next_line.startswith('\t'):
|
||||
break
|
||||
line = next(lines)
|
||||
if line[0] == '\t':
|
||||
line = line[1:]
|
||||
content += line
|
||||
content = content[:-1] # Chomp the last newline
|
||||
return 'snippet', (SnipMateSnippetDefinition(
|
||||
trigger, content, description, '%s:%i' % (filename, start_line_index)),)
|
||||
|
||||
|
||||
def _parse_snippets_file(data, filename):
|
||||
"""Parse 'data' assuming it is a .snippets file.
|
||||
|
||||
Yields events in the file.
|
||||
|
||||
"""
|
||||
lines = LineIterator(data)
|
||||
for line in lines:
|
||||
if not line.strip():
|
||||
continue
|
||||
|
||||
head, tail = head_tail(line)
|
||||
if head == 'extends':
|
||||
yield handle_extends(tail, lines.line_index)
|
||||
elif head == 'snippet':
|
||||
snippet = _parse_snippet(line, lines, filename)
|
||||
if snippet is not None:
|
||||
yield snippet
|
||||
elif head and not head.startswith('#'):
|
||||
yield 'error', ('Invalid line %r' % line.rstrip(), lines.line_index)
|
||||
|
||||
|
||||
class SnipMateFileSource(SnippetFileSource):
|
||||
|
||||
"""Manages all snipMate snippet definitions found in rtp."""
|
||||
|
||||
def _get_all_snippet_files_for(self, ft):
|
||||
return snipmate_files_for(ft)
|
||||
|
||||
def _parse_snippet_file(self, filedata, filename):
|
||||
if filename.lower().endswith('snippet'):
|
||||
for event, data in _parse_snippet_file(filedata, filename):
|
||||
yield event, data
|
||||
else:
|
||||
for event, data in _parse_snippets_file(filedata, filename):
|
||||
yield event, data
|
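For single *.snippet files, _parse_snippet_file() above derives the trigger (and an optional description) purely from the path components after the 'snippets' directory. A standalone sketch of that mapping (POSIX-style separators assumed for brevity; the helper name trigger_from_path is made up):

def trigger_from_path(path):
    # 'snippets/<ft>/<trigger>.snippet' or
    # 'snippets/<ft>/<trigger>/<description>.snippet'
    segments = path[:-len('.snippet')].split('/')
    segments = segments[segments.index('snippets') + 1:]
    trigger = segments[1]
    description = segments[2] if len(segments) > 2 else ''
    return trigger, description


print(trigger_from_path('snippets/python/ifmain.snippet'))         # ('ifmain', '')
print(trigger_from_path('snippets/python/for/each item.snippet'))  # ('for', 'each item')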
@@ -0,0 +1,187 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""Parsing of snippet files."""
|
||||
|
||||
from collections import defaultdict
|
||||
import glob
|
||||
import os
|
||||
|
||||
from UltiSnips import _vim
|
||||
from UltiSnips.snippet.definition import UltiSnipsSnippetDefinition
|
||||
from UltiSnips.snippet.source.file._base import SnippetFileSource
|
||||
from UltiSnips.snippet.source.file._common import handle_extends, \
|
||||
handle_action, handle_context
|
||||
from UltiSnips.text import LineIterator, head_tail
|
||||
|
||||
|
||||
def find_snippet_files(ft, directory):
|
||||
"""Returns all matching snippet files for 'ft' in 'directory'."""
|
||||
patterns = ['%s.snippets', '%s_*.snippets', os.path.join('%s', '*')]
|
||||
ret = set()
|
||||
directory = os.path.expanduser(directory)
|
||||
for pattern in patterns:
|
||||
for fn in glob.glob(os.path.join(directory, pattern % ft)):
|
||||
ret.add(os.path.realpath(fn))
|
||||
return ret
|
||||
|
||||
|
||||
def find_all_snippet_files(ft):
|
||||
"""Returns all snippet files matching 'ft' in the given runtime path
|
||||
directory."""
|
||||
if _vim.eval("exists('b:UltiSnipsSnippetDirectories')") == '1':
|
||||
snippet_dirs = _vim.eval('b:UltiSnipsSnippetDirectories')
|
||||
else:
|
||||
snippet_dirs = _vim.eval('g:UltiSnipsSnippetDirectories')
|
||||
if len(snippet_dirs) == 1 and os.path.isabs(snippet_dirs[0]):
|
||||
check_dirs = ['']
|
||||
else:
|
||||
check_dirs = _vim.eval('&runtimepath').split(',')
|
||||
patterns = ['%s.snippets', '%s_*.snippets', os.path.join('%s', '*')]
|
||||
ret = set()
|
||||
for rtp in check_dirs:
|
||||
for snippet_dir in snippet_dirs:
|
||||
if snippet_dir == 'snippets':
|
||||
raise RuntimeError(
|
||||
"You have 'snippets' in UltiSnipsSnippetDirectories. This "
|
||||
'directory is reserved for snipMate snippets. Use another '
|
||||
'directory for UltiSnips snippets.')
|
||||
pth = os.path.realpath(os.path.expanduser(
|
||||
os.path.join(rtp, snippet_dir)))
|
||||
for pattern in patterns:
|
||||
for fn in glob.glob(os.path.join(pth, pattern % ft)):
|
||||
ret.add(fn)
|
||||
return ret
|
||||
|
||||
|
||||
def _handle_snippet_or_global(
|
||||
filename, line, lines, python_globals, priority, pre_expand, context
|
||||
):
|
||||
"""Parses the snippet that begins at the current line."""
|
||||
start_line_index = lines.line_index
|
||||
descr = ''
|
||||
opts = ''
|
||||
|
||||
# Ensure this is a snippet
|
||||
snip = line.split()[0]
|
||||
|
||||
# Get and strip options if they exist
|
||||
remain = line[len(snip):].strip()
|
||||
words = remain.split()
|
||||
|
||||
if len(words) > 2:
|
||||
# second to last word ends with a quote
|
||||
if '"' not in words[-1] and words[-2][-1] == '"':
|
||||
opts = words[-1]
|
||||
remain = remain[:-len(opts) - 1].rstrip()
|
||||
|
||||
if 'e' in opts and not context:
|
||||
left = remain[:-1].rfind('"')
|
||||
if left != -1 and left != 0:
|
||||
context, remain = remain[left:].strip('"'), remain[:left]
|
||||
|
||||
# Get and strip description if it exists
|
||||
remain = remain.strip()
|
||||
if len(remain.split()) > 1 and remain[-1] == '"':
|
||||
left = remain[:-1].rfind('"')
|
||||
if left != -1 and left != 0:
|
||||
descr, remain = remain[left:], remain[:left]
|
||||
|
||||
# The rest is the trigger
|
||||
trig = remain.strip()
|
||||
if len(trig.split()) > 1 or 'r' in opts:
|
||||
if trig[0] != trig[-1]:
|
||||
return 'error', ("Invalid multiword trigger: '%s'" % trig,
|
||||
lines.line_index)
|
||||
trig = trig[1:-1]
|
||||
end = 'end' + snip
|
||||
content = ''
|
||||
|
||||
found_end = False
|
||||
for line in lines:
|
||||
if line.rstrip() == end:
|
||||
content = content[:-1] # Chomp the last newline
|
||||
found_end = True
|
||||
break
|
||||
content += line
|
||||
|
||||
if not found_end:
|
||||
return 'error', ("Missing 'endsnippet' for %r" %
|
||||
trig, lines.line_index)
|
||||
|
||||
if snip == 'global':
|
||||
python_globals[trig].append(content)
|
||||
elif snip == 'snippet':
|
||||
definition = UltiSnipsSnippetDefinition(
|
||||
priority, trig, content,
|
||||
descr, opts, python_globals,
|
||||
'%s:%i' % (filename, start_line_index),
|
||||
context, pre_expand)
|
||||
return 'snippet', (definition,)
|
||||
else:
|
||||
return 'error', ("Invalid snippet type: '%s'" % snip, lines.line_index)
|
||||
|
||||
|
||||
def _parse_snippets_file(data, filename):
|
||||
"""Parse 'data' assuming it is a snippet file.
|
||||
|
||||
Yields events in the file.
|
||||
|
||||
"""
|
||||
|
||||
python_globals = defaultdict(list)
|
||||
lines = LineIterator(data)
|
||||
current_priority = 0
|
||||
actions = {}
|
||||
context = None
|
||||
for line in lines:
|
||||
if not line.strip():
|
||||
continue
|
||||
|
||||
head, tail = head_tail(line)
|
||||
if head in ('snippet', 'global'):
|
||||
snippet = _handle_snippet_or_global(
|
||||
filename, line, lines,
|
||||
python_globals,
|
||||
current_priority,
|
||||
actions,
|
||||
context
|
||||
)
|
||||
|
||||
actions = {}
|
||||
context = None
|
||||
if snippet is not None:
|
||||
yield snippet
|
||||
elif head == 'extends':
|
||||
yield handle_extends(tail, lines.line_index)
|
||||
elif head == 'clearsnippets':
|
||||
yield 'clearsnippets', (current_priority, tail.split())
|
||||
elif head == 'context':
|
||||
head, context, = handle_context(tail, lines.line_index)
|
||||
if head == 'error':
|
||||
yield (head, tail)
|
||||
elif head == 'priority':
|
||||
try:
|
||||
current_priority = int(tail.split()[0])
|
||||
except (ValueError, IndexError):
|
||||
yield 'error', ('Invalid priority %r' % tail, lines.line_index)
|
||||
elif head in ['pre_expand', 'post_expand', 'post_jump']:
|
||||
head, tail = handle_action(head, tail, lines.line_index)
|
||||
if head == 'error':
|
||||
yield (head, tail)
|
||||
else:
|
||||
actions[head], = tail
|
||||
elif head and not head.startswith('#'):
|
||||
yield 'error', ('Invalid line %r' % line.rstrip(), lines.line_index)
|
||||
|
||||
|
||||
class UltiSnipsFileSource(SnippetFileSource):
|
||||
|
||||
"""Manages all snippets definitions found in rtp for ultisnips."""
|
||||
|
||||
def _get_all_snippet_files_for(self, ft):
|
||||
return find_all_snippet_files(ft)
|
||||
|
||||
def _parse_snippet_file(self, filedata, filename):
|
||||
for event, data in _parse_snippets_file(filedata, filename):
|
||||
yield event, data
|
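find_snippet_files() and find_all_snippet_files() above probe three filename shapes per filetype in every snippet directory. A small standalone illustration of what those globs expand to for ft='python' (output shown for POSIX path separators):

import os


def patterns_for(ft):
    # The same three shapes as the 'patterns' lists above.
    return [pattern % ft
            for pattern in ['%s.snippets', '%s_*.snippets', os.path.join('%s', '*')]]


print(patterns_for('python'))
# ['python.snippets', 'python_*.snippets', 'python/*']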