Fixed vim and zsh

commit 0331f6518a
parent f9db886bd3
Date: 2018-04-05 13:06:54 +02:00
2009 changed files with 256303 additions and 0 deletions


@@ -0,0 +1 @@
"""Code related to turning text into snippets."""


@@ -0,0 +1,68 @@
#!/usr/bin/env python
# encoding: utf-8
"""Common functionality of the snippet parsing codes."""
from UltiSnips.position import Position
from UltiSnips.snippet.parsing._lexer import tokenize, TabStopToken, MirrorToken
from UltiSnips.text_objects import TabStop, Mirror
def resolve_ambiguity(all_tokens, seen_ts):
"""$1 could be a Mirror or a TabStop.
This figures this out.
"""
for parent, token in all_tokens:
if isinstance(token, MirrorToken):
if token.number not in seen_ts:
seen_ts[token.number] = TabStop(parent, token)
else:
Mirror(parent, seen_ts[token.number], token)
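# Illustrative note (not part of the original file): in a definition such as
# '${1:foo} $1' the first occurrence is a TabStopToken and is recorded in
# seen_ts while the text is tokenized, so the bare '$1' becomes a Mirror here.
# In '$1 ... $1' both occurrences are MirrorTokens; the first is promoted to
# the TabStop and the second becomes a Mirror of it.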
def tokenize_snippet_text(snippet_instance, text, indent,
allowed_tokens_in_text, allowed_tokens_in_tabstops,
token_to_textobject):
"""Turns 'text' into a stream of tokens and creates the text objects from
those tokens that are mentioned in 'token_to_textobject' assuming the
current 'indent'.
The 'allowed_tokens_in_text' define which tokens will be recognized
in 'text' while 'allowed_tokens_in_tabstops' are the tokens that
will be recognized in TabStop placeholder text.
"""
seen_ts = {}
all_tokens = []
def _do_parse(parent, text, allowed_tokens):
"""Recursive function that actually creates the objects."""
tokens = list(tokenize(text, indent, parent.start, allowed_tokens))
for token in tokens:
all_tokens.append((parent, token))
if isinstance(token, TabStopToken):
ts = TabStop(parent, token)
seen_ts[token.number] = ts
_do_parse(ts, token.initial_text,
allowed_tokens_in_tabstops)
else:
klass = token_to_textobject.get(token.__class__, None)
if klass is not None:
klass(parent, token)
_do_parse(snippet_instance, text, allowed_tokens_in_text)
return all_tokens, seen_ts
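# Illustrative note (not part of the original file): placeholder text is
# parsed recursively, so when nested tabstops are allowed (as in the UltiSnips
# format) a body like '${1:${2:inner}}' makes TabStop 1 the parent of
# TabStop 2 and both numbers end up in 'seen_ts'.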
def finalize(all_tokens, seen_ts, snippet_instance):
"""Adds a tabstop 0 if non is in 'seen_ts' and brings the text of the
snippet instance into Vim."""
if 0 not in seen_ts:
mark = all_tokens[-1][1].end # Last token is always EndOfText
m1 = Position(mark.line, mark.col)
TabStop(snippet_instance, 0, mark, m1)
snippet_instance.replace_initial_text()


@@ -0,0 +1,369 @@
#!/usr/bin/env python
# encoding: utf-8
"""Not really a lexer in the classical sense, but code to convert snippet
definitions into logical units called Tokens."""
import string
import re
from UltiSnips.compatibility import as_unicode
from UltiSnips.position import Position
from UltiSnips.text import unescape
class _TextIterator(object):
"""Helper class to make iterating over text easier."""
def __init__(self, text, offset):
self._text = as_unicode(text)
self._line = offset.line
self._col = offset.col
self._idx = 0
def __iter__(self):
"""Iterator interface."""
return self
def __next__(self):
"""Returns the next character."""
if self._idx >= len(self._text):
raise StopIteration
rv = self._text[self._idx]
if self._text[self._idx] in ('\n', '\r\n'):
self._line += 1
self._col = 0
else:
self._col += 1
self._idx += 1
return rv
next = __next__ # for python2
def peek(self, count=1):
"""Returns the next 'count' characters without advancing the stream."""
if count > 1: # This might return '' if nothing is found
return self._text[self._idx:self._idx + count]
try:
return self._text[self._idx]
except IndexError:
return None
@property
def pos(self):
"""Current position in the text."""
return Position(self._line, self._col)
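# Illustrative example (not part of the original file), using the Position
# class imported above:
#
#   it = _TextIterator('ab\ncd', Position(0, 0))
#   next(it), next(it), next(it)   # 'a', 'b', '\n'
#   it.pos                         # line 1, column 0 -- '\n' advanced the line
#   it.peek(2)                     # 'cd', nothing is consumed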
def _parse_number(stream):
"""Expects the stream to contain a number next, returns the number without
consuming any more bytes."""
rv = ''
while stream.peek() and stream.peek() in string.digits:
rv += next(stream)
return int(rv)
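# Illustrative example (not part of the original file): with the stream
# positioned at '10:rest', _parse_number() consumes '1' and '0', returns the
# int 10 and leaves the stream at ':'.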
def _parse_till_closing_brace(stream):
"""
Returns all chars till a non-escaped } is found. Other
non escaped { are taken into account and skipped over.
Will also consume the closing }, but not return it
"""
rv = ''
in_braces = 1
while True:
if EscapeCharToken.starts_here(stream, '{}'):
rv += next(stream) + next(stream)
else:
char = next(stream)
if char == '{':
in_braces += 1
elif char == '}':
in_braces -= 1
if in_braces == 0:
break
rv += char
return rv
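# Illustrative example (not part of the original file): with the stream at
# 'foo {bar} baz} tail', _parse_till_closing_brace() returns 'foo {bar} baz',
# consumes the final '}' and leaves ' tail' in the stream; an escaped brace
# such as '\}' is copied through verbatim, backslash included.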
def _parse_till_unescaped_char(stream, chars):
"""
Returns all chars till a non-escaped char is found.
Will also consume the closing char, but and return it as second
return value
"""
rv = ''
while True:
escaped = False
for char in chars:
if EscapeCharToken.starts_here(stream, char):
rv += next(stream) + next(stream)
escaped = True
if not escaped:
char = next(stream)
if char in chars:
break
rv += char
return rv, char
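# Illustrative example (not part of the original file): with the stream at
# 'foo\/bar/rest' and chars == '/', the escaped slash is kept (backslash
# included) and the call returns ('foo\\/bar', '/'), i.e. the escaped slash
# stays escaped in the returned text and 'rest' is left in the stream.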
class Token(object):
"""Represents a Token as parsed from a snippet definition."""
def __init__(self, gen, indent):
self.initial_text = as_unicode('')
self.start = gen.pos
self._parse(gen, indent)
self.end = gen.pos
def _parse(self, stream, indent):
"""Parses the token from 'stream' with the current 'indent'."""
pass # Does nothing
class TabStopToken(Token):
"""${1:blub}"""
CHECK = re.compile(r'^\${\d+[:}]')
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
next(stream) # {
self.number = _parse_number(stream)
if stream.peek() == ':':
next(stream)
self.initial_text = _parse_till_closing_brace(stream)
def __repr__(self):
return 'TabStopToken(%r,%r,%r,%r)' % (
self.start, self.end, self.number, self.initial_text
)
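# Illustrative note (not part of the original file): '${2:foo}' parses to
# number == 2 with initial_text == 'foo', while '${3}' parses to number == 3
# with empty initial_text.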
class VisualToken(Token):
"""${VISUAL}"""
CHECK = re.compile(r"^\${VISUAL[:}/]")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
for _ in range(8): # ${VISUAL
next(stream)
if stream.peek() == ':':
next(stream)
self.alternative_text, char = _parse_till_unescaped_char(stream, '/}')
self.alternative_text = unescape(self.alternative_text)
if char == '/': # Transformation going on
try:
self.search = _parse_till_unescaped_char(stream, '/')[0]
self.replace = _parse_till_unescaped_char(stream, '/')[0]
self.options = _parse_till_closing_brace(stream)
except StopIteration:
raise RuntimeError(
"Invalid ${VISUAL} transformation! Forgot to escape a '/'?")
else:
self.search = None
self.replace = None
self.options = None
def __repr__(self):
return 'VisualToken(%r,%r)' % (
self.start, self.end
)
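# Illustrative note (not part of the original file): '${VISUAL:fallback}'
# yields alternative_text == 'fallback' with no transformation, while
# '${VISUAL:fallback/\w+/X/g}' additionally sets search == '\w+',
# replace == 'X' and options == 'g'.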
class TransformationToken(Token):
"""${1/match/replace/options}"""
CHECK = re.compile(r'^\${\d+\/')
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
next(stream) # {
self.number = _parse_number(stream)
next(stream) # /
self.search = _parse_till_unescaped_char(stream, '/')[0]
self.replace = _parse_till_unescaped_char(stream, '/')[0]
self.options = _parse_till_closing_brace(stream)
def __repr__(self):
return 'TransformationToken(%r,%r,%r,%r,%r)' % (
self.start, self.end, self.number, self.search, self.replace
)
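# Illustrative note (not part of the original file): '${1/\w+/\U$0/g}' parses
# to number == 1, search == '\w+', replace == '\U$0' and options == 'g'.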
class MirrorToken(Token):
"""$1."""
CHECK = re.compile(r'^\$\d+')
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
self.number = _parse_number(stream)
def __repr__(self):
return 'MirrorToken(%r,%r,%r)' % (
self.start, self.end, self.number
)
class EscapeCharToken(Token):
"""\\n."""
@classmethod
def starts_here(cls, stream, chars=r'{}\$`'):
"""Returns true if this token starts at the current position in
'stream'."""
cs = stream.peek(2)
if len(cs) == 2 and cs[0] == '\\' and cs[1] in chars:
return True
def _parse(self, stream, indent):
next(stream) # \
self.initial_text = next(stream)
def __repr__(self):
return 'EscapeCharToken(%r,%r,%r)' % (
self.start, self.end, self.initial_text
)
class ShellCodeToken(Token):
"""`echo "hi"`"""
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return stream.peek(1) == '`'
def _parse(self, stream, indent):
next(stream) # `
self.code = _parse_till_unescaped_char(stream, '`')[0]
def __repr__(self):
return 'ShellCodeToken(%r,%r,%r)' % (
self.start, self.end, self.code
)
class PythonCodeToken(Token):
"""`!p snip.rv = "Hi"`"""
CHECK = re.compile(r'^`!p\s')
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(4)) is not None
def _parse(self, stream, indent):
for _ in range(3):
next(stream) # `!p
if stream.peek() in '\t ':
next(stream)
code = _parse_till_unescaped_char(stream, '`')[0]
# Strip the indent if any
if len(indent):
lines = code.splitlines()
self.code = lines[0] + '\n'
self.code += '\n'.join([l[len(indent):]
for l in lines[1:]])
else:
self.code = code
self.indent = indent
def __repr__(self):
return 'PythonCodeToken(%r,%r,%r)' % (
self.start, self.end, self.code
)
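# Illustrative note (not part of the original file): the indent handling above
# is mechanical -- len(indent) characters are cut from every line after the
# first. So with indent == '    ' and continuation lines that start with those
# four spaces, self.code ends up with the embedded Python at column 0 and can
# compile as a top-level block.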
class VimLCodeToken(Token):
"""`!v g:hi`"""
CHECK = re.compile(r'^`!v\s')
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(4)) is not None
def _parse(self, stream, indent):
for _ in range(4):
next(stream) # `!v
self.code = _parse_till_unescaped_char(stream, '`')[0]
def __repr__(self):
return 'VimLCodeToken(%r,%r,%r)' % (
self.start, self.end, self.code
)
class EndOfTextToken(Token):
"""Appears at the end of the text."""
def __repr__(self):
return 'EndOfText(%r)' % self.end
def tokenize(text, indent, offset, allowed_tokens):
"""Returns an iterator of tokens of 'text'['offset':] which is assumed to
have 'indent' as the whitespace of the begging of the lines. Only
'allowed_tokens' are considered to be valid tokens."""
stream = _TextIterator(text, offset)
try:
while True:
done_something = False
for token in allowed_tokens:
if token.starts_here(stream):
yield token(stream, indent)
done_something = True
break
if not done_something:
next(stream)
except StopIteration:
yield EndOfTextToken(stream, indent)
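# Illustrative example (not part of the original file), using the Position
# class imported above:
#
#   list(tokenize('${1:foo} $1', '', Position(0, 0),
#                 [EscapeCharToken, TabStopToken, MirrorToken]))
#
# yields a TabStopToken (number 1, initial_text 'foo'), a MirrorToken
# (number 1) and a final EndOfTextToken; plain text between tokens produces
# no tokens of its own.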


@@ -0,0 +1,38 @@
#!/usr/bin/env python
# encoding: utf-8
"""Parses a snipMate snippet definition and launches it into Vim."""
from UltiSnips.snippet.parsing._base import tokenize_snippet_text, finalize, resolve_ambiguity
from UltiSnips.snippet.parsing._lexer import EscapeCharToken, \
VisualToken, TabStopToken, MirrorToken, ShellCodeToken
from UltiSnips.text_objects import EscapedChar, Mirror, VimLCode, Visual
_TOKEN_TO_TEXTOBJECT = {
EscapeCharToken: EscapedChar,
VisualToken: Visual,
ShellCodeToken: VimLCode, # `` is VimL in snipMate
}
__ALLOWED_TOKENS = [
EscapeCharToken, VisualToken, TabStopToken, MirrorToken, ShellCodeToken
]
__ALLOWED_TOKENS_IN_TABSTOPS = [
EscapeCharToken, VisualToken, MirrorToken, ShellCodeToken
]
def parse_and_instantiate(parent_to, text, indent):
"""Parses a snippet definition in snipMate format from 'text' assuming the
current 'indent'.
Will instantiate all the objects and link them as children to
parent_to. Will also put the initial text into Vim.
"""
all_tokens, seen_ts = tokenize_snippet_text(parent_to, text, indent,
__ALLOWED_TOKENS, __ALLOWED_TOKENS_IN_TABSTOPS,
_TOKEN_TO_TEXTOBJECT)
resolve_ambiguity(all_tokens, seen_ts)
finalize(all_tokens, seen_ts, parent_to)
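# Illustrative note (not part of the original file): for a snipMate body like
# 'for ${1:item} in ${2:list}:' with a later bare '$1', the two placeholders
# become TabStops 1 and 2, resolve_ambiguity() turns the bare '$1' into a
# Mirror of TabStop 1, and finalize() appends a TabStop 0 at the end of the
# text because the definition contains no '$0'.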


@@ -0,0 +1,50 @@
#!/usr/bin/env python
# encoding: utf-8
"""Parses a UltiSnips snippet definition and launches it into Vim."""
from UltiSnips.snippet.parsing._base import tokenize_snippet_text, finalize, resolve_ambiguity
from UltiSnips.snippet.parsing._lexer import EscapeCharToken, \
VisualToken, TransformationToken, TabStopToken, MirrorToken, \
PythonCodeToken, VimLCodeToken, ShellCodeToken
from UltiSnips.text_objects import EscapedChar, Mirror, PythonCode, \
ShellCode, TabStop, Transformation, VimLCode, Visual
_TOKEN_TO_TEXTOBJECT = {
EscapeCharToken: EscapedChar,
VisualToken: Visual,
ShellCodeToken: ShellCode,
PythonCodeToken: PythonCode,
VimLCodeToken: VimLCode,
}
__ALLOWED_TOKENS = [
EscapeCharToken, VisualToken, TransformationToken, TabStopToken,
MirrorToken, PythonCodeToken, VimLCodeToken, ShellCodeToken
]
def _create_transformations(all_tokens, seen_ts):
"""Create the objects that need to know about tabstops."""
for parent, token in all_tokens:
if isinstance(token, TransformationToken):
if token.number not in seen_ts:
raise RuntimeError(
'Tabstop %i is not known but is used by a Transformation'
% token.number)
Transformation(parent, seen_ts[token.number], token)
def parse_and_instantiate(parent_to, text, indent):
"""Parses a snippet definition in UltiSnips format from 'text' assuming the
current 'indent'.
Will instantiate all the objects and link them as children to
parent_to. Will also put the initial text into Vim.
"""
all_tokens, seen_ts = tokenize_snippet_text(parent_to, text, indent,
__ALLOWED_TOKENS, __ALLOWED_TOKENS, _TOKEN_TO_TEXTOBJECT)
resolve_ambiguity(all_tokens, seen_ts)
_create_transformations(all_tokens, seen_ts)
finalize(all_tokens, seen_ts, parent_to)
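# Illustrative note (not part of the original file): the call order above
# matters. For a body like '$1 ${1/.*/\U$0/}' there is no '${1:...}'
# placeholder, so resolve_ambiguity() first promotes the bare '$1' to
# TabStop 1; only then can _create_transformations() find seen_ts[1] and
# attach the Transformation instead of raising the "Tabstop ... is not known"
# error.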