Fixed vim and zsh
@ -0,0 +1,290 @@
import logging
import os
import re
import sys
import time

from deoplete.util import getlines

sys.path.insert(1, os.path.dirname(__file__))  # noqa: E261

from deoplete_jedi import cache, profiler, utils, worker

from .base import Base


def sort_key(item):
    w = item.get('name')
    z = len(w) - len(w.lstrip('_'))
    return (('z' * z) + w.lower()[z:], len(w))
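# Illustrative ordering (editor's note): underscore-prefixed names sink to
# the bottom because each leading underscore is replaced by a 'z' prefix in
# the sort key, e.g.:
#
#   >>> [i['name'] for i in sorted(
#   ...     [{'name': '__init__'}, {'name': 'open'}, {'name': '_x'}],
#   ...     key=sort_key)]
#   ['open', '_x', '__init__']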


class Source(Base):

    def __init__(self, vim):
        Base.__init__(self, vim)
        self.name = 'jedi'
        self.mark = '[jedi]'
        self.rank = 500
        self.filetypes = ['python', 'cython', 'pyrex']
        self.input_pattern = (r'[\w\)\]\}\'\"]+\.\w*$|'
                              r'^\s*@\w*$|'
                              r'^\s*from\s+[\w\.]*(?:\s+import\s+(?:\w*(?:,\s*)?)*)?|'
                              r'^\s*import\s+(?:[\w\.]*(?:,\s*)?)*')
        self._async_keys = set()
        self.workers_started = False

    def on_init(self, context):
        vars = context['vars']

        self.statement_length = vars.get(
            'deoplete#sources#jedi#statement_length', 0)
        self.server_timeout = vars.get(
            'deoplete#sources#jedi#server_timeout', 10)
        self.use_short_types = vars.get(
            'deoplete#sources#jedi#short_types', False)
        self.show_docstring = vars.get(
            'deoplete#sources#jedi#show_docstring', False)
        self.debug_server = vars.get(
            'deoplete#sources#jedi#debug_server', None)
        # Only one worker is really needed since deoplete-jedi has a pretty
        # aggressive cache.
        # Two workers may be needed if working with very large source files.
        self.worker_threads = vars.get(
            'deoplete#sources#jedi#worker_threads', 2)
        # Hard-coded Python interpreter location
        self.python_path = vars.get(
            'deoplete#sources#jedi#python_path', '')
        self.extra_path = vars.get(
            'deoplete#sources#jedi#extra_path', [])

        self.boilerplate = []  # Completions that are included in all results

        log_file = ''
        root_log = logging.getLogger('deoplete')

        if self.debug_server:
            self.is_debug_enabled = True
            if isinstance(self.debug_server, str):
                log_file = self.debug_server
            else:
                for handler in root_log.handlers:
                    if isinstance(handler, logging.FileHandler):
                        log_file = handler.baseFilename
                        break

        if not self.is_debug_enabled:
            child_log = root_log.getChild('jedi')
            child_log.propagate = False

        if not self.workers_started:
            if self.python_path and 'VIRTUAL_ENV' not in os.environ:
                cache.python_path = self.python_path
            worker.start(max(1, self.worker_threads), self.statement_length,
                         self.server_timeout, self.use_short_types,
                         self.show_docstring, (log_file, root_log.level),
                         self.python_path)
            cache.start_background(worker.comp_queue)
            self.workers_started = True

    def get_complete_position(self, context):
        pattern = r'\w*$'
        if context['input'].lstrip().startswith(('from ', 'import ')):
            m = re.search(r'[,\s]$', context['input'])
            if m:
                return m.end()
        m = re.search(pattern, context['input'])
        return m.start() if m else -1
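    # Illustrative behavior (editor's note): on import lines the position
    # snaps to just after a separator so a fresh module name is completed,
    # e.g. for input 'import os, ' the position is len('import os, '), while
    # for ordinary input like 'foo.ba' it is the start of 'ba'.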

    def mix_boilerplate(self, completions):
        seen = set()
        for item in self.boilerplate + completions:
            if item['name'] in seen:
                continue
            seen.add(item['name'])
            yield item

    def finalize(self, item):
        abbr = item['name']

        if self.show_docstring:
            desc = item['doc']
        else:
            desc = ''

        if item['params'] is not None:
            sig = '{}({})'.format(item['name'], ', '.join(item['params']))
            sig_len = len(sig)

            desc = sig + '\n\n' + desc

            if self.statement_length > 0 and sig_len > self.statement_length:
                params = []
                length = len(item['name']) + 2

                for p in item['params']:
                    p = p.split('=', 1)[0]
                    length += len(p)
                    params.append(p)

                length += 2 * (len(params) - 1)

                # +5 for the ellipsis and separator
                while length + 5 > self.statement_length and len(params):
                    length -= len(params[-1]) + 2
                    params = params[:-1]

                if len(item['params']) > len(params):
                    params.append('...')

                sig = '{}({})'.format(item['name'], ', '.join(params))

            abbr = sig

        if self.use_short_types:
            kind = item['short_type'] or item['type']
        else:
            kind = item['type']

        return {
            'word': item['name'],
            'abbr': abbr,
            'kind': kind,
            'info': desc.strip(),
            'menu': '[jedi] ',
            'dup': 1,
        }
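    # Truncation sketch (editor's note): with statement_length = 20, an item
    # named 'connect' with params ['host', 'port=0', 'timeout=None'] loses
    # its defaults and trailing params until the signature fits, yielding an
    # abbr along the lines of 'connect(host, ...)'.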

    def finalize_cached(self, cache_key, filters, cached):
        if cached:
            if cached.completions is None:
                out = self.mix_boilerplate([])
            elif cache_key[-1] == 'vars':
                out = self.mix_boilerplate(cached.completions)
            else:
                out = cached.completions
            if filters:
                out = (x for x in out if x['type'] in filters)
            return [self.finalize(x) for x in sorted(out, key=sort_key)]
        return []

    @profiler.profile
    def gather_candidates(self, context):
        refresh_boilerplate = False
        if not self.boilerplate:
            bp = cache.retrieve(('boilerplate~',))
            if bp:
                self.boilerplate = bp.completions[:]
                refresh_boilerplate = True
            else:
                # This should be the first time any completion happened, so
                # `wait` will be True.
                worker.work_queue.put((('boilerplate~',), [], '', 1, 0, '', None))

        line = context['position'][1]
        col = context['complete_position']
        buf = self.vim.current.buffer
        src = getlines(self.vim)

        extra_modules = []
        cache_key = None
        cached = None
        refresh = True
        wait = False

        # Inclusion filters for the results
        filters = []

        if re.match(r'^\s*(from|import)\s+', context['input']) \
                and not re.match(r'^\s*from\s+\S+\s+', context['input']):
            # If starting an import, only show module results
            filters.append('module')

        cache_key, extra_modules = cache.cache_context(buf.name, context, src,
                                                       self.extra_path)
        cached = cache.retrieve(cache_key)
        if cached and not cached.refresh:
            modules = cached.modules
            if all([filename in modules for filename in extra_modules]) \
                    and all([utils.file_mtime(filename) == mtime
                             for filename, mtime in modules.items()]):
                # The cache is still valid
                refresh = False

        if cache_key and (cache_key[-1] in ('dot', 'vars', 'import', 'import~') or
                          (cached and cache_key[-1] == 'package' and
                           not len(cached.modules))):
            # Always refresh scoped variables and module imports. Additionally,
            # refresh cached items that did not have associated module files.
            refresh = True

        # Extra options to pass to the server.
        options = {
            'cwd': context.get('cwd'),
            'extra_path': self.extra_path,
            'runtimepath': context.get('runtimepath'),
        }

        if (not cached or refresh) and cache_key and cache_key[-1] == 'package':
            # Create a synthetic completion for a module import as a fallback.
            synthetic_src = ['import {0}; {0}.'.format(cache_key[0])]
            options.update({
                'synthetic': {
                    'src': synthetic_src,
                    'line': 1,
                    'col': len(synthetic_src[0]),
                }
            })

        if not cached:
            wait = True

        # Note: This waits a very short amount of time to give the server or
        # cache a chance to reply. If there's no reply during this period,
        # empty results are returned and we defer to deoplete's async refresh.
        # The current request's async status is tracked in `_async_keys`.
        # If the async cache result is older than 5 seconds, the completion
        # request goes back to the default behavior of attempting to refresh as
        # needed by the `refresh` and `wait` variables above.
        self.debug('Key: %r, Refresh: %r, Wait: %r, Async: %r', cache_key,
                   refresh, wait, cache_key in self._async_keys)

        context['is_async'] = cache_key in self._async_keys
        if context['is_async']:
            if not cached:
                self.debug('[async] waiting for completions: %r', cache_key)
                return []
            else:
                self._async_keys.remove(cache_key)
                context['is_async'] = False
                if time.time() - cached.time < 5:
                    self.debug('[async] finished: %r', cache_key)
                    return self.finalize_cached(cache_key, filters, cached)
                else:
                    self.debug('[async] outdated: %r', cache_key)

        if cache_key and (not cached or refresh):
            n = time.time()
            wait_complete = False
            worker.work_queue.put((cache_key, extra_modules, '\n'.join(src),
                                   line, col, str(buf.name), options))
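            # Request tuple layout (editor's note), matching what the worker
            # unpacks on the other side of the queue:
            #   (cache_key, extra_modules, source, line, col, filename,
            #    options)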
            while wait and time.time() - n < 0.25:
                cached = cache.retrieve(cache_key)
                if cached and cached.time >= n:
                    self.debug('Got updated cache, stopped waiting.')
                    wait_complete = True
                    break
                time.sleep(0.01)

            if wait and not wait_complete:
                self._async_keys.add(cache_key)
                context['is_async'] = True
                self.debug('[async] deferred: %r', cache_key)
                return []

        if refresh_boilerplate:
            # This should only occur the first time completions happen.
            # Refresh the boilerplate to ensure it's always up to date (just in
            # case).
            self.debug('Refreshing boilerplate')
            worker.work_queue.put((('boilerplate~',), [], '', 1, 0, '', None))

        return self.finalize_cached(cache_key, filters, cached)
@ -0,0 +1,451 @@
import glob
import hashlib
import json
import logging
import os
import re
import subprocess
import threading
import time
from itertools import chain
from string import whitespace

from deoplete_jedi import utils

_paths = []
_cache_path = None
# List of items in the file system cache. `import~` is a special key for
# caching import modules. It should not be cached to disk.
_file_cache = set(['import~'])

# Cache version allows us to invalidate outdated cache data structures.
_cache_version = 16
_cache_lock = threading.RLock()
_cache = {}

python_path = 'python'

log = logging.getLogger('deoplete.jedi.cache')

# This uses [\ \t] to avoid spanning lines
_import_re = re.compile(r'''
    ^[\ \t]*(
        from[\ \t]+[\w\.]+[\ \t]+import\s+\([\s\w,]+\)|
        from[\ \t]+[\w\.]+[\ \t]+import[\ \t\w,]+|
        import[\ \t]+\([\s\w,]+\)|
        import[\ \t]+[\ \t\w,]+
    )
''', re.VERBOSE | re.MULTILINE)


class CacheEntry(object):
    def __init__(self, data):
        self.key = tuple(data.get('cache_key'))
        self._touched = time.time()
        self.time = data.get('time')
        self.modules = data.get('modules')
        self.completions = data.get('completions', [])
        self.refresh = False
        if self.completions is None:
            self.refresh = True
            self.completions = []

    def update_from(self, other):
        self.key = other.key
        self.time = other.time
        self.modules = other.modules
        self.completions = other.completions

    def touch(self):
        self._touched = time.time()

    def to_dict(self):
        return {
            'version': _cache_version,
            'cache_key': self.key,
            'time': self.time,
            'modules': self.modules,
            'completions': self.completions,
        }


def get_cache_path():
    global _cache_path
    if not _cache_path or not os.path.isdir(_cache_path):
        p = subprocess.Popen([python_path, '-V'], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        version = re.search(r'(\d+\.\d+)\.',
                            (stdout or stderr).decode('utf8')).group(1)
        cache_dir = os.getenv('XDG_CACHE_HOME', '~/.cache')
        cache_dir = os.path.join(os.path.expanduser(cache_dir),
                                 'deoplete/jedi', version)
        if not os.path.exists(cache_dir):
            umask = os.umask(0)
            os.makedirs(cache_dir, 0o0700)
            os.umask(umask)
        _cache_path = cache_dir
    return _cache_path
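# Layout sketch (editor's note): the on-disk cache lands in a per-interpreter
# directory keyed by the `python -V` major.minor version, e.g. with Python
# 3.6 and default XDG settings:
#
#   ~/.cache/deoplete/jedi/3.6/<package>.json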


def retrieve(key):
    if not key:
        return None

    with _cache_lock:
        if key[-1] == 'package' and key[0] not in _file_cache:
            # This will only load the cached item from a file the first time
            # it was seen.
            cache_file = os.path.join(get_cache_path(),
                                      '{}.json'.format(key[0]))
            if os.path.isfile(cache_file):
                with open(cache_file, 'rt') as fp:
                    try:
                        data = json.load(fp)
                        if data.get('version', 0) >= _cache_version:
                            _file_cache.add(key[0])
                            cached = CacheEntry(data)
                            cached.time = time.time()
                            _cache[key] = cached
                            log.debug('Loaded from file: %r', key)
                            return cached
                    except Exception:
                        pass
        cached = _cache.get(key)
        if cached:
            cached.touch()
        return cached


def store(key, value):
    with _cache_lock:
        if not isinstance(value, CacheEntry):
            value = CacheEntry(value)

        if value.refresh:
            # refresh is set when completions is None. This will be due to
            # Jedi producing an error and not getting any completions. Use any
            # previously cached completions while a refresh is attempted.
            old = _cache.get(key)
            if old is not None:
                value.completions = old.completions

        _cache[key] = value

        if key[-1] == 'package' and key[0] not in _file_cache:
            _file_cache.add(key[0])
            cache_file = os.path.join(get_cache_path(),
                                      '{}.json'.format(key[0]))
            with open(cache_file, 'wt') as fp:
                json.dump(value.to_dict(), fp)
                log.debug('Stored to file: %r', key)
        return value


def exists(key):
    with _cache_lock:
        return key in _cache


def reap_cache(max_age=300):
    """Clear the cache of old items

    Module level completions are exempt from reaping. It is assumed that
    module level completions will have a key length of 1.
    """
    while True:
        time.sleep(300)

        with _cache_lock:
            now = time.time()
            cur_len = len(_cache)
            for cached in list(_cache.values()):
                if cached.key[-1] not in ('package', 'local', 'boilerplate~',
                                          'import~') \
                        and now - cached._touched > max_age:
                    _cache.pop(cached.key)

            if cur_len - len(_cache) > 0:
                log.debug('Removed %d of %d cache items',
                          cur_len - len(_cache), cur_len)


def cache_processor_thread(compl_queue):
    errors = 0
    while True:
        try:
            compl = compl_queue.get()
            cache_key = compl.get('cache_key')
            cached = retrieve(cache_key)
            if cached is None or cached.time <= compl.get('time'):
                cached = store(cache_key, compl)
                log.debug('Processed: %r', cache_key)
            errors = 0
        except Exception as e:
            errors += 1
            if errors > 3:
                break
            log.error('Got exception while processing: %r', e)


def start_background(compl_queue):
    log.debug('Starting background threads')
    t = threading.Thread(target=cache_processor_thread, args=(compl_queue,))
    t.daemon = True
    t.start()
    t = threading.Thread(target=reap_cache)
    t.daemon = True
    t.start()


# balanced() taken from:
# http://stackoverflow.com/a/6753172/4932879
# Modified to include string delimiters
def _balanced():
    # Doc strings might be an issue, but we don't care.
    idelim = iter("""(){}[]""''""")
    delims = dict(zip(idelim, idelim))
    odelims = {v: k for k, v in delims.items()}
    closing = delims.values()

    def balanced(astr):
        """Test if a string has balanced delimiters.

        Returns a boolean and a string of the opened delimiter.
        """
        stack = []
        skip = False
        open_d = ''
        open_str = ''
        for c in astr:
            if c == '\\':
                skip = True
                continue
            if skip:
                skip = False
                continue
            d = delims.get(c, None)
            if d and not open_str:
                if d in '"\'':
                    open_str = d
                open_d = odelims.get(d)
                stack.append(d)
            elif c in closing:
                if c == open_str:
                    open_str = ''
                if not open_str and (not stack or c != stack.pop()):
                    return False, open_d
                if stack:
                    open_d = odelims.get(stack[-1])
                else:
                    open_d = ''
        return not stack, open_d
    return balanced
balanced = _balanced()
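# Behavior sketch (editor's note): `balanced` reports whether delimiters in a
# fragment are closed and, when they are not, which one is innermost-open:
#
#   >>> balanced('re.search(x.spl')
#   (False, '(')
#   >>> balanced('re.search(x.spl)')
#   (True, '')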


def split_module(text, default_value=None):
    """Utility to split the module text.

    If there is nothing to split, return `default_value`.
    """
    b, d = balanced(text)
    if not b:
        # Handles cases where the cursor is inside of unclosed delimiters.
        # If the input is: re.search(x.spl
        # The returned value should be: x
        if d and d not in '\'"':
            di = text.rfind(d)
            if di != -1:
                text = text[di+1:]
        else:
            return default_value
    m = re.search(r'([\S\.]+)$', text)
    if m and '.' in m.group(1):
        return m.group(1).rsplit('.', 1)[0]
    return default_value
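# Example (editor's note): the trailing attribute is dropped so the module
# portion can be used for the cache key:
#
#   >>> split_module('os.path.jo')
#   'os.path'
#   >>> split_module('print(os.pa')
#   'os'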


def get_parents(source, line, class_only=False):
    """Find the parent blocks

    Collects parent blocks that contain the current line to help form a cache
    key based on variable scope.
    """
    parents = []
    start = line - 1
    indent = len(source[start]) - len(source[start].lstrip())
    if class_only:
        pattern = r'^\s*class\s+(\w+)'
    else:
        pattern = r'^\s*(?:def|class)\s+(\w+)'

    for i in range(start, 0, -1):
        s_line = source[i].lstrip()
        l_indent = len(source[i]) - len(s_line)
        if s_line and l_indent < indent:
            m = re.search(pattern, s_line)
            indent = l_indent
            if m:
                parents.insert(0, m.group(1))

    return parents
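# Example (editor's note): scanning upward from the given line collects each
# enclosing block name in outer-to-inner order:
#
#   >>> src = ['import os',
#   ...        'class Foo:',
#   ...        '    def bar(self):',
#   ...        '        x = 1']
#   >>> get_parents(src, 4)
#   ['Foo', 'bar']
#   >>> get_parents(src, 4, class_only=True)
#   ['Foo']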


def full_module(source, obj):
    """Construct the full module path

    This finds all imports and attempts to reconstruct the full module path.
    If matched on a standard `import` line, `obj` itself is a full module path.
    On `from` import lines, the parent module is prepended to `obj`.
    """

    module = ''
    obj_pat = r'(?:(\S+)\s+as\s+)?\b{0}\b'.format(re.escape(obj.split('.', 1)[0]))
    for match in _import_re.finditer('\n'.join(source)):
        module = ''
        imp_line = ' '.join(match.group(0).split())
        if imp_line.startswith('from '):
            _, module, imp_line = imp_line.split(' ', 2)
        m = re.search(obj_pat, imp_line)
        if m:
            # If the import is aliased, use the alias as part of the key
            alias = m.group(1)
            if alias:
                obj = obj.split('.')
                obj[0] = alias
                obj = '.'.join(obj)
            if module:
                return '.'.join((module, obj))
            return obj
    return None
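# Example (editor's note): given a buffer containing the line
# 'from os import path as p', looking up the aliased name resolves to the
# real dotted path:
#
#   >>> full_module(['from os import path as p'], 'p.join')
#   'os.path.join'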


def sys_path(refresh=False):
    global _paths
    if not _paths or refresh:
        p = subprocess.Popen([
            python_path,
            '-c', r'import sys; print("\n".join(sys.path))',
        ], stdout=subprocess.PIPE)
        stdout, _ = p.communicate()
        _paths = [x for x in stdout.decode('utf8').split('\n')
                  if x and os.path.isdir(x)]
    return _paths


def is_package(module, refresh=False):
    """Test if a module path is an installed package

    The current interpreter's sys.path is retrieved on first run.
    """
    if re.search(r'[^\w\.]', module):
        return False

    paths = sys_path(refresh)

    module = module.split('.', 1)[0]
    pglobs = [os.path.join(x, module, '__init__.py') for x in paths]
    pglobs.extend([os.path.join(x, '{}.*'.format(module)) for x in paths])
    return any(map(glob.glob, pglobs))
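# Example (editor's note): only the top-level name is checked, via glob
# patterns like '<sys.path entry>/os/__init__.py' and '<sys.path entry>/os.*':
#
#   >>> is_package('os.path')
#   True
#   >>> is_package('no such module')
#   False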


def cache_context(filename, context, source, extra_path):
    """Caching based on context input.

    If the input is blank, it was triggered with `.` to get module completions.

    The module files as reported by Jedi are stored with their modification
    times to help detect if a cache needs to be refreshed.

    For scoped variables in the buffer, construct a cache key using the
    filename. The buffer file's modification time is checked to see if the
    completion needs to be refreshed. The approximate scope lines are cached
    to help invalidate the cache based on line position.

    Cache keys are made using tuples to make them easier to interpret later.
    """
    cinput = context['input'].lstrip().lstrip('@')
    if not re.sub(r'[\s\d\.]+', '', cinput):
        return None, []
    filename_hash = hashlib.md5(filename.encode('utf8')).hexdigest()
    line = context['position'][1]
    log.debug('Input: "%s"', cinput)
    cache_key = None
    extra_modules = []
    cur_module = os.path.splitext(os.path.basename(filename))[0]

    if cinput.startswith(('import ', 'from ')):
        # Cache imports with buffer filename as the key prefix.
        # For `from` imports, the first part of the statement is
        # considered to be the same as `import` for caching.

        import_key = 'import~'
        cinput = context['input'].lstrip()
        m = re.search(r'^from\s+(\S+)(.*)', cinput)
        if m:
            if m.group(2).lstrip() in 'import':
                cache_key = ('importkeyword~', )
                return cache_key, extra_modules
            import_key = m.group(1) or 'import~'
        elif cinput.startswith('import ') and cinput.rstrip().endswith('.'):
            import_key = re.sub(r'[^\s\w\.]', ' ', cinput.strip()).split()[-1]

        if import_key:
            if '.' in import_key and import_key[-1] not in whitespace \
                    and not re.search(r'^from\s+\S+\s+import', cinput):
                # Dot completion on the import line
                import_key, _ = import_key.rsplit('.', 1)
            import_key = import_key.rstrip('.')
            module_file = utils.module_search(
                import_key,
                chain(extra_path,
                      [context.get('cwd'), os.path.dirname(filename)],
                      utils.rplugin_runtime_paths(context)))
            if module_file:
                cache_key = (import_key, 'local')
                extra_modules.append(module_file)
            elif is_package(import_key):
                cache_key = (import_key, 'package')
            elif not cinput.endswith('.'):
                cache_key = ('import~',)
            else:
                return None, extra_modules

    if not cache_key:
        obj = split_module(cinput.strip())
        if obj:
            cache_key = (obj, 'package')
            if obj.startswith('self'):
                if os.path.exists(filename):
                    extra_modules.append(filename)
                # `self` is a special case object that needs a scope included
                # in the cache key.
                parents = get_parents(source, line, class_only=True)
                parents.insert(0, cur_module)
                cache_key = (filename_hash, tuple(parents), obj)
            else:
                module_path = full_module(source, obj)
                if module_path and not module_path.startswith('.') \
                        and is_package(module_path):
                    cache_key = (module_path, 'package')
                else:
                    # A quick scan revealed that the dot completion doesn't
                    # involve an imported module. Treat it like a scoped
                    # variable and ensure the cache invalidates when the file
                    # is saved.
                    if os.path.exists(filename):
                        extra_modules.append(filename)

                    module_file = utils.module_search(
                        module_path, [os.path.dirname(filename)])
                    if module_file:
                        cache_key = (module_path, 'local')
                    else:
                        parents = get_parents(source, line)
                        parents.insert(0, cur_module)
                        cache_key = (filename_hash, tuple(parents), obj, 'dot')
        elif context.get('complete_str') or cinput.rstrip().endswith('='):
            parents = get_parents(source, line)
            parents.insert(0, cur_module)
            cache_key = (filename_hash, tuple(parents), 'vars')
            if os.path.exists(filename):
                extra_modules.append(filename)

    return cache_key, extra_modules
@ -0,0 +1,9 @@
def set_debug(logger, path):
    from logging import FileHandler, Formatter, DEBUG
    hdlr = FileHandler(path)
    logger.addHandler(hdlr)
    datefmt = '%Y/%m/%d %H:%M:%S'
    fmt = Formatter(
        "%(levelname)s %(asctime)s %(message)s", datefmt=datefmt)
    hdlr.setFormatter(fmt)
    logger.setLevel(DEBUG)
@ -0,0 +1,63 @@
# deque lives in collections; the stdlib queue module only re-exports it as
# an implementation detail, so import it from its documented home.
import collections
import functools

try:
    import statistics
    stdev = statistics.stdev
    mean = statistics.mean
except ImportError:
    stdev = None

    def mean(l):
        return sum(l) / len(l)

try:
    import time
    clock = time.perf_counter
except Exception:
    import timeit
    clock = timeit.default_timer


class tfloat(float):
    color = 39

    def __str__(self):
        n = self * 1000
        return '\x1b[%dm%f\x1b[mms' % (self.color, n)


def profile(func):
    name = func.__name__
    samples = collections.deque(maxlen=5)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.is_debug_enabled:
            return func(self, *args, **kwargs)
        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)

        if len(samples) < 2:
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            if stdev:
                d = tfloat(stdev(samples))
            else:
                d = 0

            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33
        samples.append(n)
        self.info('\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s',
                  name, n, m, d)
        return ret
    return wrapper
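# Usage sketch (editor's note): any object exposing `is_debug_enabled` and an
# `info()` logger method can have methods timed, which is how Source uses it:
#
#   class Timed(object):
#       is_debug_enabled = True
#
#       def info(self, fmt, *args):
#           print(fmt % args)
#
#       @profile
#       def work(self):
#           return sum(range(10000))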
@ -0,0 +1,576 @@
"""Jedi mini server for deoplete-jedi

This script allows Jedi to run using the Python interpreter that is found in
the user's environment instead of the one Neovim is using.

Jedi seems to accumulate latency with each completion. To deal with this, the
server is restarted after 50 completions. This threshold is relatively high
considering that deoplete-jedi caches completion results. These combined
should make deoplete-jedi's completions pretty fast and responsive.
"""
from __future__ import unicode_literals

import argparse
import functools
import logging
import os
import re
import struct
import subprocess
import sys
import threading
import time
from glob import glob

# This is possible because the path is inserted in deoplete_jedi.py as well
# as set in PYTHONPATH by the Client class.
from deoplete_jedi import utils

log = logging.getLogger('deoplete')
nullHandler = logging.NullHandler()

if not log.handlers:
    log.addHandler(nullHandler)

try:
    import cPickle as pickle
except ImportError:
    import pickle

libpath = os.path.join(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'vendored')
jedi_path = os.path.join(libpath, 'jedi')
parso_path = os.path.join(libpath, 'parso')

# Type mapping. Empty values will use the key value instead.
# Keep them 5 characters max to minimize required space to display.
_types = {
    'import': 'imprt',
    'class': '',
    'function': 'def',
    'globalstmt': 'var',
    'instance': 'var',
    'statement': 'var',
    'keyword': 'keywd',
    'module': 'mod',
    'param': 'arg',
    'property': 'prop',

    'bool': '',
    'bytes': 'byte',
    'complex': 'cmplx',
    'dict': '',
    'list': '',
    'float': '',
    'int': '',
    'object': 'obj',
    'set': '',
    'slice': '',
    'str': '',
    'tuple': '',
    'mappingproxy': 'dict',  # cls.__dict__
    'member_descriptor': 'cattr',
    'getset_descriptor': 'cprop',
    'method_descriptor': 'cdef',
}
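# Resolution sketch (editor's note): the consumer falls back to the full type
# name when the short form is empty, i.e. effectively
# `_types.get(type_) or type_`, so 'function' displays as 'def' while 'class'
# stays 'class'.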


class StreamError(Exception):
    """Error in reading/writing streams."""


class StreamEmpty(StreamError):
    """Empty stream data"""


def stream_read(pipe):
    """Read data from the pipe."""
    buffer = getattr(pipe, 'buffer', pipe)
    header = buffer.read(4)
    if not len(header):
        raise StreamEmpty

    if len(header) < 4:
        raise StreamError('Incorrect byte length')

    length = struct.unpack('I', header)[0]
    data = buffer.read(length)
    if len(data) < length:
        raise StreamError('Got less data than expected')
    return pickle.loads(data)


def stream_write(pipe, obj):
    """Write data to the pipe."""
    data = pickle.dumps(obj, 2)
    header = struct.pack(b'I', len(data))
    buffer = getattr(pipe, 'buffer', pipe)
    buffer.write(header + data)
    pipe.flush()
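# Wire format sketch (editor's note): a 4-byte native-endian length header
# followed by a pickled payload. Round-tripping through an in-memory buffer
# shows the framing:
#
#   import io
#   payload = pickle.dumps({'hello': 'world'}, 2)
#   framed = struct.pack('I', len(payload)) + payload
#   assert stream_read(io.BytesIO(framed)) == {'hello': 'world'}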


def strip_decor(source):
    """Remove decorator lines

    If the decorator is a function call, this will leave them dangling. Jedi
    should be fine with this since they'll look like tuples just hanging out
    not doing anything important.
    """
    return re.sub(r'^(\s*)@\w+', r'\1', source, flags=re.M)
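# Example (editor's note): only the '@name' token is removed, so a plain
# decorator disappears while a call decorator leaves its arguments behind:
#
#   >>> strip_decor('@property\ndef foo(self):\n    return 1\n')
#   '\ndef foo(self):\n    return 1\n'
#   >>> strip_decor('@wraps(f)\ndef g():\n    pass\n')
#   '(f)\ndef g():\n    pass\n'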


def retry_completion(func):
    """Decorator to retry a completion

    A second attempt is made with decorators stripped from the source.
    """
    @functools.wraps(func)
    def wrapper(self, source, *args, **kwargs):
        try:
            return func(self, source, *args, **kwargs)
        except Exception:
            if '@' in source:
                log.warn('Retrying completion %r', func.__name__, exc_info=True)
                try:
                    return func(self, strip_decor(source), *args, **kwargs)
                except Exception:
                    pass
            log.warn('Failed completion %r', func.__name__, exc_info=True)
    return wrapper


class Server(object):
    """Server class

    This is created when this script is run directly.
    """
    def __init__(self, desc_len=0, short_types=False, show_docstring=False):
        self.desc_len = desc_len
        self.use_short_types = short_types
        self.show_docstring = show_docstring
        self.unresolved_imports = set()

        from jedi import settings

        settings.use_filesystem_cache = False

    def _loop(self):
        from jedi.evaluate.sys_path import _get_venv_sitepackages

        while True:
            data = stream_read(sys.stdin)
            if not isinstance(data, tuple):
                continue

            cache_key, source, line, col, filename, options = data
            orig_path = sys.path[:]
            venv = os.getenv('VIRTUAL_ENV')
            if venv:
                sys.path.insert(0, _get_venv_sitepackages(venv))
            add_path = self.find_extra_sys_path(filename)
            if add_path and add_path not in sys.path:
                # Add the found path to sys.path. I'm not 100% certain if this
                # is actually helping anything, but it feels like the right
                # thing to do.
                sys.path.insert(0, add_path)
            if filename:
                sys.path.append(os.path.dirname(filename))

            if isinstance(options, dict):
                extra = options.get('extra_path')
                if extra:
                    if not isinstance(extra, list):
                        extra = [extra]
                    sys.path.extend(extra)

                # Add extra paths if working on a Python remote plugin.
                sys.path.extend(utils.rplugin_runtime_paths(options))

            # Decorators on incomplete functions cause an error to be raised
            # by Jedi. I assume this is because Jedi is attempting to evaluate
            # the return value of the wrapped, but broken, function.
            # Our solution is to simply strip decorators from the source since
            # we are a completion service, not the syntax police.
            out = self.script_completion(source, line, col, filename)

            if not out and cache_key[-1] == 'vars':
                # Attempt scope completion. If it fails, it should fall
                # through to script completion.
                log.debug('Fallback to scoped completions')
                out = self.scoped_completions(source, filename, cache_key[-2])

            if not out and isinstance(options, dict) and 'synthetic' in options:
                synthetic = options.get('synthetic')
                log.debug('Using synthetic completion: %r', synthetic)
                out = self.script_completion(synthetic['src'],
                                             synthetic['line'],
                                             synthetic['col'], filename)

            if not out and cache_key[-1] in ('package', 'local'):
                # The backup plan
                log.debug('Fallback to module completions')
                try:
                    out = self.module_completions(cache_key[0], sys.path)
                except Exception:
                    pass

            stream_write(sys.stdout, out)
            sys.path[:] = orig_path

    def run(self):
        log.debug('Starting server. sys.path = %r', sys.path)
        try:
            stream_write(sys.stdout, tuple(sys.version_info))
            self._loop()
        except StreamEmpty:
            log.debug('Input closed. Shutting down.')
        except Exception:
            log.exception('Server Exception. Shutting down.')

    def find_extra_sys_path(self, filename):
        """Find the file's "root"

        This tries to determine the script's root package. The first step is
        to scan upward until there are no longer __init__.py files. If that
        fails, check immediate subdirectories to find __init__.py files which
        could mean that the current script is not part of a package, but has
        sub-modules.
        """
        add_path = ''
        dirname = os.path.dirname(filename)
        scan_dir = dirname
        while len(scan_dir) \
                and os.path.isfile(os.path.join(scan_dir, '__init__.py')):
            scan_dir = os.path.dirname(scan_dir)

        if scan_dir != dirname:
            add_path = scan_dir
        elif glob('{}/*/__init__.py'.format(dirname)):
            add_path = dirname

        return add_path
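    # Example layout (editor's note): for /proj/pkg/sub/mod.py where both
    # /proj/pkg and /proj/pkg/sub contain __init__.py, the upward scan stops
    # at /proj, so '/proj' is added to sys.path and 'pkg.sub' is importable.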

    def module_completions(self, module, paths):
        """Directly get completions from the module file

        This is the fallback if all else fails for module completion.
        """
        found = utils.module_search(module, paths)
        if not found:
            return None

        log.debug('Found script for fallback completions: %r', found)
        mod_parts = tuple(re.sub(r'\.+', '.', module).strip('.').split('.'))
        path_parts = os.path.splitext(found)[0].split('/')
        if path_parts[-1] == '__init__':
            path_parts.pop()
        path_parts = tuple(path_parts)
        match_mod = mod_parts
        ml = len(mod_parts)
        for i in range(ml):
            if path_parts[i-ml:] == mod_parts[:ml-i]:
                match_mod = mod_parts[-i:]
                break
        log.debug('Remainder to match: %r', match_mod)

        import jedi

        completions = jedi.api.names(path=found, references=True)
        completions = utils.jedi_walk(completions)
        while len(match_mod):
            for c in completions:
                if c.name == match_mod[0]:
                    completions = c.defined_names()
                    break
            else:
                log.debug('No more matches at %r', match_mod[0])
                return []
            match_mod = match_mod[:-1]

        out = []
        tmp_filecache = {}
        seen = set()
        for c in completions:
            parsed = self.parse_completion(c, tmp_filecache)
            seen_key = (parsed['type'], parsed['name'])
            if seen_key in seen:
                continue
            seen.add(seen_key)
            out.append(parsed)
        return out

    @retry_completion
    def script_completion(self, source, line, col, filename):
        """Standard Jedi completions"""
        import jedi

        log.debug('Line: %r, Col: %r, Filename: %r', line, col, filename)
        completions = jedi.Script(source, line, col, filename).completions()
        out = []
        tmp_filecache = {}
        for c in completions:
            out.append(self.parse_completion(c, tmp_filecache))
        return out

    def get_parents(self, c):
        """Collect parent blocks

        This is for matching a request's cache key when performing scoped
        completions.
        """
        parents = []
        while True:
            try:
                c = c.parent()
                parents.insert(0, c.name)
                if c.type == 'module':
                    break
            except AttributeError:
                break
        return tuple(parents)

    def resolve_import(self, completion, depth=0, max_depth=10, seen=None):
        """Follow import until it no longer is an import type"""
        if seen is None:
            seen = []
        seen.append(completion)
        log.debug('Resolving: %r', completion)
        defs = completion.goto_assignments()
        if not defs:
            return None
        resolved = defs[0]
        if resolved in seen:
            return None
        if resolved.type == 'import' and depth < max_depth:
            return self.resolve_import(resolved, depth + 1, max_depth, seen)
        log.debug('Resolved: %r', resolved)
        return resolved

    @retry_completion
    def scoped_completions(self, source, filename, parent):
        """Scoped completion

        This gets all definitions for a specific scope allowing them to be
        cached without needing to consider the current position in the source.
        This would be slow in Vim without threading.
        """
        import jedi

        completions = jedi.api.names(source, filename, all_scopes=True)
        out = []
        tmp_filecache = {}
        seen = set()
        for c in completions:
            c_parents = self.get_parents(c)
            if parent and (len(c_parents) > len(parent) or
                           c_parents != parent[:len(c_parents)]):
                continue
            if c.type == 'import' and c.full_name not in self.unresolved_imports:
                resolved = self.resolve_import(c)
                if resolved is None:
                    log.debug('Could not resolve import: %r', c.full_name)
                    self.unresolved_imports.add(c.full_name)
                    continue
                else:
                    c = resolved
            parsed = self.parse_completion(c, tmp_filecache)
            seen_key = (parsed['name'], parsed['type'])
            if seen_key in seen:
                continue
            seen.add(seen_key)
            out.append(parsed)
        return out

    def completion_dict(self, name, type_, comp):
        """Final construction of the completion dict."""
        doc = comp.docstring()
        i = doc.find('\n\n')
        if i != -1:
            doc = doc[i:]

        params = None
        try:
            if type_ in ('function', 'class'):
                params = []
                for i, p in enumerate(comp.params):
                    desc = p.description.strip()
                    if i == 0 and desc == 'self':
                        continue
                    if '\\n' in desc:
                        desc = desc.replace('\\n', '\\x0A')
                    # Note: Hack for jedi param bugs
                    if desc.startswith('param ') or desc == 'param':
                        desc = desc[5:].strip()
                    if desc:
                        params.append(desc)
        except Exception:
            params = None

        return {
            'module': comp.module_path,
            'name': name,
            'type': type_,
            'short_type': _types.get(type_),
            'doc': doc.strip(),
            'params': params,
        }

    def parse_completion(self, comp, cache):
        """Return a dict describing the completion.

        The dict is built by completion_dict() from the completion's name,
        type, and docstring.
        """
        name = comp.name

        type_ = comp.type
        desc = comp.description

        if type_ == 'instance' and desc.startswith(('builtins.', 'posix.')):
            # Simple description
            builtin_type = desc.rsplit('.', 1)[-1]
            if builtin_type in _types:
                return self.completion_dict(name, builtin_type, comp)

        if type_ == 'class' and desc.startswith('builtins.'):
            return self.completion_dict(name, type_, comp)

        if type_ == 'function':
            if comp.module_path not in cache and comp.line and comp.line > 1 \
                    and os.path.exists(comp.module_path):
                with open(comp.module_path, 'r') as fp:
                    cache[comp.module_path] = fp.readlines()
            lines = cache.get(comp.module_path)
            if isinstance(lines, list) and len(lines) > 1 \
                    and comp.line < len(lines) and comp.line > 1:
                # Check the function's decorators to see if it's decorated
                # with @property
                i = comp.line - 2
                while i >= 0:
                    line = lines[i].lstrip()
                    if not line.startswith('@'):
                        break
                    if line.startswith('@property'):
                        return self.completion_dict(name, 'property', comp)
                    i -= 1
            return self.completion_dict(name, type_, comp)

        return self.completion_dict(name, type_, comp)


class Client(object):
    """Client object

    This will be used by deoplete-jedi to interact with the server.
    """
    max_completion_count = 50

    def __init__(self, desc_len=0, short_types=False, show_docstring=False,
                 debug=False, python_path=None):
        self._server = None
        self.restarting = threading.Lock()
        self.version = (0, 0, 0, 'final', 0)
        self.env = os.environ.copy()
        self.env.update({
            'PYTHONPATH': os.pathsep.join(
                (parso_path, jedi_path,
                 os.path.dirname(os.path.dirname(__file__)))),
        })

        if 'VIRTUAL_ENV' in os.environ:
            self.env['VIRTUAL_ENV'] = os.getenv('VIRTUAL_ENV')
            prog = os.path.join(self.env['VIRTUAL_ENV'], 'bin', 'python')
        elif python_path:
            prog = python_path
        else:
            prog = 'python'

        self.cmd = [prog, '-u', __file__, '--desc-length', str(desc_len)]
        if short_types:
            self.cmd.append('--short-types')
        if show_docstring:
            self.cmd.append('--docstrings')
        if debug:
            self.cmd.extend(('--debug', debug[0], '--debug-level',
                             str(debug[1])))

        try:
            self.restart()
        except Exception as exc:
            from deoplete.exceptions import SourceInitError
            raise SourceInitError('Failed to start server ({}): {}'.format(
                ' '.join(self.cmd), exc))

    def shutdown(self):
        """Shut down the server."""
        if self._server is not None and self._server.returncode is None:
            # Closing the server's stdin will cause it to exit.
            self._server.stdin.close()
            self._server.kill()

    def restart(self):
        """Start or restart the server

        If a server is already running, shut it down.
        """
        with self.restarting:
            self.shutdown()
            self._server = subprocess.Popen(self.cmd, stdin=subprocess.PIPE,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            env=self.env)
            # Might result in "pyenv: version `foo' is not installed (set by
            # /cwd/.python-version)" on stderr.
            try:
                self.version = stream_read(self._server.stdout)
            except StreamEmpty:
                out, err = self._server.communicate()
                raise Exception('Server exited with {}: error: {}'.format(
                    self._server.returncode, err))
            self._count = 0

    def completions(self, *args):
        """Get completions from the server.

        If the number of completions already performed reaches a threshold,
        restart the server.
        """
        if self._count > self.max_completion_count:
            self.restart()

        self._count += 1
        try:
            stream_write(self._server.stdin, args)
            return stream_read(self._server.stdout)
        except StreamError as exc:
            if self.restarting.acquire(False):
                self.restarting.release()
                log.error('Caught %s during handling completions(%s), '
                          'restarting server', exc, args)
                self.restart()
                time.sleep(0.2)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--desc-length', type=int)
    parser.add_argument('--short-types', action='store_true')
    parser.add_argument('--docstrings', action='store_true')
    parser.add_argument('--debug', default='')
    parser.add_argument('--debug-level', type=int, default=logging.DEBUG)
    args = parser.parse_args()

    if args.debug:
        log.removeHandler(nullHandler)
        formatter = logging.Formatter('%(asctime)s %(levelname)-8s '
                                      '(%(name)s) %(message)s')
        handler = logging.FileHandler(args.debug)
        handler.setFormatter(formatter)
        handler.setLevel(args.debug_level)
        log.addHandler(handler)
        log.setLevel(logging.DEBUG)
    log = log.getChild('jedi.server')

    s = Server(args.desc_length, args.short_types, args.docstrings)
    s.run()
else:
    log = log.getChild('jedi.client')
@ -0,0 +1,90 @@
import os
import re
import sys


def file_mtime(filename):
    """Get file modification time

    Return 0 if the file does not exist
    """
    if not os.path.exists(filename):
        return 0
    return int(os.path.getmtime(filename))


def module_file(dirname, suffix, base):
    """Find a script that matches the suffix path."""
    search = os.path.abspath(os.path.join(dirname, suffix))
    # dirname = os.path.dirname(dirname)
    found = ''
    while True:
        p = os.path.join(search, '__init__.py')
        if os.path.isfile(p):
            found = p
            break
        p = search + '.py'
        if os.path.isfile(p):
            found = p
            break
        if os.path.basename(search) == base or search == dirname:
            break
        search = os.path.dirname(search)
    return found


def module_search(module, paths):
    """Search paths for a file matching the module."""
    if not module:
        return ''

    base = re.sub(r'\.+', '.', module).strip('.').split('.')[0]
    module_path = os.path.normpath(re.sub(r'(\.+)', r'/\1/', module).strip('/'))
    for p in paths:
        found = module_file(p, module_path, base)
        if found:
            return found
    return ''
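# Example (editor's note): on a typical CPython install the stdlib json
# package resolves to its __init__.py, e.g.:
#
#   >>> import sys
#   >>> module_search('json', sys.path)  # doctest: +SKIP
#   '/usr/lib/python3.6/json/__init__.py'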


def rplugin_runtime_paths(context):
    """Collect Neovim runtime paths.

    Paths are returned only if they contain an `rplugin/python*` directory.
    """
    paths = []

    if context and 'cwd' in context:
        cwd = context.get('cwd')
        rplugins = ('rplugin/python{}'.format(sys.version_info[0]),
                    'rplugin/pythonx')

        paths.extend(filter(os.path.exists,
                            (os.path.join(cwd, x)
                             for x in rplugins)))

        if paths:
            for rtp in context.get('runtimepath', '').split(','):
                if not rtp:
                    continue
                paths.extend(filter(os.path.exists,
                                    (os.path.join(rtp, x)
                                     for x in rplugins)))
    return paths


def jedi_walk(completions, depth=0, max_depth=5):
    """Walk through Jedi objects

    The purpose for this is to help find an object with a specific name. Once
    found, the walking will stop.
    """
    for c in completions:
        yield c
        if hasattr(c, 'description') and c.type == 'import':
            d = c.description
            if d.startswith('from ') and d.endswith('*') and depth < max_depth:
                # Haven't determined the lowest Python 3 version required.
                # If we determine 3.3, we can use `yield from`
                for sub in jedi_walk(c.defined_names(), depth+1, max_depth):
                    yield sub
@ -0,0 +1,85 @@
import logging
import os
import queue
import threading
import time

from .server import Client
from .utils import file_mtime

log = logging.getLogger('deoplete.jedi.worker')
workers = []
work_queue = queue.Queue()
comp_queue = queue.Queue()
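# Data flow sketch (editor's note): Source puts request tuples of the form
# (cache_key, extra_modules, source, line, col, filename, options) on
# work_queue; each Worker forwards them to its server Client and puts result
# dicts on comp_queue, which cache.start_background() consumes into the cache.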


class Worker(threading.Thread):
    daemon = True

    def __init__(self, in_queue, out_queue, desc_len=0, server_timeout=10,
                 short_types=False, show_docstring=False, debug=False,
                 python_path=None):
        self._client = Client(desc_len, short_types, show_docstring, debug,
                              python_path)

        self.server_timeout = server_timeout
        self.in_queue = in_queue
        self.out_queue = out_queue
        super(Worker, self).__init__()
        self.log = log.getChild(self.name)

    def completion_work(self, cache_key, extra_modules, source, line, col,
                        filename, options):
        completions = self._client.completions(cache_key, source, line, col,
                                               filename, options)
        modules = {f: file_mtime(f) for f in extra_modules}
        if completions is not None:
            for c in completions:
                m = c['module']
                if m and m not in modules and os.path.exists(m):
                    modules[m] = file_mtime(m)

        self.results = {
            'cache_key': cache_key,
            'time': time.time(),
            'modules': modules,
            'completions': completions,
        }

    def run(self):
        while True:
            try:
                work = self.in_queue.get()
                self.log.debug('Got work')

                self.results = None
                t = threading.Thread(target=self.completion_work, args=work)
                t.start()
                t.join(timeout=self.server_timeout)

                if self.results:
                    self.out_queue.put(self.results)
                    self.log.debug('Completed work')
                else:
                    self.log.warn('Restarting server because it\'s taking '
                                  'too long')
                    # Kill all but the last queued job since they're most
                    # likely a backlog that is no longer relevant.
                    while self.in_queue.qsize() > 1:
                        self.in_queue.get()
                        self.in_queue.task_done()
                    self._client.restart()
                self.in_queue.task_done()
            except Exception:
                self.log.debug('Worker error', exc_info=True)


def start(count, desc_len=0, server_timeout=10, short_types=False,
          show_docstring=False, debug=False, python_path=None):
    while count > 0:
        t = Worker(work_queue, comp_queue, desc_len, server_timeout,
                   short_types, show_docstring, debug, python_path)
        workers.append(t)
        t.start()
        log.debug('Started worker: %r', t)
        count -= 1