VIM: Changed plugin management to vim-plug

Before this switch I used pathogen.vim for plugin management, which
resulted in a HUGE .gitmodules file as well as a huge directory full of
complete git repositories. vim-plug fixes both problems: the plugin
directory shrinks to a single config file with one line per plugin, and
there is no longer any need to track each plugin as a submodule in
.gitmodules.
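
For example, vim-fugitive used to be the submodule
vim/plugins/vim-fugitive with its own .gitmodules entry; it is now a
single declaration in vim/config/plugins.vim (a sketch of the pattern,
the full list is further down):

```vim
call plug#begin('~/.vim/plugged')
Plug 'tpope/vim-fugitive'   " one line per plugin instead of one submodule per plugin
call plug#end()
```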

Signed-off-by: Tobias Manske <tobias.manske@mailbox.org>
Tobias Manske 2018-09-01 12:45:39 +02:00
parent 5cbb3a63c6
commit 491fe6066f
GPG Key ID: 978D99F12D4E041F
1059 changed files with 56 additions and 150139 deletions

2
.gitignore vendored
View File

@@ -140,3 +140,5 @@ i3/config
*.sqlite
i3/scripts/dmenu/database.sqlite
albert/*.db
vim/plugged/
vim/autoload/plug.vim

114
.gitmodules vendored
View File

@@ -1,87 +1,3 @@
[submodule "vim/plugins/emmet-vim"]
path = vim/plugins/emmet-vim
url = https://github.com/mattn/emmet-vim
[submodule "vim/plugins/neco-vim"]
path = vim/plugins/neco-vim
url = https://github.com/Shougo/neco-vim
[submodule "vim/plugins/nerdcommenter"]
path = vim/plugins/nerdcommenter
url = https://github.com/scrooloose/nerdcommenter
[submodule "vim/plugins/supertab"]
path = vim/plugins/supertab
url = https://github.com/ervandew/supertab
[submodule "vim/plugins/vim-airline"]
path = vim/plugins/vim-airline
url = https://github.com/vim-airline/vim-airline.git
[submodule "vim/plugins/vim-airline-themes"]
path = vim/plugins/vim-airline-themes
url = https://github.com/vim-airline/vim-airline-themes.git
[submodule "vim/plugins/vim-better-whitespace"]
path = vim/plugins/vim-better-whitespace
url = git://github.com/ntpeters/vim-better-whitespace.git
[submodule "vim/plugins/vim-easymotion"]
path = vim/plugins/vim-easymotion
url = https://github.com/easymotion/vim-easymotion
[submodule "vim/plugins/vim-fugitive"]
path = vim/plugins/vim-fugitive
url = git://github.com/tpope/vim-fugitive.git
[submodule "vim/plugins/vim-yankstack"]
path = vim/plugins/vim-yankstack
url = https://github.com/maxbrunsfeld/vim-yankstack
[submodule "vim/plugins/indentline"]
path = vim/plugins/indentline
url = https://github.com/yggdroot/indentline
[submodule "vim/plugins/simpylfold"]
path = vim/plugins/simpylfold
url = https://github.com/tmhedberg/simpylfold
[submodule "vim/plugins/FastFold"]
path = vim/plugins/FastFold
url = https://github.com/Konfekt/fastfold.git
[submodule "vim/plugins/vim-nerdtree-tabs"]
path = vim/plugins/vim-nerdtree-tabs
url = https://github.com/jistr/vim-nerdtree-tabs
[submodule "vim/plugins/ultisnips"]
path = vim/plugins/ultisnips
url = https://github.com/SirVer/ultisnips.git
[submodule "vim/snippets/vim-snippets"]
path = vim/snippets/vim-snippets
url = https://github.com/honza/vim-snippets
[submodule "vim/snippets/bootstrap-snippets"]
path = vim/snippets/bootstrap-snippets
url = https://github.com/bonsaiben/bootstrap-snippets.git
[submodule "vim/snippets/snipmate-snippets-bib"]
path = vim/snippets/snipmate-snippets-bib
url = https://github.com/rbonvall/snipmate-snippets-bib.git
[submodule "vim/plugins/singlecompile"]
path = vim/plugins/singlecompile
url = https://github.com/xuhdev/singlecompile
[submodule "vim/plugins/javacomplete"]
path = vim/plugins/javacomplete
url = https://github.com/vim-scripts/javacomplete
[submodule "vim/plugins/vim-python-pep8-indent"]
path = vim/plugins/vim-python-pep8-indent
url = https://github.com/Vimjas/vim-python-pep8-indent
[submodule "vim/plugins/auto-pairs"]
path = vim/plugins/auto-pairs
url = https://github.com/jiangmiao/auto-pairs.git
[submodule "vim/plugins/kotlin-vim"]
path = vim/plugins/kotlin-vim
url = https://github.com/udalov/kotlin-vim.git
[submodule "vim/plugins/vim-hug-neovim-rpc"]
path = vim/plugins/vim-hug-neovim-rpc
url = https://github.com/roxma/vim-hug-neovim-rpc
[submodule "vim/plugins/deoplete.nvim"]
path = vim/plugins/deoplete.nvim
url = https://github.com/Shougo/deoplete.nvim.git
[submodule "vim/plugins/nvim-yarp"]
path = vim/plugins/nvim-yarp
url = https://github.com/roxma/nvim-yarp
[submodule "vim/plugins/deoplete-jedi"]
path = vim/plugins/deoplete-jedi
url = https://github.com/zchee/deoplete-jedi
[submodule "vim/plugins/vim-latex-live-preview"]
path = vim/plugins/vim-latex-live-preview
url = https://github.com/xuhdev/vim-latex-live-preview
[submodule "zsh/modules/history-substring-search/external"]
path = zsh/modules/history-substring-search/external
url = https://github.com/zsh-users/zsh-history-substring-search.git
@@ -127,33 +43,3 @@
[submodule "tmux/plugins/tpm"]
path = tmux/plugins/tpm
url = https://github.com/tmux-plugins/tpm.git
[submodule "vim/plugins/python-mode"]
path = vim/plugins/python-mode
url = https://github.com/klen/python-mode.git
[submodule "vim/plugins/ale"]
path = vim/plugins/ale
url = https://github.com/w0rp/ale.git
[submodule "vim/plugins/pydoc"]
path = vim/plugins/pydoc
url = https://github.com/fs111/pydoc.vim
[submodule "vim/plugins/vim-gitgutter"]
path = vim/plugins/vim-gitgutter
url = git://github.com/airblade/vim-gitgutter.git
[submodule "vim/plugins/fzf"]
path = vim/plugins/fzf
url = git://github.com/junegunn/fzf.vim
[submodule "vim/plugins/vim-markdown"]
path = vim/plugins/vim-markdown
url = https://github.com/plasticboy/vim-markdown.git
[submodule "vim/plugins/ansible-vim"]
path = vim/plugins/ansible-vim
url = https://github.com/pearofducks/ansible-vim
[submodule "vim/plugins/vim-window-resize-easy"]
path = vim/plugins/vim-window-resize-easy
url = https://github.com/roxma/vim-window-resize-easy
[submodule "vim/plugins/traces.vim"]
path = vim/plugins/traces.vim
url = https://github.com/markonm/traces.vim
[submodule "vim/plugins/nerdtree"]
path = vim/plugins/nerdtree
url = https://github.com/scrooloose/nerdtree

View File

@@ -1,9 +1,17 @@
" Plugin Manager
execute pathogen#infect('plugins/{}')
execute pathogen#infect('snippets/{}')
source ~/.vim/config/plugins.vim
" Make sure plug.vim is present
if empty(glob('~/.vim/autoload/plug.vim'))
silent !curl -fLo ~/.vim/autoload/plug.vim --create-dirs
\ https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
source ~/.vim/autoload/plug.vim
autocmd VimEnter * PlugInstall --sync
endif
" Prepare central backup/swap/undo directory
let &undodir = expand('~/.vim/.undo')
let &undodir = expand('~/.vim/.undo//')
let &backupdir = expand('~/.vim/.backup//')
let &directory = expand('~/.vim/.swp//')
@@ -39,7 +47,7 @@ colorscheme happy_hacking
" Apply plugin configurations
source ~/.vim/config/plugin.vim
source ~/.vim/config/pluginconfig.vim
" Apply custom keybindings
source ~/.vim/config/keybindings.vim
@@ -47,8 +55,5 @@ source ~/.vim/config/keybindings.vim
" Apply settings for custom filetypes
source ~/.vim/config/filetypes.vim
" Rebuild help files on startup
autocmd vimenter * Helptags
" Transparency
hi Normal guibg=NONE ctermbg=NONE
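
With the bootstrap above in place, day-to-day plugin maintenance moves from
git submodule commands to vim-plug's own Ex commands. A short sketch of the
typical workflow (these are standard vim-plug commands, not part of this diff):

```vim
" Run inside Vim after editing vim/config/plugins.vim:
:PlugInstall   " clone any missing plugins into ~/.vim/plugged
:PlugUpdate    " update all installed plugins
:PlugClean     " delete plugins that are no longer listed
:PlugStatus    " show the state of each plugin
```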

View File

@@ -1,266 +0,0 @@
" pathogen.vim - path option manipulation
" Maintainer: Tim Pope <http://tpo.pe/>
" Version: 2.4
" Install in ~/.vim/autoload (or ~\vimfiles\autoload).
"
" For management of individually installed plugins in ~/.vim/bundle (or
" ~\vimfiles\bundle), adding `execute pathogen#infect()` to the top of your
" .vimrc is the only other setup necessary.
"
" The API is documented inline below.
if exists("g:loaded_pathogen") || &cp
finish
endif
let g:loaded_pathogen = 1
" Point of entry for basic default usage. Give a relative path to invoke
" pathogen#interpose() or an absolute path to invoke pathogen#surround().
" Curly braces are expanded with pathogen#expand(): "bundle/{}" finds all
" subdirectories inside "bundle" inside all directories in the runtime path.
" If no arguments are given, defaults "bundle/{}", and also "pack/{}/start/{}"
" on versions of Vim without native package support.
function! pathogen#infect(...) abort
if a:0
let paths = filter(reverse(copy(a:000)), 'type(v:val) == type("")')
else
let paths = ['bundle/{}', 'pack/{}/start/{}']
endif
if has('packages')
call filter(paths, 'v:val !~# "^pack/[^/]*/start/[^/]*$"')
endif
let static = '^\%([$~\\/]\|\w:[\\/]\)[^{}*]*$'
for path in filter(copy(paths), 'v:val =~# static')
call pathogen#surround(path)
endfor
for path in filter(copy(paths), 'v:val !~# static')
if path =~# '^\%([$~\\/]\|\w:[\\/]\)'
call pathogen#surround(path)
else
call pathogen#interpose(path)
endif
endfor
call pathogen#cycle_filetype()
if pathogen#is_disabled($MYVIMRC)
return 'finish'
endif
return ''
endfunction
" Split a path into a list.
function! pathogen#split(path) abort
if type(a:path) == type([]) | return a:path | endif
if empty(a:path) | return [] | endif
let split = split(a:path,'\\\@<!\%(\\\\\)*\zs,')
return map(split,'substitute(v:val,''\\\([\\,]\)'',''\1'',"g")')
endfunction
" Convert a list to a path.
function! pathogen#join(...) abort
if type(a:1) == type(1) && a:1
let i = 1
let space = ' '
else
let i = 0
let space = ''
endif
let path = ""
while i < a:0
if type(a:000[i]) == type([])
let list = a:000[i]
let j = 0
while j < len(list)
let escaped = substitute(list[j],'[,'.space.']\|\\[\,'.space.']\@=','\\&','g')
let path .= ',' . escaped
let j += 1
endwhile
else
let path .= "," . a:000[i]
endif
let i += 1
endwhile
return substitute(path,'^,','','')
endfunction
" Convert a list to a path with escaped spaces for 'path', 'tag', etc.
function! pathogen#legacyjoin(...) abort
return call('pathogen#join',[1] + a:000)
endfunction
" Turn filetype detection off and back on again if it was already enabled.
function! pathogen#cycle_filetype() abort
if exists('g:did_load_filetypes')
filetype off
filetype on
endif
endfunction
" Check if a bundle is disabled. A bundle is considered disabled if its
" basename or full name is included in the list g:pathogen_blacklist or the
" comma delimited environment variable $VIMBLACKLIST.
function! pathogen#is_disabled(path) abort
if a:path =~# '\~$'
return 1
endif
let sep = pathogen#slash()
let blacklist =
\ get(g:, 'pathogen_blacklist', get(g:, 'pathogen_disabled', [])) +
\ pathogen#split($VIMBLACKLIST)
if !empty(blacklist)
call map(blacklist, 'substitute(v:val, "[\\/]$", "", "")')
endif
return index(blacklist, fnamemodify(a:path, ':t')) != -1 || index(blacklist, a:path) != -1
endfunction
" Prepend the given directory to the runtime path and append its corresponding
" after directory. Curly braces are expanded with pathogen#expand().
function! pathogen#surround(path) abort
let sep = pathogen#slash()
let rtp = pathogen#split(&rtp)
let path = fnamemodify(a:path, ':s?[\\/]\=$??')
let before = filter(pathogen#expand(path), '!pathogen#is_disabled(v:val)')
let after = filter(reverse(pathogen#expand(path, sep.'after')), '!pathogen#is_disabled(v:val[0:-7])')
call filter(rtp, 'index(before + after, v:val) == -1')
let &rtp = pathogen#join(before, rtp, after)
return &rtp
endfunction
" For each directory in the runtime path, add a second entry with the given
" argument appended. Curly braces are expanded with pathogen#expand().
function! pathogen#interpose(name) abort
let sep = pathogen#slash()
let name = a:name
if has_key(s:done_bundles, name)
return ""
endif
let s:done_bundles[name] = 1
let list = []
for dir in pathogen#split(&rtp)
if dir =~# '\<after$'
let list += reverse(filter(pathogen#expand(dir[0:-6].name, sep.'after'), '!pathogen#is_disabled(v:val[0:-7])')) + [dir]
else
let list += [dir] + filter(pathogen#expand(dir.sep.name), '!pathogen#is_disabled(v:val)')
endif
endfor
let &rtp = pathogen#join(pathogen#uniq(list))
return 1
endfunction
let s:done_bundles = {}
" Invoke :helptags on all non-$VIM doc directories in runtimepath.
function! pathogen#helptags() abort
let sep = pathogen#slash()
for glob in pathogen#split(&rtp)
for dir in map(split(glob(glob), "\n"), 'v:val.sep."/doc/".sep')
if (dir)[0 : strlen($VIMRUNTIME)] !=# $VIMRUNTIME.sep && filewritable(dir) == 2 && !empty(split(glob(dir.'*.txt'))) && (!filereadable(dir.'tags') || filewritable(dir.'tags'))
silent! execute 'helptags' pathogen#fnameescape(dir)
endif
endfor
endfor
endfunction
command! -bar Helptags :call pathogen#helptags()
" Execute the given command. This is basically a backdoor for --remote-expr.
function! pathogen#execute(...) abort
for command in a:000
execute command
endfor
return ''
endfunction
" Section: Unofficial
function! pathogen#is_absolute(path) abort
return a:path =~# (has('win32') ? '^\%([\\/]\|\w:\)[\\/]\|^[~$]' : '^[/~$]')
endfunction
" Given a string, returns all possible permutations of comma delimited braced
" alternatives of that string. pathogen#expand('/{a,b}/{c,d}') yields
" ['/a/c', '/a/d', '/b/c', '/b/d']. Empty braces are treated as a wildcard
" and globbed. Actual globs are preserved.
function! pathogen#expand(pattern, ...) abort
let after = a:0 ? a:1 : ''
let pattern = substitute(a:pattern, '^[~$][^\/]*', '\=expand(submatch(0))', '')
if pattern =~# '{[^{}]\+}'
let [pre, pat, post] = split(substitute(pattern, '\(.\{-\}\){\([^{}]\+\)}\(.*\)', "\\1\001\\2\001\\3", ''), "\001", 1)
let found = map(split(pat, ',', 1), 'pre.v:val.post')
let results = []
for pattern in found
call extend(results, pathogen#expand(pattern))
endfor
elseif pattern =~# '{}'
let pat = matchstr(pattern, '^.*{}[^*]*\%($\|[\\/]\)')
let post = pattern[strlen(pat) : -1]
let results = map(split(glob(substitute(pat, '{}', '*', 'g')), "\n"), 'v:val.post')
else
let results = [pattern]
endif
let vf = pathogen#slash() . 'vimfiles'
call map(results, 'v:val =~# "\\*" ? v:val.after : isdirectory(v:val.vf.after) ? v:val.vf.after : isdirectory(v:val.after) ? v:val.after : ""')
return filter(results, '!empty(v:val)')
endfunction
" \ on Windows unless shellslash is set, / everywhere else.
function! pathogen#slash() abort
return !exists("+shellslash") || &shellslash ? '/' : '\'
endfunction
function! pathogen#separator() abort
return pathogen#slash()
endfunction
" Convenience wrapper around glob() which returns a list.
function! pathogen#glob(pattern) abort
let files = split(glob(a:pattern),"\n")
return map(files,'substitute(v:val,"[".pathogen#slash()."/]$","","")')
endfunction
" Like pathogen#glob(), only limit the results to directories.
function! pathogen#glob_directories(pattern) abort
return filter(pathogen#glob(a:pattern),'isdirectory(v:val)')
endfunction
" Remove duplicates from a list.
function! pathogen#uniq(list) abort
let i = 0
let seen = {}
while i < len(a:list)
if (a:list[i] ==# '' && exists('empty')) || has_key(seen,a:list[i])
call remove(a:list,i)
elseif a:list[i] ==# ''
let i += 1
let empty = 1
else
let seen[a:list[i]] = 1
let i += 1
endif
endwhile
return a:list
endfunction
" Backport of fnameescape().
function! pathogen#fnameescape(string) abort
if exists('*fnameescape')
return fnameescape(a:string)
elseif a:string ==# '-'
return '\-'
else
return substitute(escape(a:string," \t\n*?[{`$\\%#'\"|!<"),'^[+>]','\\&','')
endif
endfunction
" Like findfile(), but hardcoded to use the runtimepath.
function! pathogen#runtime_findfile(file,count) abort
let rtp = pathogen#join(1,pathogen#split(&rtp))
let file = findfile(a:file,rtp,a:count)
if file ==# ''
return ''
else
return fnamemodify(file,':p')
endif
endfunction
" vim:set et sw=2 foldmethod=expr foldexpr=getline(v\:lnum)=~'^\"\ Section\:'?'>1'\:getline(v\:lnum)=~#'^fu'?'a1'\:getline(v\:lnum)=~#'^endf'?'s1'\:'=':

42
vim/config/plugins.vim Normal file
View File

@@ -0,0 +1,42 @@
call plug#begin('~/.vim/plugged')
Plug 'mattn/emmet-vim'
Plug 'Shougo/neco-vim'
Plug 'scrooloose/nerdcommenter'
Plug 'ervandew/supertab'
Plug 'vim-airline/vim-airline'
Plug 'vim-airline/vim-airline-themes'
Plug 'ntpeters/vim-better-whitespace'
Plug 'easymotion/vim-easymotion'
Plug 'tpope/vim-fugitive'
Plug 'maxbrunsfeld/vim-yankstack'
Plug 'yggdroot/indentline'
Plug 'tmhedberg/simpylfold'
Plug 'Konfekt/fastfold'
Plug 'jistr/vim-nerdtree-tabs'
Plug 'SirVer/ultisnips'
Plug 'honza/vim-snippets'
Plug 'bonsaiben/bootstrap-snippets'
Plug 'rbonvall/snipmate-snippets-bib'
Plug 'xuhdev/singlecompile'
Plug 'vim-scripts/javacomplete'
Plug 'Vimjas/vim-python-pep8-indent'
Plug 'jiangmiao/auto-pairs'
Plug 'udalov/kotlin-vim'
Plug 'roxma/vim-hug-neovim-rpc'
Plug 'Shougo/deoplete.nvim'
Plug 'roxma/nvim-yarp'
Plug 'zchee/deoplete-jedi'
Plug 'xuhdev/vim-latex-live-preview'
Plug 'klen/python-mode'
Plug 'w0rp/ale'
Plug 'fs111/pydoc.vim'
Plug 'airblade/vim-gitgutter'
Plug 'junegunn/fzf.vim'
Plug 'plasticboy/vim-markdown'
Plug 'pearofducks/ansible-vim'
Plug 'roxma/vim-window-resize-easy'
Plug 'markonm/traces.vim'
Plug 'scrooloose/nerdtree'
call plug#end()
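
Adding a plugin from now on means appending one more Plug line between
plug#begin() and plug#end() and running :PlugInstall. A minimal sketch (the
plugin name below is a placeholder, not an entry in this config):

```vim
" Placeholder to show the pattern; use a real 'author/repo' from GitHub.
Plug 'author/example-plugin'
" vim-plug also accepts options, e.g. loading only when a command is first used:
Plug 'author/example-plugin', { 'on': 'ExampleCommand' }
```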

View File

@@ -1 +0,0 @@
doc/tags

View File

@@ -1,106 +0,0 @@
# What good will FastFold do?
Automatic folds (that is, folds generated by a fold method different
from `manual`) bog down VIM noticeably in insert mode. They are also often
recomputed too early (for example, when inserting an opening fold marker
whose closing counterpart is still missing to complete the fold).
See http://vim.wikia.com/wiki/Keep_folds_closed_while_inserting_text
for a discussion.
With this plug-in, the folds in the currently edited buffer are updated by an
automatic fold method only
- when saving the buffer
- when closing or opening folds (zo, za, zc, etc...)
- when moving or operating fold-wise (zj,zk,[z,]z)
- when typing `zuz` in normal mode
and are kept as is otherwise (by keeping the fold method set to `manual`).
# Example Setup
Each of these triggers for updating folds can be modified or disabled by adding
the lines
```vim
nmap zuz <Plug>(FastFoldUpdate)
let g:fastfold_savehook = 1
let g:fastfold_fold_command_suffixes = ['x','X','a','A','o','O','c','C']
let g:fastfold_fold_movement_commands = [']z', '[z', 'zj', 'zk']
```
to the file `~/.vimrc` (respectively `%USERPROFILE%/_vimrc` on Microsoft Windows).
For example, by adding
```vim
let g:tex_fold_enabled=1
let g:vimsyn_folding='af'
let g:xml_syntax_folding = 1
let g:php_folding = 1
let g:perl_fold = 1
```
to the `.vimrc` file and installing this plug-in, the folds in a TeX, Vim, XML,
PHP or Perl file are updated by the `syntax` fold method when saving the
buffer, opening, closing, moving or operating on folds, or typing `zuz` in
normal mode and are kept as is otherwise.
*Set fold methods per file type only*! Setting one globally risks that FastFold assumes the wrong, global fold method instead of the one intended by the file type plug-in, for example TagList.
# Configuration
- If you prefer that folds are only updated manually but not when saving the buffer,
then add `let g:fastfold_savehook = 0` to your `.vimrc`.
- If you prefer that folds are updated whenever you close or open folds by a
standard keystroke such as `zx`,`zo` or `zc`, then add `let
g:fastfold_fold_command_suffixes = []` to your `.vimrc`.
The exact list of these standard keystrokes is `zx,zX,za,zA,zo,zO,zc,zC` and
it can be customized by changing the global variable
`g:fastfold_mapsuffixes`. If you wanted to intercept all possible fold
commands (such as zr,zm,...), change this to:
```vim
let g:fastfold_fold_command_suffixes =
['x','X','a','A','o','O','c','C','r','R','m','M','i','n','N']
```
- If you prefer that this plug-in does not add a normal mode mapping that updates
folds (that defaults to `zuz`), then add
`nmap <SID>(DisableFastFoldUpdate) <Plug>(FastFoldUpdate) ` to your `.vimrc`.
You can remap `zuz` to your favorite keystroke, say `<F5>`, by adding
`nmap <F5> <Plug>(FastFoldUpdate)` to your `.vimrc`.
There is also a command `FastFoldUpdate` that updates all folds and its
variant `FastFoldUpdate!` that updates all folds and echoes by which fold
method the folds were updated.
# Addons
## Vim-Stay
`FastFold` integrates with the plug-in
[vim-stay](https://github.com/kopischke/vim-stay/issues) that restores the
folds of a file buffer by `:mkview` and `:loadview`.
## Custom Fold Text
A `CustomFoldText()` function that displays the percentage of the number of buffer lines that the folded text takes up and indents folds according to their nesting level, similar to [that](http://www.gregsexton.org/2011/03/improving-the-text-displayed-in-a-fold/) by Greg Sexton, is available at
http://www.github.com/Konfekt/FoldText
## Fold Text-Object
Create a fold text object, mapped to `iz` and `az`, by adding the lines
```vim
xnoremap iz :<c-u>FastFoldUpdate<cr><esc>:<c-u>normal! ]zv[z<cr>
xnoremap az :<c-u>FastFoldUpdate<cr><esc>:<c-u>normal! ]zV[z<cr>
```
to the file `~/.vimrc` (respectively `%USERPROFILE%/_vimrc` on Microsoft Windows).

View File

@@ -1,153 +0,0 @@
FastFold, folding optimization *FastFold* *fastfold*
===========================================================================
0. Introduction ~
*FastFold-intro* *fastfold-intro*
Automatic folds - that is, folds generated by a fold method different
from `manual` - bog down VIM considerably in insert mode. Also, they are often
re-evaluated prematurely for example, when inserting an opening fold marker
whose closing counterpart has yet to be added to complete the fold.
See http://vim.wikia.com/wiki/Keep_folds_closed_while_inserting_text
for a discussion.
With this plug-in, the folds in the currently edited buffer are updated when
certain triggers are met:
- when saving the buffer
- when closing or opening folds (zo, za, zc, etc...)
- when moving or operating fold-wise (zj,zk,[z,]z)
- when typing `zuz` in normal mode
===========================================================================
1. Commands ~
*FastFold-commands* *fastfold-commands*
*FastFoldUpdate!*
- `:FastFoldUpdate` updates all folds in the current buffer.
- `:FastFoldUpdate!` updates all folds & echoes what fold method was used
- The mapping `zuz` that invokes `:FastFoldUpdate!` can be changed to your
favorite keystroke, say `<F5>`, by adding
>
nmap <F5> <Plug>(FastFoldUpdate)
<
to your `.vimrc`. It can be disabled by adding
>
nmap <SID>(DisableFastFoldUpdate) <Plug>(FastFoldUpdate)
<
===========================================================================
2. Config ~
*FastFold-config* *fastfold-config*
Each of the above triggers can be enabled or disabled by setting the
matching global flags in your `.vimrc`. Default values are shown.
>
let g:fastfold_savehook = 1
let g:fastfold_fdmhook = 0
nmap zuz <Plug>(FastFoldUpdate)
let g:fastfold_fold_command_suffixes = ['x','X','a','A','o','O','c','C']
let g:fastfold_fold_movement_commands = [']z', '[z', 'zj', 'zk']
<
For example, by adding the following to your `.vimrc`
>
let g:tex_fold_enabled=1
let g:vimsyn_folding='af'
let g:xml_syntax_folding = 1
let g:php_folding = 1
let g:perl_fold = 1
<
You will enable tex, vim, xml, php and perl syntax folding.
Set fold methods per file type only! Setting one globally risks that
FastFold assumes the wrong, global fold method instead of the one intended by
the file type plug-in, for example TagList.
-----------------------------
- FastFold updates all folds when you open or close folds by the commands
zx, zX, za, zA, zo, zO, zc, zC. This list of commands is configured by
>
let g:fastfold_fold_command_suffixes = ['x','X','a','A','o','O','c','C']
<
To intercept all possible fold commands (such as zr, zm, ...), change this to
>
let g:fastfold_fold_command_suffixes =
['x','X','a','A','o','O','c','C','r','R','m','M','i','n','N']
<
To disable all interceptions, change this to
>
let g:fastfold_fold_command_suffixes = []
<
- FastFold updates all folds when you move or operate fold-wise by
the commands zj,zk,[z or ]z. This list of commands is configured by
>
let g:fastfold_fold_movement_commands = [']z', '[z', 'zj', 'zk']
<
It can be disabled by
>
let g:fastfold_fold_movement_commands = []
<
- FastFold updates all folds when you save a buffer. This hook is set by
>
let g:fastfold_savehook = 1
<
- FastFold intercepts every change of the fold method by
>
let g:fastfold_fdmhook = 1
<
This is disabled by default as it could interfere with other plugins such as
`easymotion`.
- To disable FastFold for a list of file types, such as 'taglist', add
>
let g:fastfold_skip_filetypes = [ 'taglist' ]
<
to your 'vimrc'. Default value is [].
- Add a fold text-object, mapped to `iz` and `az`, by adding the lines
xnoremap iz :<c-u>FastFoldUpdate<cr><esc>:<c-u>normal! ]zv[z<cr>
xnoremap az :<c-u>FastFoldUpdate<cr><esc>:<c-u>normal! ]zV[z<cr>
to your 'vimrc'.
===========================================================================
3. Extra Notes ~
3.1 Related Plugins ~
`FastFold` integrates with the plug-in `vim-stay` available at
https://github.com/kopischke/vim-stay
that stores and restores the last folds by `:mkview` and `:loadview`.
------------------------------
A fold-text function `CustomFoldText()` that displays the percentage of the
number of buffer lines that the folded text takes up and indents folds
according to their nesting level is available at
http://www.github.com/Konfekt/FoldText
3.2 Warning ~
FastFold overwrites your manual folds when saving the currently edited buffer,
unless
- FastFold is disabled for this filetype by `g:fastfold_skip_filetypes`, or
- the `foldmethod` has been `manual` since entering the buffer.
3.3 API ~
The last used fold method by which FastFold updates the folds in the current
buffer can be read off from the window local variable `w:lastdfm`.
3.4 Thanks go to... ~
- starcraftman for providing this documentation, and
- blueyed, kopischke, sabauma, willywampa, and many others for reporting
issues and suggesting code improvements.
vim:tw=78:ts=2:sts=2:sw=2:ft=help:norl

View File

@@ -1,224 +0,0 @@
scriptencoding utf-8
" LICENCE PUBLIQUE RIEN À BRANLER
" Version 1, Mars 2009
"
" Copyright (C) 2009 Sam Hocevar
" 14 rue de Plaisance, 75014 Paris, France
"
" La copie et la distribution de copies exactes de cette licence sont
" autorisées, et toute modification est permise à condition de changer
" le nom de la licence.
"
" CONDITIONS DE COPIE, DISTRIBUTON ET MODIFICATION
" DE LA LICENCE PUBLIQUE RIEN À BRANLER
"
" 0. Faites ce que vous voulez, jen ai RIEN À BRANLER.
if exists('g:loaded_fastfold') || &cp
finish
endif
let g:loaded_fastfold = 1
let s:keepcpo = &cpo
set cpo&vim
" ------------------------------------------------------------------------------
if !exists('g:fastfold_fdmhook') | let g:fastfold_fdmhook = 0 | endif
if !exists('g:fastfold_savehook') | let g:fastfold_savehook = 1 | endif
if !exists('g:fastfold_fold_command_suffixes')
let g:fastfold_fold_command_suffixes = ['x','X','a','A','o','O','c','C']
endif
if !exists('g:fastfold_fold_movement_commands')
let g:fastfold_fold_movement_commands = [']z', '[z', 'zj', 'zk']
endif
if !exists('g:fastfold_skip_filetypes') | let g:fastfold_skip_filetypes = [] | endif
function! s:EnterWin()
if exists('w:unchanged')
unlet w:unchanged
elseif s:Skip()
if exists('w:lastfdm')
unlet w:lastfdm
endif
else
let w:lastfdm = &l:foldmethod
setlocal foldmethod=manual
endif
endfunction
function! s:LeaveWin()
if exists('w:predifffdm')
if empty(&l:foldmethod) || &l:foldmethod is# 'manual'
let &l:foldmethod = w:predifffdm
unlet w:predifffdm
return
elseif &l:foldmethod isnot# 'diff'
unlet w:predifffdm
endif
endif
if exists('w:lastfdm') && &l:foldmethod is# 'diff'
let w:predifffdm = w:lastfdm
endif
if exists('w:lastfdm') && &l:foldmethod is# 'manual'
if !exists('b:last_changedtick') || b:changedtick > b:last_changedtick
let &l:foldmethod = w:lastfdm
let b:last_changedtick = b:changedtick
else
let w:unchanged = 1
endif
endif
endfunction
" Like windo but restore the current buffer.
" See http://vim.wikia.com/wiki/Run_a_command_in_multiple_buffers#Restoring_position
function! s:WinDo( command )
" avoid errors in CmdWin
if exists('*getcmdwintype') && !empty(getcmdwintype())
return
endif
" Work around Vim bug.
" See https://groups.google.com/forum/#!topic/vim_dev/LLTw8JV6wKg
let curaltwin = winnr('#') ? winnr('#') : 1
let currwin=winnr()
if &scrollopt =~# '\<jump\>'
set scrollopt-=jump
let l:restore = 'set scrollopt+=jump'
endif
silent! execute 'keepjumps noautocmd windo ' . a:command
silent! execute 'noautocmd ' . curaltwin . 'wincmd w'
silent! execute 'noautocmd ' . currwin . 'wincmd w'
if exists('l:restore')
exe l:restore
endif
endfunction
" WinEnter then TabEnter then BufEnter then BufWinEnter
function! s:UpdateWin(check)
" skip if another session still loading
if a:check && exists('g:SessionLoad') | return | endif
let s:curwin = winnr()
call s:WinDo('if winnr() is s:curwin | call s:LeaveWin() | endif')
call s:WinDo('if winnr() is s:curwin | call s:EnterWin() | endif')
endfunction
function! s:UpdateBuf(feedback)
let s:curbuf = bufnr('%')
call s:WinDo("if bufnr('%') is s:curbuf | call s:LeaveWin() | endif")
call s:WinDo("if bufnr('%') is s:curbuf | call s:EnterWin() | endif")
if !a:feedback | return | endif
if !exists('w:lastfdm')
echomsg "'" . &l:foldmethod . "' folds already continuously updated"
else
echomsg "updated '" . w:lastfdm . "' folds"
endif
endfunction
function! s:UpdateTab()
" skip if another session still loading
if exists('g:SessionLoad') | return | endif
call s:WinDo('call s:LeaveWin()')
call s:WinDo('call s:EnterWin()')
endfunction
function! s:Skip()
if !s:isReasonable() | return 1 | endif
if !&l:modifiable | return 1 | endif
if s:inSkipList() | return 1 | endif
return 0
endfunction
function! s:isReasonable()
if &l:foldmethod is# 'syntax' || &l:foldmethod is# 'expr'
return 1
else
return 0
endif
endfunction
function! s:inSkipList()
for ifiles in g:fastfold_skip_filetypes
if index(g:fastfold_skip_filetypes, &l:filetype) >= 0
return 1
endif
endfor
return 0
endfunction
command! -bar -bang FastFoldUpdate call s:UpdateBuf(<bang>0)
nnoremap <silent> <Plug>(FastFoldUpdate) :<c-u>FastFoldUpdate!<CR>
if !hasmapto('<Plug>(FastFoldUpdate)', 'n') && empty(mapcheck('zuz', 'n'))
nmap zuz <Plug>(FastFoldUpdate)
endif
for suffix in g:fastfold_fold_command_suffixes
execute 'nnoremap <silent> z'.suffix.' :<c-u>call <SID>UpdateWin(0)<CR>z'.suffix
endfor
for cmd in g:fastfold_fold_movement_commands
exe "nnoremap <silent><expr> " . cmd. " ':<c-u>call <SID>UpdateWin(0)<CR>'.v:count." . "'".cmd."'"
exe "xnoremap <silent><expr> " . cmd. " ':<c-u>call <SID>UpdateWin(0)<CR>gv'.v:count." . "'".cmd."'"
exe "onoremap <silent><expr> " . cmd. " '<esc>:<c-u>call <SID>UpdateWin(0)<CR>' . '\"' . v:register . v:operator . v:count1 . " . "'".cmd."'"
endfor
augroup FastFold
autocmd!
autocmd VimEnter * call s:init()
autocmd BufEnter,WinEnter *
\ if !exists('b:last_changedtick') | let b:last_changedtick = b:changedtick | endif
augroup end
function! s:init()
call s:UpdateTab()
augroup FastFoldEnter
autocmd!
" Make &l:foldmethod local to Buffer and NOT Window.
autocmd BufEnter,WinEnter *
\ if exists('b:lastfdm') | let w:lastfdm = b:lastfdm | call s:LeaveWin() | call s:EnterWin() | endif
autocmd BufLeave,WinLeave *
\ call s:LeaveWin() | call s:EnterWin() |
\ if exists('w:lastfdm') | let b:lastfdm = w:lastfdm |
\ elseif exists('b:lastfdm') | unlet b:lastfdm | endif
autocmd BufEnter,WinEnter *
\ if &l:foldmethod isnot# 'diff' && exists('b:predifffdm') | call s:UpdateBuf(0) | endif
autocmd BufLeave,WinLeave *
\ if exists('w:predifffdm') | let b:predifffdm = w:predifffdm |
\ elseif exists('b:predifffdm') | unlet b:predifffdm | endif
" UpdateBuf/Win(1) = skip if another session is still loading.
autocmd TabEnter * call s:UpdateTab()
" BufWinEnter = to change &l:foldmethod by modelines.
autocmd BufWinEnter,FileType * call s:UpdateWin(1)
" So that FastFold functions correctly after :loadview.
autocmd SessionLoadPost * call s:UpdateWin(0)
" Update folds on reload.
autocmd BufReadPost *
\ if !exists('b:already_loaded') | let b:already_loaded = 1 |
\ else | call s:UpdateBuf(0) | endif
" Update folds on saving.
if g:fastfold_savehook
autocmd BufWritePost * call s:UpdateBuf(0)
endif
if g:fastfold_fdmhook
if exists('##OptionSet')
autocmd OptionSet foldmethod call s:UpdateBuf(0)
endif
endif
augroup end
endfunction
" ------------------------------------------------------------------------------
let &cpo= s:keepcpo
unlet s:keepcpo

@@ -1 +0,0 @@
Subproject commit 3f0e1cd05dc526b073946ad96c64f6a5e2f9e4e6

@@ -1 +0,0 @@
Subproject commit f1c9be3cdca55c90cc190f8fc38c6c8ac7e8d371

View File

@@ -1 +0,0 @@
doc/tags

View File

@@ -1,325 +0,0 @@
Auto Pairs
==========
Insert or delete brackets, parens, quotes in pair.
Installation
------------
copy plugin/auto-pairs.vim to ~/.vim/plugin
or if you are using `pathogen`:
```git clone git://github.com/jiangmiao/auto-pairs.git ~/.vim/bundle/auto-pairs```
Features
--------
* Insert in pair
input: [
output: [|]
* Delete in pair
input: foo[<BS>]
output: foo
* Insert new indented line after Return
input: {|} (press <CR> at |)
output: {
|
}
* Insert spaces before closing characters, only for [], (), {}
input: {|} (press <SPACE> at |)
output: { | }
input: {|} (press <SPACE>foo} at |)
output: { foo }|
input: '|' (press <SPACE> at |)
output: ' |'
* Skip ' when inside a word
input: foo| (press ' at |)
output: foo'
* Skip closed bracket.
input: []
output: []
* Ignore auto pair when previous character is \
input: "\'
output: "\'"
* Fast Wrap
input: |'hello' (press (<M-e> at |)
output: ('hello')
wrap string, only support c style string
input: |'h\\el\'lo' (press (<M-e> at |)
output ('h\\ello\'')
input: |[foo, bar()] (press (<M-e> at |)
output: ([foo, bar()])
* Quick move char to closed pair
input: (|){["foo"]} (press <M-}> at |)
output: ({["foo"]}|)
input: |[foo, bar()] (press (<M-]> at |)
output: ([foo, bar()]|)
* Quick jump to closed pair.
input:
{
something;|
}
(press } at |)
output:
{
}|
* Support ``` ''' and """
input:
'''
output:
'''|'''
* Delete Repeated Pairs in one time
input: """|""" (press <BS> at |)
output: |
input: {{|}} (press <BS> at |)
output: |
input: [[[[[[|]]]]]] (press <BS> at |)
output: |
* Fly Mode
input: if(a[3)
output: if(a[3])| (In Fly Mode)
output: if(a[3)]) (Without Fly Mode)
input:
{
hello();|
world();
}
(press } at |)
output:
{
hello();
world();
}|
(then press <M-b> at | to do backinsert)
output:
{
hello();}|
world();
}
See Fly Mode section for details
Fly Mode
--------
Fly Mode will always force jumping to the closed pair instead of inserting it. Only for ")", "}", "]".
If it jumps by mistake, you can use AutoPairsBackInsert (default key: `<M-b>`) to jump back and insert the closed pair.
The most common situation is wanting to insert a single closed pair inside a string, e.g. ")".
Fly Mode is DISABLED by default.
Add **let g:AutoPairsFlyMode = 1** to your .vimrc to turn it on.
Default Options:
let g:AutoPairsFlyMode = 0
let g:AutoPairsShortcutBackInsert = '<M-b>'
Shortcuts
---------
System Shortcuts:
<CR> : Insert new indented line after return if cursor in blank brackets or quotes.
<BS> : Delete brackets in pair
<M-p> : Toggle Autopairs (g:AutoPairsShortcutToggle)
<M-e> : Fast Wrap (g:AutoPairsShortcutFastWrap)
<M-n> : Jump to next closed pair (g:AutoPairsShortcutJump)
<M-b> : BackInsert (g:AutoPairsShortcutBackInsert)
If <M-p>, <M-e> or <M-n> conflict with other keys, or you want to bind them to other keys, add
let g:AutoPairsShortcutToggle = '<another key>'
to your .vimrc. If the key is the empty string '', the shortcut will be disabled.
Options
-------
* g:AutoPairs
Default: {'(':')', '[':']', '{':'}',"'":"'",'"':'"', '`':'`'}
* b:AutoPairs
Default: g:AutoPairs
Buffer level pairs set.
* g:AutoPairsShortcutToggle
Default: '<M-p>'
The shortcut to toggle autopairs.
* g:AutoPairsShortcutFastWrap
Default: '<M-e>'
Fast wrap the word. All pairs will be considered as a block (including <>).
(|)'hello' after fast wrap at |, the word will be ('hello')
(|)<hello> after fast wrap at |, the word will be (<hello>)
* g:AutoPairsShortcutJump
Default: '<M-n>'
Jump to the next closed pair
* g:AutoPairsMapBS
Default : 1
Map <BS> to delete brackets, quotes in pair
execute 'inoremap <buffer> <silent> <BS> <C-R>=AutoPairsDelete()<CR>'
* g:AutoPairsMapCh
Default : 1
Map <C-h> to delete brackets, quotes in pair
* g:AutoPairsMapCR
Default : 1
Map <CR> to insert a new indented line if cursor in (|), {|} [|], '|', "|"
execute 'inoremap <buffer> <silent> <CR> <C-R>=AutoPairsReturn()<CR>'
* g:AutoPairsCenterLine
Default : 1
When g:AutoPairsMapCR is on, center current line after return if the line is at the bottom 1/3 of the window.
* g:AutoPairsMapSpace
Default : 1
Map <space> to insert a space after the opening character and before the closing one.
execute 'inoremap <buffer> <silent> <CR> <C-R>=AutoPairsSpace()<CR>'
* g:AutoPairsFlyMode
Default : 0
set it to 1 to enable FlyMode.
see FlyMode section for details.
* g:AutoPairsMultilineClose
Default : 1
When you press the key for the closing pair (e.g. `)`) it jumps past it.
If set to 1, then it'll jump to the next line, if there is only whitespace.
If set to 0, then it'll only jump to a closing pair on the same line.
* g:AutoPairsShortcutBackInsert
Default : <M-b>
Work with Fly Mode, insert the key at the Fly Mode jumped position
* g:AutoPairsMoveCharacter
Default: "()[]{}\"'"
Map <M-(> <M-)> <M-[> <M-]> <M-{> <M-}> <M-"> <M-'> to
move character under the cursor to the pair.
Buffer Level Pairs Setting
--------------------------
Set b:AutoPairs before BufEnter
eg:
" When the filetype is FILETYPE then make AutoPairs only match for parenthesis
au Filetype FILETYPE let b:AutoPairs = {"(": ")"}
TroubleShooting
---------------
The script remaps the keys ([{'"}]) and <BS>.
If auto pairs does not work, use :imap ( to check whether the mapping is correct.
The correct map should be <C-R>=AutoPairsInsert("\(")<CR>
Otherwise the plugin may conflict with some other plugin;
use the command :call AutoPairsInit() to remap the keys.
* How to insert parens purely
There are 3 ways:
1. Use Ctrl-V ) to insert a paren without triggering the plugin.
2. Use Alt-P to turn off the plugin.
3. Use DEL or <C-O>x to delete the character inserted by the plugin.
* Swedish Character Conflict
Because AutoPairs uses the Meta (Alt) key for shortcuts, it conflicts with some Swedish characters such as å.
To fix the issue, you need to remap or disable the related shortcut.
Known Issues
-----------------------
Breaks '.' - [issue #3](https://github.com/jiangmiao/auto-pairs/issues/3)
Description: After entering insert mode, typing `[hello` and leaving insert
mode with `<ESC>`, pressing '.' will insert 'hello' instead of '[hello]'.
Reason: `[` actually equals `[]\<LEFT>`, and \<LEFT> breaks '.'.
Since version 7.4.849, Vim implements the new key sequence <C-G>U to avoid the break.
Solution: Update Vim to 7.4.849+
Contributors
------------
* [camthompson](https://github.com/camthompson)
License
-------
Copyright (C) 2011-2013 Miao Jiang
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,356 +0,0 @@
*AutoPairs.txt* Insert or delete brackets, parens, quotes in pair
Author: jiangmiao
License: MIT
URL: https://github.com/jiangmiao/auto-pairs
==============================================================================
CONTENTS *autopairs-contents*
1. Installation ............................. |autopairs-installation|
2. Features ..................................... |autopairs-features|
3. Fly Mode ..................................... |autopairs-fly-mode|
4. Shortcuts ................................... |autopairs-shortcuts|
5. Options ....................................... |autopairs-options|
6. Troubleshooting ...................... |autopairs-troubleshooting|
==============================================================================
1. Introduction *autopairs-installation*
Copy `plugin/auto-pairs.vim` to `~/.vim/plugin`.
Or if you are using `pathogen`: >
git clone git://github.com/jiangmiao/auto-pairs.git ~/.vim/bundle/auto-pairs
==============================================================================
2. Features *autopairs-features*
Insert in pair: >
input: [
output: [|]
Delete in pair: >
input: foo[<BS>]
output: foo
Insert new indented line after Return: >
input: {|} (press <CR> at |)
output: {
|
}
Insert spaces before closing characters, only for [], (), {}: >
input: {|} (press <SPACE> at |)
output: { | }
input: {|} (press <SPACE>foo} at |)
output: { foo }|
input: '|' (press <SPACE> at |)
output: ' |'
Skip ' when inside a word: >
input: foo| (press ' at |)
output: foo'
Skip closed bracket: >
input: []
output: []
Ignore auto pair when previous character is '\': >
input: "\'
output: "\'"
Fast Wrap: >
input: |'hello' (press (<M-e> at |)
output: ('hello')
Wrap string, only support c style string.
input: |'h\\el\'lo' (press (<M-e> at |)
output ('h\\ello\'')
input: |[foo, bar()] (press (<M-e> at |)
output: ([foo, bar()])
Quick jump to closed pair: >
input:
{
something;|
}
(press } at |)
output:
{
}|
Support ```, ''' and """: >
input:
'''
output:
'''|'''
Delete Repeated Pairs in one time: >
input: """|""" (press <BS> at |)
output: |
input: {{|}} (press <BS> at |)
output: |
input: [[[[[[|]]]]]] (press <BS> at |)
output: |
Fly Mode (|autopairs-flymode|): >
input: if(a[3)
output: if(a[3])| (In Fly Mode)
output: if(a[3)]) (Without Fly Mode)
input:
{
hello();|
world();
}
(press } at |)
output:
{
hello();
world();
}|
(then press <M-b> at | to do backinsert)
output:
{
hello();}|
world();
}
See |Fly Mode| section for details
==============================================================================
3. Fly Mode *autopairs-flymode*
Fly Mode will always force jumping to the closed pair instead of inserting it.
Only for ")", "}", "]". If it jumps by mistake, you can use |g:AutoPairsBackInsert|
(default key: <M-b>) to jump back and insert the closed pair.
The most common situation is wanting to insert a single closed pair inside a
string, eg: >
")"
Fly Mode is DISABLED by default. To enable Fly Mode add following to your
'.vimrc': >
let g:AutoPairsFlyMode = 1
Default Options: >
let g:AutoPairsFlyMode = 0
let g:AutoPairsShortcutBackInsert = '<M-b>'
==============================================================================
4. Shortcuts *autopairs-shortcuts*
System Shortcuts:
<CR> : Insert new indented line after return if cursor in blank brackets
or quotes.
<BS> : Delete brackets in pair
<M-p>: Toggle Autopairs (|g:AutoPairsShortcutToggle|)
<M-e>: Fast Wrap (|g:AutoPairsShortcutFastWrap|)
<M-n>: Jump to next closed pair (|g:AutoPairsShortcutJump|)
<M-b>: BackInsert (|g:AutoPairsShortcutBackInsert|)
To rebind the keys <M-p>, <M-e> or <M-n>, or in case they conflict with
other keys:
let g:AutoPairsShortcutToggle = '<another key>'
If the key is empty string '', then the shortcut will be disabled.
==============================================================================
5. Options *autopairs-options*
*g:AutoPairs*
|g:AutoPairs| dict
Default: >
{'(':')', '[':']', '{':'}',"'":"'",'"':'"', '`':'`'}
Specifies which symbols should be automatically paired.
To append new pairs without overwriting defaults, add values in your `.vimrc`.:
let g:AutoPairs['<']='>'
This example will enable matching of `<` with `>`.
*b:AutoPairs*
|b:AutoPairs| dict
Default: |g:AutoPairs|
Buffer level pairs set.
You can set |b:AutoPairs| before |BufEnter|: >
au Filetype FILETYPE let b:AutoPairs = {"(": ")"}
This sets |AutoPairs| to only match for parenthesis for 'FILETYPE'.
*g:AutoPairsShortcutToggle*
|g:AutoPairsShortcutToggle| string
Default: <M-p>
The shortcut to toggle autopairs.
*g:AutoPairsShortcutFastWrap*
|g:AutoPairsShortcutFastWrap| string
Default: <M-e>
Fast wrap the word. All pairs will be considered as a block (including <>).
(|)'hello' after fast wrap at |, the word will be ('hello')
(|)<hello> after fast wrap at |, the word will be (<hello>)
*g:AutoPairsShortcutJump*
|g:AutoPairsShortcutJump| string
Default: <M-n>
Jump to the next closed pair.
*g:AutoPairsShortcutBackInsert*
|g:AutoPairsShortcutBackInsert| string
Default: <M-b>
Work with |autopairs-flymode|, insert the key at the Fly Mode jumped position.
*g:AutoPairsMapBS*
|g:AutoPairsMapBS| int
Default: 1
Map <BS> to delete brackets and quotes in pair, executes:
inoremap <buffer> <silent> <BS> <C-R>=AutoPairsDelete()<CR>
*g:AutoPairsMapCh*
|g:AutoPairsMapCh| int
Default: 1
Map <C-h> to delete brackets and quotes in pair.
*g:AutoPairsMapCR*
|g:AutoPairsMapCR| int
Default: 1
Map <CR> to insert a new indented line if cursor in (|), {|} [|], '|', "|".
Executes:
inoremap <buffer> <silent> <CR> <C-R>=AutoPairsReturn()<CR>
*g:AutoPairsCenterLine*
|g:AutoPairsCenterLine| int
Default: 1
When |g:AutoPairsMapCR| is on, center current line after return if the line
is at the bottom 1/3 of the window.
*g:AutoPairsMapSpace*
|g:AutoPairsMapSpace| int
Default: 1
Map <space> to insert a space after the opening character and before the
closing one.
Executes:
inoremap <buffer> <silent> <CR> <C-R>=AutoPairsSpace()<CR>
*g:AutoPairsFlyMode*
|g:AutoPairsFlyMode| int
Default: 0
Set it to 1 to enable |autopairs-flymode|.
*g:AutoPairsMultilineClose*
|g:AutoPairsMultilineClose| int
Default: 1
When you press the key for the closing pair (e.g. `)`) it jumps past it.
If set to 1, then it'll jump to the next line, if there is only 'whitespace'.
If set to 0, then it'll only jump to a closing pair on the same line.
==============================================================================
6. Troubleshooting *autopairs-troubleshooting*
This plugin remaps keys `([{'"}]) <BS>`
If auto pairs does not work, use |:imap| to check whether the map is correct.
The correct map should be: >
<C-R>=AutoPairsInsert("\(")<CR>
Or the plugin conflicts with some other plugins. Use command: >
:call AutoPairsInit() to remap the keys.
--- How to insert parens purely? ---
There are 3 ways:
1. Use Ctrl-V ) to insert a paren without triggering the plugin.
2. Use Alt-P to turn off the plugin.
3. Use DEL or <C-O>x to delete the character inserted by the plugin.
--- Swedish Character Conflict ---
Because AutoPairs uses the Meta (Alt) key for shortcuts, it conflicts with some
Swedish characters such as å. To fix the issue, you need to remap or disable
the related shortcut.

View File

@@ -1,582 +0,0 @@
" Insert or delete brackets, parens, quotes in pairs.
" Maintainer: JiangMiao <jiangfriend@gmail.com>
" Contributor: camthompson
" Last Change: 2017-06-17
" Version: 1.3.3
" Homepage: http://www.vim.org/scripts/script.php?script_id=3599
" Repository: https://github.com/jiangmiao/auto-pairs
" License: MIT
if exists('g:AutoPairsLoaded') || &cp
finish
end
let g:AutoPairsLoaded = 1
if !exists('g:AutoPairs')
let g:AutoPairs = {'(':')', '[':']', '{':'}',"'":"'",'"':'"', '`':'`'}
end
if !exists('g:AutoPairsParens')
let g:AutoPairsParens = {'(':')', '[':']', '{':'}'}
end
if !exists('g:AutoPairsMapBS')
let g:AutoPairsMapBS = 1
end
" Map <C-h> as the same BS
if !exists('g:AutoPairsMapCh')
let g:AutoPairsMapCh = 1
end
if !exists('g:AutoPairsMapCR')
let g:AutoPairsMapCR = 1
end
if !exists('g:AutoPairsMapSpace')
let g:AutoPairsMapSpace = 1
end
if !exists('g:AutoPairsCenterLine')
let g:AutoPairsCenterLine = 1
end
if !exists('g:AutoPairsShortcutToggle')
let g:AutoPairsShortcutToggle = '<M-p>'
end
if !exists('g:AutoPairsShortcutFastWrap')
let g:AutoPairsShortcutFastWrap = '<M-e>'
end
if !exists('g:AutoPairsMoveCharacter')
let g:AutoPairsMoveCharacter = "()[]{}\"'"
end
if !exists('g:AutoPairsShortcutJump')
let g:AutoPairsShortcutJump = '<M-n>'
endif
" Fly mode will for closed pair to jump to closed pair instead of insert.
" also support AutoPairsBackInsert to insert pairs where jumped.
if !exists('g:AutoPairsFlyMode')
let g:AutoPairsFlyMode = 0
endif
" When skipping the closed pair, look at the current and
" next line as well.
if !exists('g:AutoPairsMultilineClose')
let g:AutoPairsMultilineClose = 1
endif
" Work with Fly Mode, insert pair where jumped
if !exists('g:AutoPairsShortcutBackInsert')
let g:AutoPairsShortcutBackInsert = '<M-b>'
endif
if !exists('g:AutoPairsSmartQuotes')
let g:AutoPairsSmartQuotes = 1
endif
" 7.4.849 support <C-G>U to avoid breaking '.'
" Issue talk: https://github.com/jiangmiao/auto-pairs/issues/3
" Vim note: https://github.com/vim/vim/releases/tag/v7.4.849
if v:version > 704 || v:version == 704 && has("patch849")
let s:Go = "\<C-G>U"
else
let s:Go = ""
endif
let s:Left = s:Go."\<LEFT>"
let s:Right = s:Go."\<RIGHT>"
" Will auto generated {']' => '[', ..., '}' => '{'}in initialize.
let g:AutoPairsClosedPairs = {}
function! AutoPairsInsert(key)
if !b:autopairs_enabled
return a:key
end
let line = getline('.')
let pos = col('.') - 1
let before = strpart(line, 0, pos)
let after = strpart(line, pos)
let next_chars = split(after, '\zs')
let current_char = get(next_chars, 0, '')
let next_char = get(next_chars, 1, '')
let prev_chars = split(before, '\zs')
let prev_char = get(prev_chars, -1, '')
let eol = 0
if col('$') - col('.') <= 1
let eol = 1
end
" Ignore auto close if prev character is \
if prev_char == '\'
return a:key
end
" The key is difference open-pair, then it means only for ) ] } by default
if !has_key(b:AutoPairs, a:key)
let b:autopairs_saved_pair = [a:key, getpos('.')]
" Skip the character if current character is the same as input
if current_char == a:key
return s:Right
end
if !g:AutoPairsFlyMode
" Skip the character if next character is space
if current_char == ' ' && next_char == a:key
return s:Right.s:Right
end
" Skip the character if closed pair is next character
if current_char == ''
if g:AutoPairsMultilineClose
let next_lineno = line('.')+1
let next_line = getline(nextnonblank(next_lineno))
let next_char = matchstr(next_line, '\s*\zs.')
else
let next_char = matchstr(line, '\s*\zs.')
end
if next_char == a:key
return "\<ESC>e^a"
endif
endif
endif
" Fly Mode, and the key is closed-pairs, search closed-pair and jump
if g:AutoPairsFlyMode && has_key(b:AutoPairsClosedPairs, a:key)
let n = stridx(after, a:key)
if n != -1
return repeat(s:Right, n+1)
end
if search(a:key, 'W')
" force break the '.' when jump to different line
return "\<Right>"
endif
endif
" Insert directly if the key is not an open key
return a:key
end
let open = a:key
let close = b:AutoPairs[open]
if current_char == close && open == close
return s:Right
end
" Ignore auto close ' if follows a word
" MUST after closed check. 'hello|'
if a:key == "'" && prev_char =~ '\v\w'
return a:key
end
" support for ''' ``` and """
if open == close
" The key must be ' " `
let pprev_char = line[col('.')-3]
if pprev_char == open && prev_char == open
" Double pair found
return repeat(a:key, 4) . repeat(s:Left, 3)
end
end
let quotes_num = 0
" Ignore comment line for vim file
if &filetype == 'vim' && a:key == '"'
if before =~ '^\s*$'
return a:key
end
if before =~ '^\s*"'
let quotes_num = -1
end
end
" Keep quote number is odd.
" Because quotes should be matched in the same line in most of situation
if g:AutoPairsSmartQuotes && open == close
" Remove \\ \" \'
let cleaned_line = substitute(line, '\v(\\.)', '', 'g')
let n = quotes_num
let pos = 0
while 1
let pos = stridx(cleaned_line, open, pos)
if pos == -1
break
end
let n = n + 1
let pos = pos + 1
endwhile
if n % 2 == 1
return a:key
endif
endif
return open.close.s:Left
endfunction
function! AutoPairsDelete()
if !b:autopairs_enabled
return "\<BS>"
end
let line = getline('.')
let pos = col('.') - 1
let current_char = get(split(strpart(line, pos), '\zs'), 0, '')
let prev_chars = split(strpart(line, 0, pos), '\zs')
let prev_char = get(prev_chars, -1, '')
let pprev_char = get(prev_chars, -2, '')
if pprev_char == '\'
return "\<BS>"
end
" Delete last two spaces in parens, work with MapSpace
if has_key(b:AutoPairs, pprev_char) && prev_char == ' ' && current_char == ' '
return "\<BS>\<DEL>"
endif
" Delete Repeated Pair eg: '''|''' [[|]] {{|}}
if has_key(b:AutoPairs, prev_char)
let times = 0
let p = -1
while get(prev_chars, p, '') == prev_char
let p = p - 1
let times = times + 1
endwhile
let close = b:AutoPairs[prev_char]
let left = repeat(prev_char, times)
let right = repeat(close, times)
let before = strpart(line, pos-times, times)
let after = strpart(line, pos, times)
if left == before && right == after
return repeat("\<BS>\<DEL>", times)
end
end
if has_key(b:AutoPairs, prev_char)
let close = b:AutoPairs[prev_char]
if match(line,'^\s*'.close, col('.')-1) != -1
" Delete (|___)
let space = matchstr(line, '^\s*', col('.')-1)
return "\<BS>". repeat("\<DEL>", len(space)+1)
elseif match(line, '^\s*$', col('.')-1) != -1
" Delete (|__\n___)
let nline = getline(line('.')+1)
if nline =~ '^\s*'.close
if &filetype == 'vim' && prev_char == '"'
" Keep next line's comment
return "\<BS>"
end
let space = matchstr(nline, '^\s*')
return "\<BS>\<DEL>". repeat("\<DEL>", len(space)+1)
end
end
end
return "\<BS>"
endfunction
function! AutoPairsJump()
call search('["\]'')}]','W')
endfunction
" string_chunk cannot use standalone
let s:string_chunk = '\v%(\\\_.|[^\1]|[\r\n]){-}'
let s:ss_pattern = '\v''' . s:string_chunk . ''''
let s:ds_pattern = '\v"' . s:string_chunk . '"'
func! s:RegexpQuote(str)
return substitute(a:str, '\v[\[\{\(\<\>\)\}\]]', '\\&', 'g')
endf
func! s:RegexpQuoteInSquare(str)
return substitute(a:str, '\v[\[\]]', '\\&', 'g')
endf
" Search next open or close pair
func! s:FormatChunk(open, close)
let open = s:RegexpQuote(a:open)
let close = s:RegexpQuote(a:close)
let open2 = s:RegexpQuoteInSquare(a:open)
let close2 = s:RegexpQuoteInSquare(a:close)
if open == close
return '\v'.open.s:string_chunk.close
else
return '\v%(' . s:ss_pattern . '|' . s:ds_pattern . '|' . '[^'.open2.close2.']|[\r\n]' . '){-}(['.open2.close2.'])'
end
endf
" Fast wrap the word in brackets
function! AutoPairsFastWrap()
let line = getline('.')
let current_char = line[col('.')-1]
let next_char = line[col('.')]
let open_pair_pattern = '\v[({\[''"]'
let at_end = col('.') >= col('$') - 1
normal! x
" Skip blank
if next_char =~ '\v\s' || at_end
call search('\v\S', 'W')
let line = getline('.')
let next_char = line[col('.')-1]
end
if has_key(b:AutoPairs, next_char)
let followed_open_pair = next_char
let inputed_close_pair = current_char
let followed_close_pair = b:AutoPairs[next_char]
if followed_close_pair != followed_open_pair
" TODO replace system searchpair to skip string and nested pair.
" eg: (|){"hello}world"} will transform to ({"hello})world"}
call searchpair('\V'.followed_open_pair, '', '\V'.followed_close_pair, 'W')
else
call search(s:FormatChunk(followed_open_pair, followed_close_pair), 'We')
end
return s:Right.inputed_close_pair.s:Left
else
normal! he
return s:Right.current_char.s:Left
end
endfunction
function! AutoPairsMap(key)
" | is special key which separate map command from text
let key = a:key
if key == '|'
let key = '<BAR>'
end
let escaped_key = substitute(key, "'", "''", 'g')
" use expr will cause search() doesn't work
execute 'inoremap <buffer> <silent> '.key." <C-R>=AutoPairsInsert('".escaped_key."')<CR>"
endfunction
function! AutoPairsToggle()
if b:autopairs_enabled
let b:autopairs_enabled = 0
echo 'AutoPairs Disabled.'
else
let b:autopairs_enabled = 1
echo 'AutoPairs Enabled.'
end
return ''
endfunction
function! AutoPairsMoveCharacter(key)
let c = getline(".")[col(".")-1]
let escaped_key = substitute(a:key, "'", "''", 'g')
return "\<DEL>\<ESC>:call search("."'".escaped_key."'".")\<CR>a".c."\<LEFT>"
endfunction
function! AutoPairsReturn()
if b:autopairs_enabled == 0
return ''
end
let line = getline('.')
let pline = getline(line('.')-1)
let prev_char = pline[strlen(pline)-1]
let cmd = ''
let cur_char = line[col('.')-1]
if has_key(b:AutoPairs, prev_char) && b:AutoPairs[prev_char] == cur_char
if g:AutoPairsCenterLine && winline() * 3 >= winheight(0) * 2
" Recenter before adding new line to avoid replacing line content
let cmd = "zz"
end
" If equalprg has been set, then avoid call =
" https://github.com/jiangmiao/auto-pairs/issues/24
if &equalprg != ''
return "\<ESC>".cmd."O"
endif
" conflict with javascript and coffee
" javascript need indent new line
" coffeescript forbid indent new line
if &filetype == 'coffeescript' || &filetype == 'coffee'
return "\<ESC>".cmd."k==o"
else
return "\<ESC>".cmd."=ko"
endif
end
return ''
endfunction
function! AutoPairsSpace()
let line = getline('.')
let prev_char = line[col('.')-2]
let cmd = ''
let cur_char =line[col('.')-1]
if has_key(g:AutoPairsParens, prev_char) && g:AutoPairsParens[prev_char] == cur_char
let cmd = "\<SPACE>".s:Left
endif
return "\<SPACE>".cmd
endfunction
function! AutoPairsBackInsert()
if exists('b:autopairs_saved_pair')
let pair = b:autopairs_saved_pair[0]
let pos = b:autopairs_saved_pair[1]
call setpos('.', pos)
return pair
endif
return ''
endfunction
function! AutoPairsInit()
let b:autopairs_loaded = 1
if !exists('b:autopairs_enabled')
let b:autopairs_enabled = 1
end
let b:AutoPairsClosedPairs = {}
if !exists('b:AutoPairs')
let b:AutoPairs = g:AutoPairs
end
if !exists('b:AutoPairsMoveCharacter')
let b:AutoPairsMoveCharacter = g:AutoPairsMoveCharacter
end
" buffer level map pairs keys
for [open, close] in items(b:AutoPairs)
call AutoPairsMap(open)
if open != close
call AutoPairsMap(close)
end
let b:AutoPairsClosedPairs[close] = open
endfor
for key in split(b:AutoPairsMoveCharacter, '\s*')
let escaped_key = substitute(key, "'", "''", 'g')
execute 'inoremap <silent> <buffer> <M-'.key."> <C-R>=AutoPairsMoveCharacter('".escaped_key."')<CR>"
endfor
" Still use <buffer> level mapping for <BS> <SPACE>
if g:AutoPairsMapBS
" Use <C-R> instead of <expr> for issue #14 sometimes press BS output strange words
execute 'inoremap <buffer> <silent> <BS> <C-R>=AutoPairsDelete()<CR>'
end
if g:AutoPairsMapCh
execute 'inoremap <buffer> <silent> <C-h> <C-R>=AutoPairsDelete()<CR>'
endif
if g:AutoPairsMapSpace
" Try to respect abbreviations on a <SPACE>
let do_abbrev = ""
if v:version == 703 && has("patch489") || v:version > 703
let do_abbrev = "<C-]>"
endif
execute 'inoremap <buffer> <silent> <SPACE> '.do_abbrev.'<C-R>=AutoPairsSpace()<CR>'
end
if g:AutoPairsShortcutFastWrap != ''
execute 'inoremap <buffer> <silent> '.g:AutoPairsShortcutFastWrap.' <C-R>=AutoPairsFastWrap()<CR>'
end
if g:AutoPairsShortcutBackInsert != ''
execute 'inoremap <buffer> <silent> '.g:AutoPairsShortcutBackInsert.' <C-R>=AutoPairsBackInsert()<CR>'
end
if g:AutoPairsShortcutToggle != ''
" use <expr> to ensure showing the status when toggle
execute 'inoremap <buffer> <silent> <expr> '.g:AutoPairsShortcutToggle.' AutoPairsToggle()'
execute 'noremap <buffer> <silent> '.g:AutoPairsShortcutToggle.' :call AutoPairsToggle()<CR>'
end
if g:AutoPairsShortcutJump != ''
execute 'inoremap <buffer> <silent> ' . g:AutoPairsShortcutJump. ' <ESC>:call AutoPairsJump()<CR>a'
execute 'noremap <buffer> <silent> ' . g:AutoPairsShortcutJump. ' :call AutoPairsJump()<CR>'
end
endfunction
function! s:ExpandMap(map)
let map = a:map
let map = substitute(map, '\(<Plug>\w\+\)', '\=maparg(submatch(1), "i")', 'g')
return map
endfunction
function! AutoPairsTryInit()
if exists('b:autopairs_loaded')
return
end
" for auto-pairs starts with 'a', so the priority is higher than supertab and vim-endwise
"
" vim-endwise doesn't support <Plug>AutoPairsReturn
" when use <Plug>AutoPairsReturn will cause <Plug> isn't expanded
"
" supertab doesn't support <SID>AutoPairsReturn
" when use <SID>AutoPairsReturn will cause Duplicated <CR>
"
" and when load after vim-endwise will cause unexpected endwise inserted.
" so always load AutoPairs at last
" Buffer level keys mapping
" comptible with other plugin
if g:AutoPairsMapCR
if v:version == 703 && has('patch32') || v:version > 703
" VIM 7.3 supports advancer maparg which could get <expr> info
" then auto-pairs could remap <CR> in any case.
let info = maparg('<CR>', 'i', 0, 1)
if empty(info)
let old_cr = '<CR>'
let is_expr = 0
else
let old_cr = info['rhs']
let old_cr = s:ExpandMap(old_cr)
let old_cr = substitute(old_cr, '<SID>', '<SNR>' . info['sid'] . '_', 'g')
let is_expr = info['expr']
let wrapper_name = '<SID>AutoPairsOldCRWrapper73'
endif
else
" VIM version less than 7.3
" the mapping's <expr> info is lost, so guess it is expr or not, it's
" not accurate.
let old_cr = maparg('<CR>', 'i')
if old_cr == ''
let old_cr = '<CR>'
let is_expr = 0
else
let old_cr = s:ExpandMap(old_cr)
" old_cr contain (, I guess the old cr is in expr mode
let is_expr = old_cr =~ '\V(' && toupper(old_cr) !~ '\V<C-R>'
" The old_cr start with " it must be in expr mode
let is_expr = is_expr || old_cr =~ '\v^"'
let wrapper_name = '<SID>AutoPairsOldCRWrapper'
end
end
if old_cr !~ 'AutoPairsReturn'
if is_expr
" remap <expr> to `name` to avoid mix expr and non-expr mode
execute 'inoremap <buffer> <expr> <script> '. wrapper_name . ' ' . old_cr
let old_cr = wrapper_name
end
" Always silent mapping
execute 'inoremap <script> <buffer> <silent> <CR> '.old_cr.'<SID>AutoPairsReturn'
end
endif
call AutoPairsInit()
endfunction
" Always silent the command
inoremap <silent> <SID>AutoPairsReturn <C-R>=AutoPairsReturn()<CR>
imap <script> <Plug>AutoPairsReturn <SID>AutoPairsReturn
au BufEnter * :call AutoPairsTryInit()

View File

@@ -1,44 +0,0 @@
# Problem summary
## Expected
## Environment Information
* OS:
* Neovim version:
## Provide a minimal init.vim with less than 50 lines (required)
```vim
" Use the following as a template.
set runtimepath+=~/path/to/deoplete.nvim/
set runtimepath+=~/path/to/deoplete-jedi/
let g:deoplete#enable_at_startup = 1
call deoplete#custom#set('jedi', 'debug_enabled', 1)
call deoplete#enable_logging('DEBUG', '/tmp/deoplete.log')
```
## Generate logfiles if appropriate
1. export NVIM_PYTHON_LOG_FILE=/tmp/nvim-log
2. export NVIM_PYTHON_LOG_LEVEL=DEBUG
3. nvim -u minimal.vimrc
Then look at and attach the files `/tmp/nvim-log_{PID}` and
`/tmp/deoplete.log` here.
## Steps to reproduce the issue after starting Neovim (required)
1.
2.
3.
## Screen shot (if possible)
## Upload the logfile(s)

View File

@@ -1,66 +0,0 @@
### https://raw.github.com/github/gitignore//Python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other info into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
.-rplugin~

View File

@@ -1,6 +0,0 @@
[submodule "rplugin/python3/deoplete/jedi"]
path = rplugin/python3/deoplete/vendored/jedi
url = https://github.com/davidhalter/jedi.git
[submodule "rplugin/python3/deoplete/parso"]
path = rplugin/python3/deoplete/vendored/parso
url = https://github.com/davidhalter/parso.git

View File

@@ -1,34 +0,0 @@
language: python
cache:
directories:
- "$HOME/.cache/pip"
matrix:
include:
- sudo: required
os: linux
dist: trusty
compiler: clang
python: '3.5'
- sudo: required
os: linux
dist: trusty
compiler: clang
python: 'nightly'
before_install:
- uname -a
- cat /etc/lsb-release
install:
- pip3 install -q -U -r ./tests/requirements.txt
script:
- make test
notifications:
email: false
slack:
rooms:
secure: Kjl0kgnF4qcnW/wjk++HnmLGD0FeQLHeaFHAlmeN1Iql7z+I7cBaW35I4P0W0sgZPzF4y6LSReGlxtmEFlqNmmYll6SPw4n2BvNv2Ir+sSl6r61plQdTQsh6RlnW4lRQMZ8JSgP/E6jci3cuchCFWnN7miYP08vmJmKTh3MlW5TjksPpNx6B+zC0zr0JqjXNXZwaSXpQYrA0hwKt0pZOQbgPxEeRnUYEAqiJxR30GmSTZ8OxWHNupNWKtSxaYV1e8/6vcaq3iae6fsP6qceVmNZzPc/IUVXA8NNmu+TKZUaQEQAKWkIm8QJVY7cnHMBdfG56L/NgX7lwmv3cRq+1mDOxEGWtOfnQwQIeV3wRKK8yactQ5cCD32WE2cioAUnvwryjOjRG5Vt8aBuFINoxdz7KTcQye1JqrjDU14ob6JAQnLafClLDhTXht+/W6/UeUr9ZOAX1nVWuuLvIJsU1SP1Uvv/PvuLk+XDrBCunDZwWssRwn1q8pBnEubhe6vbOO134hAeF0/SMnWXKuL/knTL5aqICLQOhj+ooNpb+hU3D3phlHIddhufz8cAWSxR/eqnwQ4LkKfZa2L4DMW02dou8HMl973ft/g/DCdFXFGz53VYqD7V8Mpb2DQ7nkvqmsokSDNNs99cMIDV9LVI3QJGW8OR88wUgIlONl+795a0=

View File

@@ -1,14 +0,0 @@
FROM zchee/neovim:python
MAINTAINER zchee <zchee.io@gmail.com>
RUN pip3 install jedi \
&& git clone https://github.com/Shougo/deoplete.nvim /src/deoplete.nvim \
\
&& echo 'set rtp+=/src/deoplete.nvim' >> /root/.config/nvim/init.vim \
&& echo 'set rtp+=/src/deoplete-jedi' >> /root/.config/nvim/init.vim \
&& echo 'let g:deoplete#enable_at_startup = 1' >> /root/.config/nvim/init.vim \
&& echo 'let g:deoplete#auto_completion_start_length = 1' >> /root/.config/nvim/init.vim
COPY . /src/deoplete-jedi
RUN /src/run.sh

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Koichi Shiraishi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,10 +0,0 @@
RPLUGIN_PATH := ./rplugin/python3/deoplete/sources
all: test
test: flake8
flake8:
flake8 rplugin tests
.PHONY: test flake8

View File

@@ -1,86 +0,0 @@
# deoplete-jedi
[deoplete.nvim](https://github.com/Shougo/deoplete.nvim) source for [jedi](https://github.com/davidhalter/jedi).
| | **Status** |
|:---:|:---:|
| **Travis CI** |[![Build Status](https://travis-ci.org/zchee/deoplete-jedi.svg?branch=master)](https://travis-ci.org/zchee/deoplete-jedi)|
| **Gitter** |[![Join the chat at https://gitter.im/zchee/deoplete-jedi](https://badges.gitter.im/zchee/deoplete-jedi.svg)](https://gitter.im/zchee/deoplete-jedi?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)|
## Required
- Neovim and neovim/python-client
- https://github.com/neovim/neovim
- https://github.com/neovim/python-client
- deoplete.nvim
- https://github.com/Shougo/deoplete.nvim
- jedi
- https://github.com/davidhalter/jedi
## Install
```vim
NeoBundle 'zchee/deoplete-jedi'
" or
Plug 'zchee/deoplete-jedi'
```
**Note:** If you don't want to use a plugin manager, you will need to clone
this repo recursively:
```
git clone --recursive https://github.com/zchee/deoplete-jedi
```
When updating the plugin, you will want to be sure that the Jedi submodule is
kept up to date with:
```
git submodule update --init
```
## Options
- `g:deoplete#sources#jedi#server_timeout`: The timeout (in seconds) for the jedi
server, used to work around an endless loop in jedi. Increase it if you cannot get
completions for a large package such as pandas (see #125). Default: `10`
- `g:deoplete#sources#jedi#statement_length`: Sets the maximum length of
completion description text. If this is exceeded, a simple description is
used instead. Default: `50`
- `g:deoplete#sources#jedi#enable_cache`: Enables caching of completions for
faster results. Default: `1`
- `g:deoplete#sources#jedi#show_docstring`: Shows docstring in preview window.
Default: `0`
- `g:deoplete#sources#jedi#python_path`: Set the Python interpreter path to use
for the completion server. deoplete-jedi uses the first available `python`
in `$PATH`. Use this only if you want to use a specific Python interpreter.
This has no effect if `$VIRTUAL_ENV` is present in the environment.
**Note**: This is completely unrelated to configuring Neovim.
- `g:deoplete#sources#jedi#debug_server`: Enable logging from the server. If
set to `1`, server messages are emitted to Deoplete's log file. This can
optionally be a string that points to a file for separate logging. The log
level will be inherited from `deoplete#enable_logging()`.
- `g:deoplete#sources#jedi#extra_path`: A list of extra paths to add to
`sys.path` when performing completions.
## Virtual Environments
If you are using virtualenv, it is recommended that you create environments
specifically for Neovim. This way, you will not need to install the neovim
package in each virtualenv. Once you have created them, add the following to
your vimrc file:
```vim
let g:python_host_prog = '/full/path/to/neovim2/bin/python'
let g:python3_host_prog = '/full/path/to/neovim3/bin/python'
```
Deoplete only requires Python 3. See `:h nvim-python-quickstart` for more
information.

View File

@@ -1,10 +0,0 @@
# Colorable output
CRESET := \x1b[0m
CBLACK := \x1b[30;01m
CRED := \x1b[31;01m
CGREEN := \x1b[32;01m
CYELLOW := \x1b[33;01m
CBLUE := \x1b[34;01m
CMAGENTA := \x1b[35;01m
CCYAN := \x1b[36;01m
CWHITE := \x1b[37;01m

View File

@@ -1,13 +0,0 @@
IMPORT_LOGGER := from logging import getLogger\nlogger = getLogger(__name__)
IMPORT_TIMEIT := from profiler import timeit
IMPORT_PYVMMONITOR := import sys\nsys.path.append("\/Applications\/PyVmMonitor.app\/Contents\/MacOS\/public_api")\nimport pyvmmonitor
SET_DEBUG_PREFIX := jedi_settings.cache_directory \= os.path.join\(cache_home, 'jedi'\)
SET_DEBUG := try:\n from helper import set_debug\n if self.vim.vars["deoplete\#enable_debug"]:\n log_file \= self.vim.vars["deoplete\#sources\#jedi\#debug\#log_file"]\n set_debug(logger, os.path.expanduser(log_file))\n except Exception:\n pass\n
TIMEIT_PREFIX := @timeit(logger,
TIMEIT_SUFFIX := )
TIMEIT_GET_COMPLETE_POSITION := ${TIMEIT_PREFIX}"simple", [0.00003000, 0.00015000]${TIMEIT_SUFFIX}
TIMEIT_GATHER_CANDIDATES := ${TIMEIT_PREFIX}"simple", [0.10000000, 0.20000000]${TIMEIT_SUFFIX}
PYVMMONITOR_DECORATOR := @pyvmmonitor.profile_method()

View File

@@ -1,290 +0,0 @@
import logging
import os
import re
import sys
import time
from deoplete.util import getlines
sys.path.insert(1, os.path.dirname(__file__)) # noqa: E261
from deoplete_jedi import cache, profiler, utils, worker
from .base import Base
def sort_key(item):
w = item.get('name')
z = len(w) - len(w.lstrip('_'))
return (('z' * z) + w.lower()[z:], len(w))
class Source(Base):
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'jedi'
self.mark = '[jedi]'
self.rank = 500
self.filetypes = ['python', 'cython', 'pyrex']
self.input_pattern = (r'[\w\)\]\}\'\"]+\.\w*$|'
r'^\s*@\w*$|'
r'^\s*from\s+[\w\.]*(?:\s+import\s+(?:\w*(?:,\s*)?)*)?|'
r'^\s*import\s+(?:[\w\.]*(?:,\s*)?)*')
self._async_keys = set()
self.workers_started = False
def on_init(self, context):
vars = context['vars']
self.statement_length = vars.get(
'deoplete#sources#jedi#statement_length', 0)
self.server_timeout = vars.get(
'deoplete#sources#jedi#server_timeout', 10)
self.use_short_types = vars.get(
'deoplete#sources#jedi#short_types', False)
self.show_docstring = vars.get(
'deoplete#sources#jedi#show_docstring', False)
self.debug_server = vars.get(
'deoplete#sources#jedi#debug_server', None)
# Only one worker is really needed since deoplete-jedi has a pretty
# aggressive cache.
# Two workers may be needed if working with very large source files.
self.worker_threads = vars.get(
'deoplete#sources#jedi#worker_threads', 2)
# Hard coded python interpreter location
self.python_path = vars.get(
'deoplete#sources#jedi#python_path', '')
self.extra_path = vars.get(
'deoplete#sources#jedi#extra_path', [])
self.boilerplate = [] # Completions that are included in all results
log_file = ''
root_log = logging.getLogger('deoplete')
if self.debug_server:
self.is_debug_enabled = True
if isinstance(self.debug_server, str):
log_file = self.debug_server
else:
for handler in root_log.handlers:
if isinstance(handler, logging.FileHandler):
log_file = handler.baseFilename
break
if not self.is_debug_enabled:
child_log = root_log.getChild('jedi')
child_log.propagate = False
if not self.workers_started:
if self.python_path and 'VIRTUAL_ENV' not in os.environ:
cache.python_path = self.python_path
worker.start(max(1, self.worker_threads), self.statement_length,
self.server_timeout, self.use_short_types, self.show_docstring,
(log_file, root_log.level), self.python_path)
cache.start_background(worker.comp_queue)
self.workers_started = True
def get_complete_position(self, context):
pattern = r'\w*$'
if context['input'].lstrip().startswith(('from ', 'import ')):
m = re.search(r'[,\s]$', context['input'])
if m:
return m.end()
m = re.search(pattern, context['input'])
return m.start() if m else -1
def mix_boilerplate(self, completions):
seen = set()
for item in self.boilerplate + completions:
if item['name'] in seen:
continue
seen.add(item['name'])
yield item
def finalize(self, item):
abbr = item['name']
if self.show_docstring:
desc = item['doc']
else:
desc = ''
if item['params'] is not None:
sig = '{}({})'.format(item['name'], ', '.join(item['params']))
sig_len = len(sig)
desc = sig + '\n\n' + desc
if self.statement_length > 0 and sig_len > self.statement_length:
params = []
length = len(item['name']) + 2
for p in item['params']:
p = p.split('=', 1)[0]
length += len(p)
params.append(p)
length += 2 * (len(params) - 1)
# +5 for the ellipsis and separator
while length + 5 > self.statement_length and len(params):
length -= len(params[-1]) + 2
params = params[:-1]
if len(item['params']) > len(params):
params.append('...')
sig = '{}({})'.format(item['name'], ', '.join(params))
abbr = sig
if self.use_short_types:
kind = item['short_type'] or item['type']
else:
kind = item['type']
return {
'word': item['name'],
'abbr': abbr,
'kind': kind,
'info': desc.strip(),
'menu': '[jedi] ',
'dup': 1,
}
def finalize_cached(self, cache_key, filters, cached):
if cached:
if cached.completions is None:
out = self.mix_boilerplate([])
elif cache_key[-1] == 'vars':
out = self.mix_boilerplate(cached.completions)
else:
out = cached.completions
if filters:
out = (x for x in out if x['type'] in filters)
return [self.finalize(x) for x in sorted(out, key=sort_key)]
return []
@profiler.profile
def gather_candidates(self, context):
refresh_boilerplate = False
if not self.boilerplate:
bp = cache.retrieve(('boilerplate~',))
if bp:
self.boilerplate = bp.completions[:]
refresh_boilerplate = True
else:
# This should be the first time any completion happened, so
# `wait` will be True.
worker.work_queue.put((('boilerplate~',), [], '', 1, 0, '', None))
line = context['position'][1]
col = context['complete_position']
buf = self.vim.current.buffer
src = getlines(self.vim)
extra_modules = []
cache_key = None
cached = None
refresh = True
wait = False
# Inclusion filters for the results
filters = []
if re.match('^\s*(from|import)\s+', context['input']) \
and not re.match('^\s*from\s+\S+\s+', context['input']):
# If starting an import, only show module results
filters.append('module')
cache_key, extra_modules = cache.cache_context(buf.name, context, src,
self.extra_path)
cached = cache.retrieve(cache_key)
if cached and not cached.refresh:
modules = cached.modules
if all([filename in modules for filename in extra_modules]) \
and all([utils.file_mtime(filename) == mtime
for filename, mtime in modules.items()]):
# The cache is still valid
refresh = False
if cache_key and (cache_key[-1] in ('dot', 'vars', 'import', 'import~') or
(cached and cache_key[-1] == 'package' and
not len(cached.modules))):
# Always refresh scoped variables and module imports. Additionally
# refresh cached items that did not have associated module files.
refresh = True
# Extra options to pass to the server.
options = {
'cwd': context.get('cwd'),
'extra_path': self.extra_path,
'runtimepath': context.get('runtimepath'),
}
if (not cached or refresh) and cache_key and cache_key[-1] == 'package':
# Create a synthetic completion for a module import as a fallback.
synthetic_src = ['import {0}; {0}.'.format(cache_key[0])]
options.update({
'synthetic': {
'src': synthetic_src,
'line': 1,
'col': len(synthetic_src[0]),
}
})
if not cached:
wait = True
# Note: This waits a very short amount of time to give the server or
# cache a chance to reply. If there's no reply during this period,
# empty results are returned and we defer to deoplete's async refresh.
# The current request's async status is tracked in `_async_keys`.
# If the async cache result is older than 5 seconds, the completion
# request goes back to the default behavior of attempting to refresh as
# needed by the `refresh` and `wait` variables above.
self.debug('Key: %r, Refresh: %r, Wait: %r, Async: %r', cache_key,
refresh, wait, cache_key in self._async_keys)
context['is_async'] = cache_key in self._async_keys
if context['is_async']:
if not cached:
self.debug('[async] waiting for completions: %r', cache_key)
return []
else:
self._async_keys.remove(cache_key)
context['is_async'] = False
if time.time() - cached.time < 5:
self.debug('[async] finished: %r', cache_key)
return self.finalize_cached(cache_key, filters, cached)
else:
self.debug('[async] outdated: %r', cache_key)
if cache_key and (not cached or refresh):
n = time.time()
wait_complete = False
worker.work_queue.put((cache_key, extra_modules, '\n'.join(src),
line, col, str(buf.name), options))
while wait and time.time() - n < 0.25:
cached = cache.retrieve(cache_key)
if cached and cached.time >= n:
self.debug('Got updated cache, stopped waiting.')
wait_complete = True
break
time.sleep(0.01)
if wait and not wait_complete:
self._async_keys.add(cache_key)
context['is_async'] = True
self.debug('[async] deferred: %r', cache_key)
return []
if refresh_boilerplate:
# This should only occur the first time completions happen.
# Refresh the boilerplate to ensure it's always up to date (just in
# case).
self.debug('Refreshing boilerplate')
worker.work_queue.put((('boilerplate~',), [], '', 1, 0, '', None))
return self.finalize_cached(cache_key, filters, cached)

View File

@@ -1,451 +0,0 @@
import glob
import hashlib
import json
import logging
import os
import re
import subprocess
import threading
import time
from itertools import chain
from string import whitespace
from deoplete_jedi import utils
_paths = []
_cache_path = None
# List of items in the file system cache. `import~` is a special key for
# caching import modules. It should not be cached to disk.
_file_cache = set(['import~'])
# Cache version allows us to invalidate outdated cache data structures.
_cache_version = 16
_cache_lock = threading.RLock()
_cache = {}
python_path = 'python'
log = logging.getLogger('deoplete.jedi.cache')
# This uses [\ \t] to avoid spanning lines
_import_re = re.compile(r'''
^[\ \t]*(
from[\ \t]+[\w\.]+[\ \t]+import\s+\([\s\w,]+\)|
from[\ \t]+[\w\.]+[\ \t]+import[\ \t\w,]+|
import[\ \t]+\([\s\w,]+\)|
import[\ \t]+[\ \t\w,]+
)
''', re.VERBOSE | re.MULTILINE)
class CacheEntry(object):
def __init__(self, dict):
self.key = tuple(dict.get('cache_key'))
self._touched = time.time()
self.time = dict.get('time')
self.modules = dict.get('modules')
self.completions = dict.get('completions', [])
self.refresh = False
if self.completions is None:
self.refresh = True
self.completions = []
def update_from(self, other):
self.key = other.key
self.time = other.time
self.modules = other.modules
self.completions = other.completions
def touch(self):
self._touched = time.time()
def to_dict(self):
return {
'version': _cache_version,
'cache_key': self.key,
'time': self.time,
'modules': self.modules,
'completions': self.completions,
}
def get_cache_path():
global _cache_path
if not _cache_path or not os.path.isdir(_cache_path):
p = subprocess.Popen([python_path, '-V'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
version = re.search(r'(\d+\.\d+)\.', (stdout or stderr).decode('utf8')).group(1)
cache_dir = os.getenv('XDG_CACHE_HOME', '~/.cache')
cache_dir = os.path.join(os.path.expanduser(cache_dir), 'deoplete/jedi',
version)
if not os.path.exists(cache_dir):
umask = os.umask(0)
os.makedirs(cache_dir, 0o0700)
os.umask(umask)
_cache_path = cache_dir
return _cache_path
def retrieve(key):
if not key:
return None
with _cache_lock:
if key[-1] == 'package' and key[0] not in _file_cache:
# This will only load the cached item from a file the first time it
# was seen.
cache_file = os.path.join(get_cache_path(), '{}.json'.format(key[0]))
if os.path.isfile(cache_file):
with open(cache_file, 'rt') as fp:
try:
data = json.load(fp)
if data.get('version', 0) >= _cache_version:
_file_cache.add(key[0])
cached = CacheEntry(data)
cached.time = time.time()
_cache[key] = cached
log.debug('Loaded from file: %r', key)
return cached
except Exception:
pass
cached = _cache.get(key)
if cached:
cached.touch()
return cached
def store(key, value):
with _cache_lock:
if not isinstance(value, CacheEntry):
value = CacheEntry(value)
if value.refresh:
# refresh is set when completions is None. This will be due to
# Jedi producing an error and not getting any completions. Use any
# previously cached completions while a refresh is attempted.
old = _cache.get(key)
if old is not None:
value.completions = old.completions
_cache[key] = value
if key[-1] == 'package' and key[0] not in _file_cache:
_file_cache.add(key[0])
cache_file = os.path.join(get_cache_path(), '{}.json'.format(key[0]))
with open(cache_file, 'wt') as fp:
json.dump(value.to_dict(), fp)
log.debug('Stored to file: %r', key)
return value
def exists(key):
with _cache_lock:
return key in _cache
def reap_cache(max_age=300):
"""Clear the cache of old items
Module level completions are exempt from reaping. It is assumed that
module level completions will have a key length of 1.
"""
while True:
time.sleep(300)
with _cache_lock:
now = time.time()
cur_len = len(_cache)
for cached in list(_cache.values()):
if cached.key[-1] not in ('package', 'local', 'boilerplate~',
'import~') \
and now - cached._touched > max_age:
_cache.pop(cached.key)
if cur_len - len(_cache) > 0:
log.debug('Removed %d of %d cache items', cur_len - len(_cache), cur_len)
def cache_processor_thread(compl_queue):
errors = 0
while True:
try:
compl = compl_queue.get()
cache_key = compl.get('cache_key')
cached = retrieve(cache_key)
if cached is None or cached.time <= compl.get('time'):
cached = store(cache_key, compl)
log.debug('Processed: %r', cache_key)
errors = 0
except Exception as e:
errors += 1
if errors > 3:
break
log.error('Got exception while processing: %r', e)
def start_background(compl_queue):
log.debug('Starting reaper thread')
t = threading.Thread(target=cache_processor_thread, args=(compl_queue,))
t.daemon = True
t.start()
t = threading.Thread(target=reap_cache)
t.daemon = True
t.start()
# balanced() taken from:
# http://stackoverflow.com/a/6753172/4932879
# Modified to include string delimiters
def _balanced():
# Doc strings might be an issue, but we don't care.
idelim = iter("""(){}[]""''""")
delims = dict(zip(idelim, idelim))
odelims = {v: k for k, v in delims.items()}
closing = delims.values()
def balanced(astr):
"""Test if a string has balanced delimiters.
Returns a boolean and a string of the opened delimiter.
"""
stack = []
skip = False
open_d = ''
open_str = ''
for c in astr:
if c == '\\':
skip = True
continue
if skip:
skip = False
continue
d = delims.get(c, None)
if d and not open_str:
if d in '"\'':
open_str = d
open_d = odelims.get(d)
stack.append(d)
elif c in closing:
if c == open_str:
open_str = ''
if not open_str and (not stack or c != stack.pop()):
return False, open_d
if stack:
open_d = odelims.get(stack[-1])
else:
open_d = ''
return not stack, open_d
return balanced
balanced = _balanced()
def split_module(text, default_value=None):
"""Utility to split the module text.
If there is nothing to split, return `default_value`.
"""
b, d = balanced(text)
if not b:
# Handles cases where the cursor is inside of unclosed delimiters.
# If the input is: re.search(x.spl
# The returned value should be: x
if d and d not in '\'"':
di = text.rfind(d)
if di != -1:
text = text[di+1:]
else:
return default_value
m = re.search('([\S\.]+)$', text)
if m and '.' in m.group(1):
return m.group(1).rsplit('.', 1)[0]
return default_value
def get_parents(source, line, class_only=False):
"""Find the parent blocks
Collects parent blocks that contain the current line to help form a cache
key based on variable scope.
"""
parents = []
start = line - 1
indent = len(source[start]) - len(source[start].lstrip())
if class_only:
pattern = r'^\s*class\s+(\w+)'
else:
pattern = r'^\s*(?:def|class)\s+(\w+)'
for i in range(start, 0, -1):
s_line = source[i].lstrip()
l_indent = len(source[i]) - len(s_line)
if s_line and l_indent < indent:
m = re.search(pattern, s_line)
indent = l_indent
if m:
parents.insert(0, m.group(1))
return parents
def full_module(source, obj):
"""Construct the full module path
This finds all imports and attempts to reconstruct the full module path.
If matched on a standard `import` line, `obj` itself is a full module path.
On `from` import lines, the parent module is prepended to `obj`.
"""
module = ''
obj_pat = r'(?:(\S+)\s+as\s+)?\b{0}\b'.format(re.escape(obj.split('.', 1)[0]))
for match in _import_re.finditer('\n'.join(source)):
module = ''
imp_line = ' '.join(match.group(0).split())
if imp_line.startswith('from '):
_, module, imp_line = imp_line.split(' ', 2)
m = re.search(obj_pat, imp_line)
if m:
# If the import is aliased, use the alias as part of the key
alias = m.group(1)
if alias:
obj = obj.split('.')
obj[0] = alias
obj = '.'.join(obj)
if module:
return '.'.join((module, obj))
return obj
return None
def sys_path(refresh=False):
global _paths
if not _paths or refresh:
p = subprocess.Popen([
python_path,
'-c', r'import sys; print("\n".join(sys.path))',
], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
_paths = [x for x in stdout.decode('utf8').split('\n')
if x and os.path.isdir(x)]
return _paths
def is_package(module, refresh=False):
"""Test if a module path is an installed package
The current interpreter's sys.path is retrieved on first run.
"""
if re.search(r'[^\w\.]', module):
return False
paths = sys_path(refresh)
module = module.split('.', 1)[0]
pglobs = [os.path.join(x, module, '__init__.py') for x in paths]
pglobs.extend([os.path.join(x, '{}.*'.format(module)) for x in paths])
return any(map(glob.glob, pglobs))
def cache_context(filename, context, source, extra_path):
"""Caching based on context input.
If the input is blank, it was triggered with `.` to get module completions.
The module files as reported by Jedi are stored with their modification
times to help detect if a cache needs to be refreshed.
For scoped variables in the buffer, construct a cache key using the
filename. The buffer file's modification time is checked to see if the
completion needs to be refreshed. The approximate scope lines are cached
to help invalidate the cache based on line position.
Cache keys are made using tuples to make them easier to interpret later.
"""
cinput = context['input'].lstrip().lstrip('@')
if not re.sub(r'[\s\d\.]+', '', cinput):
return None, []
filename_hash = hashlib.md5(filename.encode('utf8')).hexdigest()
line = context['position'][1]
log.debug('Input: "%s"', cinput)
cache_key = None
extra_modules = []
cur_module = os.path.splitext(os.path.basename(filename))[0]
if cinput.startswith(('import ', 'from ')):
# Cache imports with buffer filename as the key prefix.
# For `from` imports, the first part of the statement is
# considered to be the same as `import` for caching.
import_key = 'import~'
cinput = context['input'].lstrip()
m = re.search(r'^from\s+(\S+)(.*)', cinput)
if m:
if m.group(2).lstrip() in 'import':
cache_key = ('importkeyword~', )
return cache_key, extra_modules
import_key = m.group(1) or 'import~'
elif cinput.startswith('import ') and cinput.rstrip().endswith('.'):
import_key = re.sub(r'[^\s\w\.]', ' ', cinput.strip()).split()[-1]
if import_key:
if '.' in import_key and import_key[-1] not in whitespace \
and not re.search(r'^from\s+\S+\s+import', cinput):
# Dot completion on the import line
import_key, _ = import_key.rsplit('.', 1)
import_key = import_key.rstrip('.')
module_file = utils.module_search(
import_key,
chain(extra_path,
[context.get('cwd'), os.path.dirname(filename)],
utils.rplugin_runtime_paths(context)))
if module_file:
cache_key = (import_key, 'local')
extra_modules.append(module_file)
elif is_package(import_key):
cache_key = (import_key, 'package')
elif not cinput.endswith('.'):
cache_key = ('import~',)
else:
return None, extra_modules
if not cache_key:
obj = split_module(cinput.strip())
if obj:
cache_key = (obj, 'package')
if obj.startswith('self'):
if os.path.exists(filename):
extra_modules.append(filename)
# `self` is a special case object that needs a scope included
# in the cache key.
parents = get_parents(source, line, class_only=True)
parents.insert(0, cur_module)
cache_key = (filename_hash, tuple(parents), obj)
else:
module_path = full_module(source, obj)
if module_path and not module_path.startswith('.') \
and is_package(module_path):
cache_key = (module_path, 'package')
else:
# A quick scan revealed that the dot completion doesn't
# involve an imported module. Treat it like a scoped
# variable and ensure the cache invalidates when the file
# is saved.
if os.path.exists(filename):
extra_modules.append(filename)
module_file = utils.module_search(module_path,
[os.path.dirname(filename)])
if module_file:
cache_key = (module_path, 'local')
else:
parents = get_parents(source, line)
parents.insert(0, cur_module)
cache_key = (filename_hash, tuple(parents), obj, 'dot')
elif context.get('complete_str') or cinput.rstrip().endswith('='):
parents = get_parents(source, line)
parents.insert(0, cur_module)
cache_key = (filename_hash, tuple(parents), 'vars')
if os.path.exists(filename):
extra_modules.append(filename)
return cache_key, extra_modules

View File

@@ -1,9 +0,0 @@
def set_debug(logger, path):
from logging import FileHandler, Formatter, DEBUG
hdlr = FileHandler(path)
logger.addHandler(hdlr)
datefmt = '%Y/%m/%d %H:%M:%S'
fmt = Formatter(
"%(levelname)s %(asctime)s %(message)s", datefmt=datefmt)
hdlr.setFormatter(fmt)
logger.setLevel(DEBUG)

View File

@@ -1,63 +0,0 @@
import functools
import queue
try:
import statistics
stdev = statistics.stdev
mean = statistics.mean
except ImportError:
stdev = None
def mean(l):
return sum(l) / len(l)
try:
import time
clock = time.perf_counter
except Exception:
import timeit
clock = timeit.default_timer
class tfloat(float):
color = 39
def __str__(self):
n = self * 1000
return '\x1b[%dm%f\x1b[mms' % (self.color, n)
def profile(func):
name = func.__name__
samples = queue.deque(maxlen=5)
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if not self.is_debug_enabled:
return func(self, *args, **kwargs)
start = clock()
ret = func(self, *args, **kwargs)
n = tfloat(clock() - start)
if len(samples) < 2:
m = 0
d = 0
n.color = 36
else:
m = mean(samples)
if stdev:
d = tfloat(stdev(samples))
else:
d = 0
if n <= m + d:
n.color = 32
elif n > m + d * 2:
n.color = 31
else:
n.color = 33
samples.append(n)
self.info('\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)',
name, n, m, d)
return ret
return wrapper

View File

@@ -1,576 +0,0 @@
"""Jedi mini server for deoplete-jedi
This script allows Jedi to run using the Python interpreter that is found in
the user's environment instead of the one Neovim is using.
Jedi seems to accumulate latency with each completion. To deal with this, the
server is restarted after 50 completions. This threshold is relatively high
considering that deoplete-jedi caches completion results. These combined
should make deoplete-jedi's completions pretty fast and responsive.
"""
from __future__ import unicode_literals
import argparse
import functools
import logging
import os
import re
import struct
import subprocess
import sys
import threading
import time
from glob import glob
# This is possible because the path is inserted in deoplete_jedi.py as well
# as set in PYTHONPATH by the Client class.
from deoplete_jedi import utils
log = logging.getLogger('deoplete')
nullHandler = logging.NullHandler()
if not log.handlers:
log.addHandler(nullHandler)
try:
import cPickle as pickle
except ImportError:
import pickle
libpath = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'vendored')
jedi_path = os.path.join(libpath, 'jedi')
parso_path = os.path.join(libpath, 'parso')
# Type mapping. Empty values will use the key value instead.
# Keep them 5 characters max to minimize required space to display.
_types = {
'import': 'imprt',
'class': '',
'function': 'def',
'globalstmt': 'var',
'instance': 'var',
'statement': 'var',
'keyword': 'keywd',
'module': 'mod',
'param': 'arg',
'property': 'prop',
'bool': '',
'bytes': 'byte',
'complex': 'cmplx',
'dict': '',
'list': '',
'float': '',
'int': '',
'object': 'obj',
'set': '',
'slice': '',
'str': '',
'tuple': '',
'mappingproxy': 'dict', # cls.__dict__
'member_descriptor': 'cattr',
'getset_descriptor': 'cprop',
'method_descriptor': 'cdef',
}
class StreamError(Exception):
"""Error in reading/writing streams."""
class StreamEmpty(StreamError):
"""Empty stream data"""
def stream_read(pipe):
"""Read data from the pipe."""
buffer = getattr(pipe, 'buffer', pipe)
header = buffer.read(4)
if not len(header):
raise StreamEmpty
if len(header) < 4:
raise StreamError('Incorrect byte length')
length = struct.unpack('I', header)[0]
data = buffer.read(length)
if len(data) < length:
raise StreamError('Got less data than expected')
return pickle.loads(data)
def stream_write(pipe, obj):
"""Write data to the pipe."""
data = pickle.dumps(obj, 2)
header = struct.pack(b'I', len(data))
buffer = getattr(pipe, 'buffer', pipe)
buffer.write(header + data)
pipe.flush()
def strip_decor(source):
"""Remove decorators lines
If the decorator is a function call, this will leave them dangling. Jedi
should be fine with this since they'll look like tuples just hanging out
not doing anything important.
"""
return re.sub(r'^(\s*)@\w+', r'\1', source, flags=re.M)
def retry_completion(func):
"""Decorator to retry a completion
A second attempt is made with decorators stripped from the source.
"""
@functools.wraps(func)
def wrapper(self, source, *args, **kwargs):
try:
return func(self, source, *args, **kwargs)
except Exception:
if '@' in source:
log.warn('Retrying completion %r', func.__name__, exc_info=True)
try:
return func(self, strip_decor(source), *args, **kwargs)
except Exception:
pass
log.warn('Failed completion %r', func.__name__, exc_info=True)
return wrapper
class Server(object):
"""Server class
This is created when this script is run directly.
"""
def __init__(self, desc_len=0, short_types=False, show_docstring=False):
self.desc_len = desc_len
self.use_short_types = short_types
self.show_docstring = show_docstring
self.unresolved_imports = set()
from jedi import settings
settings.use_filesystem_cache = False
def _loop(self):
from jedi.evaluate.sys_path import _get_venv_sitepackages
while True:
data = stream_read(sys.stdin)
if not isinstance(data, tuple):
continue
cache_key, source, line, col, filename, options = data
orig_path = sys.path[:]
venv = os.getenv('VIRTUAL_ENV')
if venv:
sys.path.insert(0, _get_venv_sitepackages(venv))
add_path = self.find_extra_sys_path(filename)
if add_path and add_path not in sys.path:
# Add the found path to sys.path. I'm not 100% certain if this
# is actually helping anything, but it feels like the right
# thing to do.
sys.path.insert(0, add_path)
if filename:
sys.path.append(os.path.dirname(filename))
if isinstance(options, dict):
extra = options.get('extra_path')
if extra:
if not isinstance(extra, list):
extra = [extra]
sys.path.extend(extra)
# Add extra paths if working on a Python remote plugin.
sys.path.extend(utils.rplugin_runtime_paths(options))
# Decorators on incomplete functions cause an error to be raised by
# Jedi. I assume this is because Jedi is attempting to evaluate
# the return value of the wrapped, but broken, function.
# Our solution is to simply strip decorators from the source since
# we are a completion service, not the syntax police.
out = self.script_completion(source, line, col, filename)
if not out and cache_key[-1] == 'vars':
# Attempt scope completion. If it fails, it should fall
# through to script completion.
log.debug('Fallback to scoped completions')
out = self.scoped_completions(source, filename, cache_key[-2])
if not out and isinstance(options, dict) and 'synthetic' in options:
synthetic = options.get('synthetic')
log.debug('Using synthetic completion: %r', synthetic)
out = self.script_completion(synthetic['src'],
synthetic['line'],
synthetic['col'], filename)
if not out and cache_key[-1] in ('package', 'local'):
# The backup plan
log.debug('Fallback to module completions')
try:
out = self.module_completions(cache_key[0], sys.path)
except Exception:
pass
stream_write(sys.stdout, out)
sys.path[:] = orig_path
def run(self):
log.debug('Starting server. sys.path = %r', sys.path)
try:
stream_write(sys.stdout, tuple(sys.version_info))
self._loop()
except StreamEmpty:
log.debug('Input closed. Shutting down.')
except Exception:
log.exception('Server Exception. Shutting down.')
def find_extra_sys_path(self, filename):
"""Find the file's "root"
This tries to determine the script's root package. The first step is
to scan upward until there are no longer __init__.py files. If that
fails, check immediate subdirectories to find __init__.py files which
could mean that the current script is not part of a package, but has
sub-modules.
"""
add_path = ''
dirname = os.path.dirname(filename)
scan_dir = dirname
while len(scan_dir) \
and os.path.isfile(os.path.join(scan_dir, '__init__.py')):
scan_dir = os.path.dirname(scan_dir)
if scan_dir != dirname:
add_path = scan_dir
elif glob('{}/*/__init__.py'.format(dirname)):
add_path = dirname
return add_path
def module_completions(self, module, paths):
"""Directly get completions from the module file
This is the fallback if all else fails for module completion.
"""
found = utils.module_search(module, paths)
if not found:
return None
log.debug('Found script for fallback completions: %r', found)
mod_parts = tuple(re.sub(r'\.+', '.', module).strip('.').split('.'))
path_parts = os.path.splitext(found)[0].split('/')
if path_parts[-1] == '__init__':
path_parts.pop()
path_parts = tuple(path_parts)
match_mod = mod_parts
ml = len(mod_parts)
for i in range(ml):
if path_parts[i-ml:] == mod_parts[:ml-i]:
match_mod = mod_parts[-i:]
break
log.debug('Remainder to match: %r', match_mod)
import jedi
completions = jedi.api.names(path=found, references=True)
completions = utils.jedi_walk(completions)
while len(match_mod):
for c in completions:
if c.name == match_mod[0]:
completions = c.defined_names()
break
else:
log.debug('No more matches at %r', match_mod[0])
return []
match_mod = match_mod[:-1]
out = []
tmp_filecache = {}
seen = set()
for c in completions:
parsed = self.parse_completion(c, tmp_filecache)
seen_key = (parsed['type'], parsed['name'])
if seen_key in seen:
continue
seen.add(seen_key)
out.append(parsed)
return out
@retry_completion
def script_completion(self, source, line, col, filename):
"""Standard Jedi completions"""
import jedi
log.debug('Line: %r, Col: %r, Filename: %r', line, col, filename)
completions = jedi.Script(source, line, col, filename).completions()
out = []
tmp_filecache = {}
for c in completions:
out.append(self.parse_completion(c, tmp_filecache))
return out
def get_parents(self, c):
"""Collect parent blocks
This is for matching a request's cache key when performing scoped
completions.
"""
parents = []
while True:
try:
c = c.parent()
parents.insert(0, c.name)
if c.type == 'module':
break
except AttributeError:
break
return tuple(parents)
def resolve_import(self, completion, depth=0, max_depth=10, seen=None):
"""Follow import until it no longer is an import type"""
if seen is None:
seen = []
seen.append(completion)
log.debug('Resolving: %r', completion)
defs = completion.goto_assignments()
if not defs:
return None
resolved = defs[0]
if resolved in seen:
return None
if resolved.type == 'import' and depth < max_depth:
return self.resolve_import(resolved, depth + 1, max_depth, seen)
log.debug('Resolved: %r', resolved)
return resolved
@retry_completion
def scoped_completions(self, source, filename, parent):
"""Scoped completion
This gets all definitions for a specific scope allowing them to be
cached without needing to consider the current position in the source.
This would be slow in Vim without threading.
"""
import jedi
completions = jedi.api.names(source, filename, all_scopes=True)
out = []
tmp_filecache = {}
seen = set()
for c in completions:
c_parents = self.get_parents(c)
if parent and (len(c_parents) > len(parent) or
c_parents != parent[:len(c_parents)]):
continue
if c.type == 'import' and c.full_name not in self.unresolved_imports:
resolved = self.resolve_import(c)
if resolved is None:
log.debug('Could not resolve import: %r', c.full_name)
self.unresolved_imports.add(c.full_name)
continue
else:
c = resolved
parsed = self.parse_completion(c, tmp_filecache)
seen_key = (parsed['name'], parsed['type'])
if seen_key in seen:
continue
seen.add(seen_key)
out.append(parsed)
return out
def completion_dict(self, name, type_, comp):
"""Final construction of the completion dict."""
doc = comp.docstring()
i = doc.find('\n\n')
if i != -1:
doc = doc[i:]
params = None
try:
if type_ in ('function', 'class'):
params = []
for i, p in enumerate(comp.params):
desc = p.description.strip()
if i == 0 and desc == 'self':
continue
if '\\n' in desc:
desc = desc.replace('\\n', '\\x0A')
# Note: Hack for jedi param bugs
if desc.startswith('param ') or desc == 'param':
desc = desc[5:].strip()
if desc:
params.append(desc)
except Exception:
params = None
return {
'module': comp.module_path,
'name': name,
'type': type_,
'short_type': _types.get(type_),
'doc': doc.strip(),
'params': params,
}
def parse_completion(self, comp, cache):
"""Return a tuple describing the completion.
Returns (name, type, description, abbreviated)
"""
name = comp.name
type_ = comp.type
desc = comp.description
if type_ == 'instance' and desc.startswith(('builtins.', 'posix.')):
# Simple description
builtin_type = desc.rsplit('.', 1)[-1]
if builtin_type in _types:
return self.completion_dict(name, builtin_type, comp)
if type_ == 'class' and desc.startswith('builtins.'):
return self.completion_dict(name, type_, comp)
if type_ == 'function':
if comp.module_path not in cache and comp.line and comp.line > 1 \
and os.path.exists(comp.module_path):
with open(comp.module_path, 'r') as fp:
cache[comp.module_path] = fp.readlines()
lines = cache.get(comp.module_path)
if isinstance(lines, list) and len(lines) > 1 \
and comp.line < len(lines) and comp.line > 1:
# Check the function's decorators to check if it's decorated
# with @property
i = comp.line - 2
while i >= 0:
line = lines[i].lstrip()
if not line.startswith('@'):
break
if line.startswith('@property'):
return self.completion_dict(name, 'property', comp)
i -= 1
return self.completion_dict(name, type_, comp)
return self.completion_dict(name, type_, comp)
class Client(object):
"""Client object
This will be used by deoplete-jedi to interact with the server.
"""
max_completion_count = 50
def __init__(self, desc_len=0, short_types=False, show_docstring=False,
debug=False, python_path=None):
self._server = None
self.restarting = threading.Lock()
self.version = (0, 0, 0, 'final', 0)
self.env = os.environ.copy()
self.env.update({
'PYTHONPATH': os.pathsep.join(
(parso_path, jedi_path,
os.path.dirname(os.path.dirname(__file__)))),
})
if 'VIRTUAL_ENV' in os.environ:
self.env['VIRTUAL_ENV'] = os.getenv('VIRTUAL_ENV')
prog = os.path.join(self.env['VIRTUAL_ENV'], 'bin', 'python')
elif python_path:
prog = python_path
else:
prog = 'python'
self.cmd = [prog, '-u', __file__, '--desc-length', str(desc_len)]
if short_types:
self.cmd.append('--short-types')
if show_docstring:
self.cmd.append('--docstrings')
if debug:
self.cmd.extend(('--debug', debug[0], '--debug-level',
str(debug[1])))
try:
self.restart()
except Exception as exc:
from deoplete.exceptions import SourceInitError
raise SourceInitError('Failed to start server ({}): {}'.format(
' '.join(self.cmd), exc))
def shutdown(self):
"""Shut down the server."""
if self._server is not None and self._server.returncode is None:
# Closing the server's stdin will cause it to exit.
self._server.stdin.close()
self._server.kill()
def restart(self):
"""Start or restart the server
If a server is already running, shut it down.
"""
with self.restarting:
self.shutdown()
self._server = subprocess.Popen(self.cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.env)
# Might result in "pyenv: version `foo' is not installed (set by
# /cwd/.python-version)" on stderr.
try:
self.version = stream_read(self._server.stdout)
except StreamEmpty:
out, err = self._server.communicate()
raise Exception('Server exited with {}: error: {}'.format(
self._server.returncode, err))
self._count = 0
def completions(self, *args):
"""Get completions from the server.
If the number of completions already performed reaches a threshold,
restart the server.
"""
if self._count > self.max_completion_count:
self.restart()
self._count += 1
try:
stream_write(self._server.stdin, args)
return stream_read(self._server.stdout)
except StreamError as exc:
if self.restarting.acquire(False):
self.restarting.release()
log.error('Caught %s during handling completions(%s), '
' restarting server', exc, args)
self.restart()
time.sleep(0.2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc-length', type=int)
parser.add_argument('--short-types', action='store_true')
parser.add_argument('--docstrings', action='store_true')
parser.add_argument('--debug', default='')
parser.add_argument('--debug-level', type=int, default=logging.DEBUG)
args = parser.parse_args()
if args.debug:
log.removeHandler(nullHandler)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s '
'(%(name)s) %(message)s')
handler = logging.FileHandler(args.debug)
handler.setFormatter(formatter)
handler.setLevel(args.debug_level)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log = log.getChild('jedi.server')
s = Server(args.desc_length, args.short_types, args.docstrings)
s.run()
else:
log = log.getChild('jedi.client')

View File

@@ -1,90 +0,0 @@
import os
import re
import sys
def file_mtime(filename):
"""Get file modification time
Return 0 if the file does not exist
"""
if not os.path.exists(filename):
return 0
return int(os.path.getmtime(filename))
def module_file(dirname, suffix, base):
"""Find a script that matches the suffix path."""
search = os.path.abspath(os.path.join(dirname, suffix))
# dirname = os.path.dirname(dirname)
found = ''
while True:
p = os.path.join(search, '__init__.py')
if os.path.isfile(p):
found = p
break
p = search + '.py'
if os.path.isfile(p):
found = p
break
if os.path.basename(search) == base or search == dirname:
break
search = os.path.dirname(search)
return found
def module_search(module, paths):
"""Search paths for a file matching the module."""
if not module:
return ''
base = re.sub(r'\.+', '.', module).strip('.').split('.')[0]
module_path = os.path.normpath(re.sub(r'(\.+)', r'/\1/', module).strip('/'))
for p in paths:
found = module_file(p, module_path, base)
if found:
return found
return ''
def rplugin_runtime_paths(context):
"""Adds Neovim runtime paths.
Additional paths are added only if a `rplugin/python*` directory exists.
"""
paths = []
if context and 'cwd' in context:
cwd = context.get('cwd')
rplugins = ('rplugin/python{}'.format(sys.version_info[0]),
'rplugin/pythonx')
paths.extend(filter(os.path.exists,
(os.path.join(cwd, x)
for x in rplugins)))
if paths:
for rtp in context.get('runtimepath', '').split(','):
if not rtp:
continue
paths.extend(filter(os.path.exists,
(os.path.join(rtp, x)
for x in rplugins)))
return paths
def jedi_walk(completions, depth=0, max_depth=5):
"""Walk through Jedi objects
The purpose for this is to help find an object with a specific name. Once
found, the walking will stop.
"""
for c in completions:
yield c
if hasattr(c, 'description') and c.type == 'import':
d = c.description
if d.startswith('from ') and d.endswith('*') and depth < max_depth:
# Haven't determined the lowest Python 3 version required.
# If we determine 3.3, we can use `yield from`
for sub in jedi_walk(c.defined_names(), depth+1, max_depth):
yield sub

View File

@@ -1,85 +0,0 @@
import logging
import os
import queue
import threading
import time
from .server import Client
from .utils import file_mtime
log = logging.getLogger('deoplete.jedi.worker')
workers = []
work_queue = queue.Queue()
comp_queue = queue.Queue()
class Worker(threading.Thread):
daemon = True
def __init__(self, in_queue, out_queue, desc_len=0, server_timeout=10,
short_types=False, show_docstring=False, debug=False,
python_path=None):
self._client = Client(desc_len, short_types, show_docstring, debug,
python_path)
self.server_timeout = server_timeout
self.in_queue = in_queue
self.out_queue = out_queue
super(Worker, self).__init__()
self.log = log.getChild(self.name)
def completion_work(self, cache_key, extra_modules, source, line, col,
filename, options):
completions = self._client.completions(cache_key, source, line, col,
filename, options)
modules = {f: file_mtime(f) for f in extra_modules}
if completions is not None:
for c in completions:
m = c['module']
if m and m not in modules and os.path.exists(m):
modules[m] = file_mtime(m)
self.results = {
'cache_key': cache_key,
'time': time.time(),
'modules': modules,
'completions': completions,
}
def run(self):
while True:
try:
work = self.in_queue.get()
self.log.debug('Got work')
self.results = None
t = threading.Thread(target=self.completion_work, args=work)
t.start()
t.join(timeout=self.server_timeout)
if self.results:
self.out_queue.put(self.results)
self.log.debug('Completed work')
else:
self.log.warn('Restarting server because it\'s taking '
'too long')
# Kill all but the last queued job since they're most
# likely a backlog that is no longer relevant.
while self.in_queue.qsize() > 1:
self.in_queue.get()
self.in_queue.task_done()
self._client.restart()
self.in_queue.task_done()
except Exception:
self.log.debug('Worker error', exc_info=True)
def start(count, desc_len=0, server_timeout=10, short_types=False,
show_docstring=False, debug=False, python_path=None):
while count > 0:
t = Worker(work_queue, comp_queue, desc_len, server_timeout, short_types,
show_docstring, debug, python_path)
workers.append(t)
t.start()
log.debug('Started worker: %r', t)
count -= 1

View File

@@ -1,19 +0,0 @@
[run]
omit =
jedi/_compatibility.py
jedi/evaluate/site.py
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
# Don't complain about missing debug-only code:
def __repr__
if self\.debug
# Don't complain if tests don't hit defensive assertion code:
raise AssertionError
raise NotImplementedError
# Don't complain if non-runnable code isn't run:
if 0:
if __name__ == .__main__.:

View File

@@ -1,13 +0,0 @@
*~
*.sw?
*.pyc
.ropeproject
.tox
.coveralls.yml
.coverage
/build/
/docs/_build/
/dist/
jedi.egg-info/
record.json
/.cache/

View File

@@ -1,30 +0,0 @@
language: python
sudo: false
python:
- 2.6
- 2.7
- 3.3
- 3.4
- 3.5
- 3.6
- pypy
matrix:
allow_failures:
- python: pypy
- env: TOXENV=cov
- env: TOXENV=sith
include:
- python: 3.5
env: TOXENV=cov
- python: 3.5
env: TOXENV=sith
install:
- pip install --quiet tox-travis
script:
- tox
after_script:
- if [ $TOXENV == "cov" ]; then
pip install --quiet coveralls;
coveralls;
fi

View File

@@ -1,49 +0,0 @@
Main Authors
============
David Halter (@davidhalter) <davidhalter88@gmail.com>
Takafumi Arakaki (@tkf) <aka.tkf@gmail.com>
Code Contributors
=================
Danilo Bargen (@dbrgn) <mail@dbrgn.ch>
Laurens Van Houtven (@lvh) <_@lvh.cc>
Aldo Stracquadanio (@Astrac) <aldo.strac@gmail.com>
Jean-Louis Fuchs (@ganwell) <ganwell@fangorn.ch>
tek (@tek)
Yasha Borevich (@jjay) <j.borevich@gmail.com>
Aaron Griffin <aaronmgriffin@gmail.com>
andviro (@andviro)
Mike Gilbert (@floppym) <floppym@gentoo.org>
Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
Lubos Trilety <ltrilety@redhat.com>
Akinori Hattori (@hattya) <hattya@gmail.com>
srusskih (@srusskih)
Steven Silvester (@blink1073)
Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
Fredrik Bergroth (@fbergroth)
Mathias Fußenegger (@mfussenegger)
Syohei Yoshida (@syohex) <syohex@gmail.com>
ppalucky (@ppalucky)
immerrr (@immerrr) immerrr@gmail.com
Albertas Agejevas (@alga)
Savor d'Isavano (@KenetJervet) <newelevenken@163.com>
Phillip Berndt (@phillipberndt) <phillip.berndt@gmail.com>
Ian Lee (@IanLee1521) <IanLee1521@gmail.com>
Farkhad Khatamov (@hatamov) <comsgn@gmail.com>
Kevin Kelley (@kelleyk) <kelleyk@kelleyk.net>
Sid Shanker (@squidarth) <sid.p.shanker@gmail.com>
Reinoud Elhorst (@reinhrst)
Guido van Rossum (@gvanrossum) <guido@python.org>
Dmytro Sadovnychyi (@sadovnychyi) <jedi@dmit.ro>
Cristi Burcă (@scribu)
bstaint (@bstaint)
Mathias Rav (@Mortal) <rav@cs.au.dk>
Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
Simon Ruggier (@sruggier)
Élie Gouzien (@ElieGouzien)
Note: (@user) means a github user name.

View File

@@ -1,87 +0,0 @@
.. :changelog:
Changelog
---------
0.11.0 (2017-09-20)
+++++++++++++++++++
- Split Jedi's parser into a separate project called ``parso``.
- Avoiding side effects in REPL completion.
- Numpy docstring support should be much better.
- Moved the `settings.*recursion*` away, they are no longer usable.
0.10.2 (2017-04-05)
+++++++++++++++++++
- Python Packaging sucks. Some files were not included in 0.10.1.
0.10.1 (2017-04-05)
+++++++++++++++++++
- Fixed a few very annoying bugs.
- Prepared the parser to be factored out of Jedi.
0.10.0 (2017-02-03)
+++++++++++++++++++
- Actual semantic completions for the complete Python syntax.
- Basic type inference for ``yield from`` PEP 380.
- PEP 484 support (most of the important features of it). Thanks Claude! (@reinhrst)
- Added ``get_line_code`` to ``Definition`` and ``Completion`` objects.
- Completely rewritten the type inference engine.
- A new and better parser for (fast) parsing diffs of Python code.
0.9.0 (2015-04-10)
++++++++++++++++++
- The import logic has been rewritten to look more like Python's. There is now
an ``Evaluator.modules`` import cache, which resembles ``sys.modules``.
- Integrated the parser of 2to3. This will make refactoring possible. It will
also be possible to check for error messages (like compiling an AST would give)
in the future.
- With the new parser, the evaluation also completely changed. It's now simpler
and more readable.
- Completely rewritten REPL completion.
- Added ``jedi.names``, a command to do static analysis. Thanks to the
sourcegraph guys for sponsoring this!
- Alpha version of the linter.
0.8.1 (2014-07-23)
+++++++++++++++++++
- Bugfix release; the last release forgot to include files that improve
autocompletion for builtin libraries. Fixed.
0.8.0 (2014-05-05)
+++++++++++++++++++
- Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced
drastically. Loading times are down as well (it takes basically as long as an
import).
- REPL completion is starting to become usable.
- Various small API changes. Generally this release focuses on stability and
refactoring of internal APIs.
- Introducing operator precedence, which makes calculating correct Array
indices and ``__getattr__`` strings possible.
0.7.0 (2013-08-09)
++++++++++++++++++
- Switched from LGPL to MIT license.
- Added an Interpreter class to the API to make autocompletion in REPL
possible.
- Added autocompletion support for namespace packages.
- Add sith.py, a new random testing method.
0.6.0 (2013-05-14)
++++++++++++++++++
- Much faster parser with builtin part caching.
- A test suite, thanks @tkf.
0.5 versions (2012)
+++++++++++++++++++
- Initial development.

View File

@ -1,8 +0,0 @@
Pull Requests are great.
1. Fork the Repo on github.
2. If you are adding functionality or fixing a bug, please add a test!
3. Add your name to AUTHORS.txt
4. Push to your fork and submit a pull request.
**Try to use the PEP8 style guide.**

View File

@ -1,24 +0,0 @@
All contributions towards Jedi are MIT licensed.
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) <2013> <David Halter and others, see AUTHORS.txt>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,15 +0,0 @@
include README.rst
include CHANGELOG.rst
include LICENSE.txt
include AUTHORS.txt
include .coveragerc
include sith.py
include conftest.py
include pytest.ini
include tox.ini
include requirements.txt
include jedi/evaluate/compiled/fake/*.pym
include jedi/parser/python/grammar*.txt
recursive-include test *
recursive-include docs *
recursive-exclude * *.pyc

View File

@ -1,215 +0,0 @@
###################################################################
Jedi - an awesome autocompletion/static analysis library for Python
###################################################################
.. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master
:target: http://travis-ci.org/davidhalter/jedi
:alt: Travis-CI build status
.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master
:target: https://coveralls.io/r/davidhalter/jedi
:alt: Coverage Status
*If you have specific questions, please add an issue or ask on* `stackoverflow
<https://stackoverflow.com/questions/tagged/python-jedi>`_ *with the label* ``python-jedi``.
Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but it now does static analysis as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi uses a very simple API to connect with IDEs. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
Jedi can currently be used with the following editors/projects:
- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_)
- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_)
- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
- TextMate_ (Not sure if it's actually working)
- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof
<https://projects.kde.org/projects/kde/applications/kate/repository/show?rev=KDE%2F4.13>`_]
- Atom_ (autocomplete-python-jedi_)
- SourceLair_
- `GNOME Builder`_ (with support for GObject Introspection)
- `Visual Studio Code`_ (via `Python Extension <https://marketplace.visualstudio.com/items?itemName=donjayamanne.python>`_)
- Gedit (gedi_)
- wdb_ - Web Debugger
- `Eric IDE`_ (Available as a plugin)
- `Ipython 6.0.0+ <http://ipython.readthedocs.io/en/stable/whatsnew/version6.html>`_
and many more!
Here are some pictures taken from jedi-vim_:
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png
Completion for almost anything (Ctrl+Space).
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png
Display of function/class bodies, docstrings.
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png
Pydoc support (Shift+k).
There is also support for goto and renaming.
Get the latest version from `github <https://github.com/davidhalter/jedi>`_
(master branch should always be kind of stable/working).
Docs are available at `https://jedi.readthedocs.org/en/latest/
<https://jedi.readthedocs.org/en/latest/>`_. Pull requests with documentation
enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic
versioning <http://semver.org/>`_.
Installation
============
pip install jedi
Note: This just installs the Jedi library, not the editor plugins. For
information about how to make it work with your editor, refer to the
corresponding documentation.
You don't want to use ``pip``? Please refer to the `manual
<https://jedi.readthedocs.org/en/latest/docs/installation.html>`_.
Feature Support and Caveats
===========================
Jedi really understands your Python code. For a comprehensive list of what Jedi
understands, see: `Features
<https://jedi.readthedocs.org/en/latest/docs/features.html>`_. A list of
caveats can be found on the same page.
You can run Jedi on CPython 2.6, 2.7, 3.3, 3.4 or 3.5, but it should also
understand/parse code older than those versions.
Tips on how to use Jedi efficiently can be found `here
<https://jedi.readthedocs.org/en/latest/docs/features.html#recipes>`_.
API
---
You can find the documentation for the `API here <https://jedi.readthedocs.org/en/latest/docs/plugin-api.html>`_.
Autocompletion / Goto / Pydoc
-----------------------------
Please check the API for a good explanation. There are the following commands:
- ``jedi.Script.goto_assignments``
- ``jedi.Script.completions``
- ``jedi.Script.usages``
The returned objects are very powerful and really all you might need.
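A minimal sketch of how these calls are typically driven (the ``source`` string,
file name and cursor position below are invented for illustration; ``line`` is
1-based and ``column`` is 0-based in this API)::

    import jedi

    source = "import json\njson.lo"
    # Cursor at the end of line 2 ("json.lo"); 'example.py' is only a label here.
    script = jedi.Script(source, 2, len("json.lo"), 'example.py')
    print([c.name for c in script.completions()])   # e.g. ['load', 'loads']

The same ``Script`` object also exposes ``goto_assignments()`` and ``usages()``.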
Autocompletion in your REPL (IPython, etc.)
-------------------------------------------
Starting with IPython `6.0.0`, Jedi is a dependency of IPython. Autocompletion
in IPython is therefore possible without additional configuration.
It's possible to have Jedi autocompletion in REPL modes - `example video <https://vimeo.com/122332037>`_.
This means that in Python you can enable tab completion in a `REPL
<https://jedi.readthedocs.org/en/latest/docs/usage.html#tab-completion-in-the-python-shell>`_.
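One possible way to wire this up for the plain CPython shell is jedi's readline
helper; this is only a sketch and assumes you point the ``PYTHONSTARTUP``
environment variable at a startup file::

    # Contents of the file referenced by $PYTHONSTARTUP (an assumed setup).
    from jedi.utils import setup_readline
    setup_readline()   # enables Jedi tab completion in the plain Python REPL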
Static Analysis / Linter
------------------------
To do all forms of static analysis, please try to use ``jedi.names``. It will
return a list of names that you can use to infer types and so on.
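A small, non-authoritative sketch of ``jedi.names`` (the sample module source is
invented for illustration)::

    import jedi

    source = '''
    import os

    def walk_once(path):
        return next(os.walk(path))
    '''
    # One Definition object per name; each knows its kind and position.
    for definition in jedi.names(source, all_scopes=True):
        print(definition.name, definition.type, definition.line)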
Linting is another thing that is going to be part of Jedi. For now you can try
an alpha version ``python -m jedi linter``. The API might change though and
it's still buggy. It's Jedi's goal to be smarter than classic linters and
understand ``AttributeError`` and other code issues.
Refactoring
-----------
Jedi's parser would support refactoring, but there's no API to use it right
now. If you're interested in helping out here, let me know. With the latest
parser changes, it should be very easy to actually make it work.
Development
===========
There's a pretty good and extensive `development documentation
<https://jedi.readthedocs.org/en/latest/docs/development.html>`_.
Testing
=======
The test suite depends on ``tox`` and ``pytest``::
pip install tox pytest
To run the tests for all supported Python versions::
tox
If you want to test only a specific Python version (e.g. Python 2.7), it's as
easy as ::
tox -e py27
Tests are also run automatically on `Travis CI
<https://travis-ci.org/davidhalter/jedi/>`_.
For more detailed information visit the `testing documentation
<https://jedi.readthedocs.org/en/latest/docs/testing.html>`_
Acknowledgements
================
- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of
other things.
- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :).
- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
(originally used in lib2to3).
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: http://valloric.github.io/YouCompleteMe/
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
.. _completor.vim: https://github.com/maralla/completor.vim
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _company-mode: https://github.com/syohex/emacs-company-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
.. _ycmd: https://github.com/abingham/emacs-ycmd
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _wdb: https://github.com/Kozea/wdb
.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
.. _Kate: http://kate-editor.org
.. _Atom: https://atom.io/
.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi
.. _SourceLair: https://www.sourcelair.com
.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder
.. _Visual Studio Code: https://code.visualstudio.com/
.. _gedi: https://github.com/isamert/gedi
.. _Eric IDE: http://eric-ide.python-projects.org

View File

@ -1,72 +0,0 @@
import tempfile
import shutil
import pytest
import jedi
collect_ignore = ["setup.py"]
# The following hooks (pytest_configure, pytest_unconfigure) are used
# to modify `jedi.settings.cache_directory` because `clean_jedi_cache`
# has no effect during doctests. Without these hooks, doctests use the
# user's cache (e.g., ~/.cache/jedi/). We should remove this
# workaround once the problem is fixed in py.test.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
# - https://bitbucket.org/hpk42/pytest/issue/275/
jedi_cache_directory_orig = None
jedi_cache_directory_temp = None
def pytest_addoption(parser):
    parser.addoption("--jedi-debug", "-D", action='store_true',
                     help="Enables Jedi's debug output.")
    parser.addoption("--warning-is-error", action='store_true',
                     help="Warnings are treated as errors.")


def pytest_configure(config):
    global jedi_cache_directory_orig, jedi_cache_directory_temp
    jedi_cache_directory_orig = jedi.settings.cache_directory
    jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-')
    jedi.settings.cache_directory = jedi_cache_directory_temp
    if config.option.jedi_debug:
        jedi.set_debug_function()
    if config.option.warning_is_error:
        import warnings
        warnings.simplefilter("error")


def pytest_unconfigure(config):
    global jedi_cache_directory_orig, jedi_cache_directory_temp
    jedi.settings.cache_directory = jedi_cache_directory_orig
    shutil.rmtree(jedi_cache_directory_temp)


@pytest.fixture(scope='session')
def clean_jedi_cache(request):
    """
    Set `jedi.settings.cache_directory` to a temporary directory during test.

    Note that you can't use built-in `tmpdir` and `monkeypatch`
    fixture here because their scope is 'function', which is not used
    in 'session' scope fixture.

    This fixture is activated in ../pytest.ini.
    """
    from jedi import settings
    old = settings.cache_directory
    tmp = tempfile.mkdtemp(prefix='jedi-test-')
    settings.cache_directory = tmp

    @request.addfinalizer
    def restore():
        settings.cache_directory = old
        shutil.rmtree(tmp)

View File

@ -1,52 +0,0 @@
#!/usr/bin/env bash
# The script creates a separate folder in build/ and creates tags there, pushes
# them and then uploads the package to PyPI.
set -eu -o pipefail
BASE_DIR=$(dirname $(readlink -f "$0"))
cd $BASE_DIR
git fetch --tags
PROJECT_NAME=jedi
BRANCH=master
BUILD_FOLDER=build
[ -d $BUILD_FOLDER ] || mkdir $BUILD_FOLDER
# Remove the previous deployment first.
# Checkout the right branch
cd $BUILD_FOLDER
rm -rf $PROJECT_NAME
git clone .. $PROJECT_NAME
cd $PROJECT_NAME
git checkout $BRANCH
# Test first.
tox
# Create tag
tag=v$(python -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)")
master_ref=$(git show-ref -s heads/$BRANCH)
tag_ref=$(git show-ref -s $tag || true)
if [[ $tag_ref ]]; then
if [[ $tag_ref != $master_ref ]]; then
echo 'Cannot tag something that has already been tagged with another commit.'
exit 1
fi
else
git tag $tag
git push --tags
fi
# Package and upload to PyPI
#rm -rf dist/ - Not needed anymore, because the folder is never reused.
echo `pwd`
python setup.py sdist bdist_wheel
# Maybe do a pip install twine before.
twine upload dist/*
cd $BASE_DIR
# The tags have been pushed to this repo. Push the tags to github, now.
git push --tags

View File

@ -1,153 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Jedi.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Jedi.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/Jedi"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Jedi"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

View File

@ -1,14 +0,0 @@
Installation
------------
Install the graphviz library::
sudo apt-get install graphviz
Install sphinx::
sudo pip install sphinx
You might also need to install the Python graphviz interface::
sudo pip install graphviz

View File

@ -1,3 +0,0 @@
The source of the logo is a photoshop file hosted here:
https://dl.dropboxusercontent.com/u/170011615/Jedi12_Logo.psd.xz

Binary file not shown.

(image; 28 KiB before deletion)

View File

@ -1,4 +0,0 @@
<h3>Github</h3>
<iframe src="http://ghbtns.com/github-btn.html?user=davidhalter&repo=jedi&type=watch&count=true&size=large"
frameborder="0" scrolling="0" width="170" height="30" allowtransparency="true"></iframe>
<br><br>

View File

@ -1,3 +0,0 @@
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/logo.png', 1) }}" alt="Logo"/>
</a></p>

View File

@ -1,37 +0,0 @@
Copyright (c) 2010 by Armin Ronacher.
Some rights reserved.
Redistribution and use in source and binary forms of the theme, with or
without modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
We kindly ask you to only use these themes in an unmodified manner just
for Flask and Flask-related products, not for unrelated projects. If you
like the visual style and want to use it for your own projects, please
consider making some larger changes to the themes (such as changing
font faces, sizes, colors or margins).
THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,28 +0,0 @@
{%- extends "basic/layout.html" %}
{%- block extrahead %}
{{ super() }}
{% if theme_touch_icon %}
<link rel="apple-touch-icon" href="{{ pathto('_static/' ~ theme_touch_icon, 1) }}" />
{% endif %}
<link media="only screen and (max-device-width: 480px)" href="{{
pathto('_static/small_flask.css', 1) }}" type= "text/css" rel="stylesheet" />
<a href="https://github.com/davidhalter/jedi">
<img style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub">
</a>
{% endblock %}
{%- block relbar2 %}{% endblock %}
{% block header %}
{{ super() }}
{% if pagename == 'index' %}
<div class=indexwrapper>
{% endif %}
{% endblock %}
{%- block footer %}
<div class="footer">
&copy; Copyright {{ copyright }}.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a>.
</div>
{% if pagename == 'index' %}
</div>
{% endif %}
{%- endblock %}

View File

@ -1,19 +0,0 @@
<h3>Related Topics</h3>
<ul>
<li><a href="{{ pathto(master_doc) }}">Documentation overview</a><ul>
{%- for parent in parents %}
<li><a href="{{ parent.link|e }}">{{ parent.title }}</a><ul>
{%- endfor %}
{%- if prev %}
<li>Previous: <a href="{{ prev.link|e }}" title="{{ _('previous chapter')
}}">{{ prev.title }}</a></li>
{%- endif %}
{%- if next %}
<li>Next: <a href="{{ next.link|e }}" title="{{ _('next chapter')
}}">{{ next.title }}</a></li>
{%- endif %}
{%- for parent in parents %}
</ul></li>
{%- endfor %}
</ul></li>
</ul>

View File

@ -1,394 +0,0 @@
/*
* flasky.css_t
* ~~~~~~~~~~~~
*
* :copyright: Copyright 2010 by Armin Ronacher.
* :license: Flask Design License, see LICENSE for details.
*/
{% set page_width = '940px' %}
{% set sidebar_width = '220px' %}
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: 'Georgia', serif;
font-size: 17px;
background-color: white;
color: #000;
margin: 0;
padding: 0;
}
div.document {
width: {{ page_width }};
margin: 30px auto 0 auto;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 {{ sidebar_width }};
}
div.sphinxsidebar {
width: {{ sidebar_width }};
}
hr {
border: 1px solid #B1B4B6;
}
div.body {
background-color: #ffffff;
color: #3E4349;
padding: 0 30px 0 30px;
}
img.floatingflask {
padding: 0 0 10px 10px;
float: right;
}
div.footer {
width: {{ page_width }};
margin: 20px auto 30px auto;
font-size: 14px;
color: #888;
text-align: right;
}
div.footer a {
color: #888;
}
div.related {
display: none;
}
div.sphinxsidebar a {
color: #444;
text-decoration: none;
border-bottom: 1px dotted #999;
}
div.sphinxsidebar a:hover {
border-bottom: 1px solid #999;
}
div.sphinxsidebar {
font-size: 14px;
line-height: 1.5;
}
div.sphinxsidebarwrapper {
padding: 18px 10px;
}
div.sphinxsidebarwrapper p.logo {
padding: 0 0 20px 0;
margin: 0;
text-align: center;
}
div.sphinxsidebar h3,
div.sphinxsidebar h4 {
font-family: 'Garamond', 'Georgia', serif;
color: #444;
font-size: 24px;
font-weight: normal;
margin: 0 0 5px 0;
padding: 0;
}
div.sphinxsidebar h4 {
font-size: 20px;
}
div.sphinxsidebar h3 a {
color: #444;
}
div.sphinxsidebar p.logo a,
div.sphinxsidebar h3 a,
div.sphinxsidebar p.logo a:hover,
div.sphinxsidebar h3 a:hover {
border: none;
}
div.sphinxsidebar p {
color: #555;
margin: 10px 0;
}
div.sphinxsidebar ul {
margin: 10px 0;
padding: 0;
color: #000;
}
div.sphinxsidebar input {
border: 1px solid #ccc;
font-family: 'Georgia', serif;
font-size: 1em;
}
/* -- body styles ----------------------------------------------------------- */
a {
color: #004B6B;
text-decoration: underline;
}
a:hover {
color: #6D4100;
text-decoration: underline;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: 'Garamond', 'Georgia', serif;
font-weight: normal;
margin: 30px 0px 10px 0px;
padding: 0;
}
{% if theme_index_logo %}
div.indexwrapper h1 {
text-indent: -999999px;
background: url({{ theme_index_logo }}) no-repeat center center;
height: {{ theme_index_logo_height }};
}
{% endif %}
div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
div.body h2 { font-size: 180%; }
div.body h3 { font-size: 150%; }
div.body h4 { font-size: 130%; }
div.body h5 { font-size: 100%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #ddd;
padding: 0 4px;
text-decoration: none;
}
a.headerlink:hover {
color: #444;
}
div.body p, div.body dd, div.body li {
line-height: 1.4em;
}
div.admonition {
background: #fafafa;
margin: 20px -30px;
padding: 10px 30px;
border-top: 1px solid #ccc;
border-bottom: 1px solid #ccc;
}
div.admonition tt.xref, div.admonition a tt {
border-bottom: 1px solid #fafafa;
}
dd div.admonition {
margin-left: -60px;
padding-left: 60px;
}
div.admonition p.admonition-title {
font-family: 'Garamond', 'Georgia', serif;
font-weight: normal;
font-size: 24px;
margin: 0 0 10px 0;
padding: 0;
line-height: 1;
}
div.admonition p.last {
margin-bottom: 0;
}
div.highlight {
background-color: white;
}
dt:target, .highlight {
background: #FAF3E8;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre, tt {
font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
img.screenshot {
}
tt.descname, tt.descclassname {
font-size: 0.95em;
}
tt.descname {
padding-right: 0.08em;
}
img.screenshot {
-moz-box-shadow: 2px 2px 4px #eee;
-webkit-box-shadow: 2px 2px 4px #eee;
box-shadow: 2px 2px 4px #eee;
}
table.docutils {
border: 1px solid #888;
-moz-box-shadow: 2px 2px 4px #eee;
-webkit-box-shadow: 2px 2px 4px #eee;
box-shadow: 2px 2px 4px #eee;
}
table.docutils td, table.docutils th {
border: 1px solid #888;
padding: 0.25em 0.7em;
}
table.field-list, table.footnote {
border: none;
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
table.footnote {
margin: 15px 0;
width: 100%;
border: 1px solid #eee;
background: #fdfdfd;
font-size: 0.9em;
}
table.footnote + table.footnote {
margin-top: -15px;
border-top: none;
}
table.field-list th {
padding: 0 0.8em 0 0;
}
table.field-list td {
padding: 0;
}
table.footnote td.label {
width: 0px;
padding: 0.3em 0 0.3em 0.5em;
}
table.footnote td {
padding: 0.3em 0.5em;
}
dl {
margin: 0;
padding: 0;
}
dl dd {
margin-left: 30px;
}
blockquote {
margin: 0 0 0 30px;
padding: 0;
}
ul, ol {
margin: 10px 0 10px 30px;
padding: 0;
}
pre {
background: #eee;
padding: 7px 30px;
margin: 15px -30px;
line-height: 1.3em;
}
dl pre, blockquote pre, li pre {
margin-left: -60px;
padding-left: 60px;
}
dl dl pre {
margin-left: -90px;
padding-left: 90px;
}
tt {
background-color: #ecf0f3;
color: #222;
/* padding: 1px 2px; */
}
tt.xref, a tt {
background-color: #FBFBFB;
border-bottom: 1px solid white;
}
a.reference {
text-decoration: none;
border-bottom: 1px dotted #004B6B;
}
a.reference:hover {
border-bottom: 1px solid #6D4100;
}
a.footnote-reference {
text-decoration: none;
font-size: 0.7em;
vertical-align: top;
border-bottom: 1px dotted #004B6B;
}
a.footnote-reference:hover {
border-bottom: 1px solid #6D4100;
}
a:hover tt {
background: #EEE;
}

View File

@ -1,70 +0,0 @@
/*
* small_flask.css_t
* ~~~~~~~~~~~~~~~~~
*
* :copyright: Copyright 2010 by Armin Ronacher.
* :license: Flask Design License, see LICENSE for details.
*/
body {
margin: 0;
padding: 20px 30px;
}
div.documentwrapper {
float: none;
background: white;
}
div.sphinxsidebar {
display: block;
float: none;
width: 102.5%;
margin: 50px -30px -20px -30px;
padding: 10px 20px;
background: #333;
color: white;
}
div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
div.sphinxsidebar h3 a {
color: white;
}
div.sphinxsidebar a {
color: #aaa;
}
div.sphinxsidebar p.logo {
display: none;
}
div.document {
width: 100%;
margin: 0;
}
div.related {
display: block;
margin: 0;
padding: 10px 0 20px 0;
}
div.related ul,
div.related ul li {
margin: 0;
padding: 0;
}
div.footer {
display: none;
}
div.bodywrapper {
margin: 0;
}
div.body {
min-height: 0;
padding: 0;
}

View File

@ -1,9 +0,0 @@
[theme]
inherit = basic
stylesheet = flasky.css
pygments_style = flask_theme_support.FlaskyStyle
[options]
index_logo =
index_logo_height = 120px
touch_icon =

View File

@ -1,125 +0,0 @@
"""
Copyright (c) 2010 by Armin Ronacher.
Some rights reserved.
Redistribution and use in source and binary forms of the theme, with or
without modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
We kindly ask you to only use these themes in an unmodified manner just
for Flask and Flask-related products, not for unrelated projects. If you
like the visual style and want to use it for your own projects, please
consider making some larger changes to the themes (such as changing
font faces, sizes, colors or margins).
THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    background_color = "#f8f8f8"
    default_style = ""
    styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}

View File

@ -1,291 +0,0 @@
# -*- coding: utf-8 -*-
#
# Jedi documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 26 00:11:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo',
'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jedi'
copyright = u'2012 - {today.year}, Jedi contributors'.format(today=datetime.date.today())
import jedi
from jedi.utils import version_info
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(str(x) for x in version_info()[:2])
# The full version, including alpha/beta/rc tags.
release = jedi.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'sidebarlogo.html',
'localtoc.html',
#'relations.html',
'ghbuttons.html',
#'sourcelink.html',
#'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Jedidoc'
#html_style = 'default.css' # Force usage of default template on RTD
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Jedi.tex', u'Jedi Documentation',
u'Jedi contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jedi', u'Jedi Documentation',
[u'Jedi contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Jedi', u'Jedi Documentation',
u'Jedi contributors', 'Jedi', 'Awesome Python autocompletion library.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for todo module ---------------------------------------------------
todo_include_todos = False
# -- Options for autodoc module ------------------------------------------------
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = []
#autodoc_default_flags = ['members', 'undoc-members']
# -- Options for intersphinx module --------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/': None,
}
def skip_deprecated(app, what, name, obj, skip, options):
    """
    All attributes containing a deprecated note shouldn't be documented
    anymore. This makes it even clearer that they are not supported anymore.
    """
    doc = obj.__doc__
    return skip or doc and '.. deprecated::' in doc


def setup(app):
    app.connect('autodoc-skip-member', skip_deprecated)

View File

@ -1,244 +0,0 @@
.. include:: ../global.rst
Jedi Development
================
.. currentmodule:: jedi
.. note:: This documentation is for Jedi developers who want to improve Jedi
itself, but have no idea how Jedi works. If you want to use Jedi for
your IDE, look at the `plugin api <plugin-api.html>`_.
Introduction
------------
This page tries to address the fundamental demand for documentation of the
|jedi| internals. Understanding a dynamic language is a complex task, especially
because type inference in Python can be a very recursive task. Therefore |jedi|
couldn't avoid complexity. I know that **simple is better than complex**,
but unfortunately it sometimes requires complex solutions to understand complex
systems.
Since most of the Jedi internals have been written by me (David Halter), this
introduction will be written mostly by me, because no one else understands them
to the same level. That is also the reason this part of the documentation
exists: to enable more people to edit the Jedi core.
In five chapters I'm trying to describe the internals of |jedi|:
- :ref:`The Jedi Core <core>`
- :ref:`Core Extensions <core-extensions>`
- :ref:`Imports & Modules <imports-modules>`
- :ref:`Caching & Recursions <caching-recursions>`
- :ref:`Helper modules <dev-helpers>`
.. note:: Testing is not documented here; you'll find that
`right here <testing.html>`_.
.. _core:
The Jedi Core
-------------
The core of Jedi consists of three parts:
- :ref:`Parser <parser>`
- :ref:`Python code evaluation <evaluate>`
- :ref:`API <dev-api>`
Most people are probably interested in :ref:`code evaluation <evaluate>`,
because that's where all the magic happens. I need to introduce the :ref:`parser
<parser>` first, because :mod:`jedi.evaluate` uses it extensively.
.. _parser:
Parser (parser/__init__.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.parser
Parser Tree (parser/tree.py)
++++++++++++++++++++++++++++++++++++++++++++++++
.. automodule:: jedi.parser.tree
Class inheritance diagram:
.. inheritance-diagram::
Module
Class
Function
Lambda
Flow
ForStmt
Import
ExprStmt
Param
Name
CompFor
:parts: 1
.. _evaluate:
Evaluation of python code (evaluate/__init__.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.evaluate
Evaluation Representation (evaluate/representation.py)
++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. automodule:: jedi.evaluate.representation
.. inheritance-diagram::
jedi.evaluate.instance.TreeInstance
jedi.evaluate.representation.ClassContext
jedi.evaluate.representation.FunctionContext
jedi.evaluate.representation.FunctionExecutionContext
:parts: 1
.. _name_resolution:
Name resolution (evaluate/finder.py)
++++++++++++++++++++++++++++++++++++
.. automodule:: jedi.evaluate.finder
.. _dev-api:
API (api.py and api_classes.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The API has been designed to be as easy to use as possible. The API
documentation can be found `here <plugin-api.html>`_. The API itself contains
little code that needs to be mentioned here. Generally I'm trying to be
conservative with the API. I'd rather not add new API features if they are not
necessary, because it's much harder to deprecate stuff than to add it later.
.. _core-extensions:
Core Extensions
---------------
Core Extensions is a summary of the following topics:
- :ref:`Iterables & Dynamic Arrays <iterables>`
- :ref:`Dynamic Parameters <dynamic>`
- :ref:`Diff Parser <diff-parser>`
- :ref:`Docstrings <docstrings>`
- :ref:`Refactoring <refactoring>`
These topics are very important for understanding what Jedi additionally does;
they could be removed and Jedi would still work, just slower and
without some features.
.. _iterables:
Iterables & Dynamic Arrays (evaluate/iterable.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To understand Python on a deeper level, |jedi| needs to understand some of the
dynamic features of Python like lists that are filled after creation:
.. automodule:: jedi.evaluate.iterable
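As an illustrative sketch (not taken from the Jedi test suite), this is the kind
of situation meant here; |jedi| has to pick up the later ``append`` call to
infer the element type of an initially empty list::

    import jedi

    source = "names = []\nnames.append('jedi')\nnames[0].up"
    # Completing at the end of line 3 should offer str methods such as upper().
    script = jedi.Script(source, 3, len("names[0].up"), 'example.py')
    print(sorted(c.name for c in script.completions()))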
.. _dynamic:
Parameter completion (evaluate/dynamic.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.evaluate.dynamic
.. _diff-parser:
Diff Parser (parser/diff.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.parser.python.diff
.. _docstrings:
Docstrings (evaluate/docstrings.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.evaluate.docstrings
.. _refactoring:
Refactoring (evaluate/refactoring.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.refactoring
.. _imports-modules:
Imports & Modules
-------------------
- :ref:`Modules <modules>`
- :ref:`Builtin Modules <builtin>`
- :ref:`Imports <imports>`
.. _builtin:
Compiled Modules (evaluate/compiled.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.evaluate.compiled
.. _imports:
Imports (evaluate/imports.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.evaluate.imports
.. _caching-recursions:
Caching & Recursions
--------------------
- :ref:`Caching <cache>`
- :ref:`Recursions <recursion>`
.. _cache:
Caching (cache.py)
~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.cache
.. _recursion:
Recursions (recursion.py)
~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.evaluate.recursion
.. _dev-helpers:
Helper Modules
---------------
Most other modules are not really central to how Jedi works. They all contain
relevant code, but if you understand the modules above, you pretty much
understand Jedi.
Python 2/3 compatibility (_compatibility.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi._compatibility

View File

@ -1,263 +0,0 @@
.. include:: ../global.rst
Features and Caveats
====================
Jedi obviously supports autocompletion. It's also possible to get it working in
:ref:`your REPL (IPython, etc.) <repl-completion>`.
Static analysis is also possible by using the command ``jedi.names``.
The Jedi Linter is currently in an alpha version and can be tested by calling
``python -m jedi linter``.
Jedi would in theory support refactoring, but we have never publicized it,
because it's not production ready. If you're interested in helping out here,
let me know. With the latest parser changes, it should be very easy to actually
make it work.
General Features
----------------
- python 2.6+ and 3.3+ support
- ignores syntax errors and wrong indentation
- can deal with complex module / function / class structures
- virtualenv support
- can infer function arguments from sphinx, epydoc and basic numpydoc docstrings,
and PEP0484-style type hints (:ref:`type hinting <type-hinting>`)
Supported Python Features
-------------------------
|jedi| supports many of the widely used Python features:
- builtins
- returns, yields, yield from
- tuple assignments / array indexing / dictionary indexing / star unpacking
- with-statement / exception handling
- ``*args`` / ``**kwargs``
- decorators / lambdas / closures
- generators / iterators
- some descriptors: property / staticmethod / classmethod
- some magic methods: ``__call__``, ``__iter__``, ``__next__``, ``__get__``,
``__getitem__``, ``__init__``
- ``list.append()``, ``set.add()``, ``list.extend()``, etc.
- (nested) list comprehensions / ternary expressions
- relative imports
- ``getattr()`` / ``__getattr__`` / ``__getattribute__``
- function annotations (py3k feature, are ignored right now, but being parsed.
I don't know what to do with them.)
- class decorators (py3k feature, are being ignored too, until I find a use
case, that doesn't work with |jedi|)
- simple/usual ``sys.path`` modifications
- ``isinstance`` checks for if/while/assert
- namespace packages (includes ``pkgutil`` and ``pkg_resources`` namespaces)
- Django / Flask / Buildout support
Unsupported Features
--------------------
Not yet implemented:
- manipulations of instances outside the instance variables without using
methods
- implicit namespace packages (Python 3.3+, `PEP 420 <https://www.python.org/dev/peps/pep-0420/>`_)
Will probably never be implemented:
- metaclasses (how could an auto-completion ever support this)
- ``setattr()``, ``__import__()``
- writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__``
- evaluating ``if`` / ``while`` / ``del``
Caveats
-------
**Malformed Syntax**
Syntax errors and other strange stuff may lead to undefined behaviour of the
completion. |jedi| is **NOT** a Python compiler that tries to correct you. It
is a tool that wants to help you. But **YOU** have to know Python, not |jedi|.
**Legacy Python 2 Features**
This framework should work for both Python 2/3. However, some things were just
not as *pythonic* in Python 2 as things should be. To keep things simple, some
older Python 2 features have been left out:
- Classes: Always Python 3 like, therefore all classes inherit from ``object``.
- Generators: No ``next()`` method. The ``__next__()`` method is used instead.
**Slow Performance**
Importing ``numpy`` can be quite slow sometimes, as well as loading the
builtins the first time. If you want to speed things up, you could write import
hooks in |jedi|, which preload stuff. However, once loaded, this is not a
problem anymore. The same is true for huge modules like ``PySide``, ``wx``,
etc.
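A hedged sketch of such a warm-up (assuming your editor plugin gives you some
start-up hook to run it in)::

    import jedi

    # Run once at editor start-up; later completions reuse jedi's caches.
    jedi.preload_module('numpy', 'PySide')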
**Security**
Security is an important issue for |jedi|. Therefore no Python code is
executed. As long as you write pure python, everything is evaluated
statically. But: If you use builtin modules (``c_builtin``) there is no other
option than to execute those modules. However, executing them isn't that critical (as
e.g. in pythoncomplete, which used to execute *every* import!), because it
means one import and no more. So basically the only dangerous thing is using
the import itself. If your ``c_builtin`` uses some strange initializations, it
might be dangerous. But if it does you're screwed anyways, because eventually
you're going to execute your code, which executes the import.
Recipes
-------
Here are some tips on how to use |jedi| efficiently.
.. _type-hinting:
Type Hinting
~~~~~~~~~~~~
If |jedi| cannot detect the type of a function argument correctly (due to the
dynamic nature of Python), you can help it by hinting the type using
one of the following docstring/annotation syntax styles:
**PEP-0484 style**
https://www.python.org/dev/peps/pep-0484/
function annotations (python 3 only; python 2 function annotations with
comments is planned but not yet implemented)
::
def myfunction(node: ProgramNode, foo: str) -> None:
"""Do something with a ``node``.
"""
node.| # complete here
assignment, for-loop and with-statement type hints (all python versions).
Note that the type hints must be on the same line as the statement
::
x = foo() # type: int
x, y = 2, 3 # type: typing.Optional[int], typing.Union[int, str] # typing module is mostly supported
for key, value in foo.items(): # type: str, Employee # note that Employee must be in scope
pass
with foo() as f: # type: int
print(f + 3)
Most of the features in PEP-0484 are supported including the typing module
(for python < 3.5 you have to do ``pip install typing`` to use these),
and forward references.
Things that are missing (and this is not an exhaustive list; some of these
are planned, others might be hard to implement and provide little worth):
- annotating functions with comments: https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code
- understanding ``typing.cast()``
- stub files: https://www.python.org/dev/peps/pep-0484/#stub-files
- ``typing.Callable``
- ``typing.TypeVar``
- User defined generic types: https://www.python.org/dev/peps/pep-0484/#user-defined-generic-types
**Sphinx style**
http://sphinx-doc.org/domains.html#info-field-lists
::
    def myfunction(node, foo):
        """Do something with a ``node``.
        :type node: ProgramNode
        :param str foo: foo parameter description
        """
        node.| # complete here
**Epydoc**
http://epydoc.sourceforge.net/manual-fields.html
::
    def myfunction(node):
        """Do something with a ``node``.
        @type node: ProgramNode
        """
        node.| # complete here
**Numpydoc**
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
In order to support the numpydoc format, you need to install the `numpydoc
<https://pypi.python.org/pypi/numpydoc>`__ package.
::
    def foo(var1, var2, long_var_name='hi'):
        r"""A one-line summary that does not use variable names or the
        function name.
        ...
        Parameters
        ----------
        var1 : array_like
            Array_like means all those objects -- lists, nested lists,
            etc. -- that can be converted to an array. We can also
            refer to variables like `var1`.
        var2 : int
            The type above can either refer to an actual Python type
            (e.g. ``int``), or describe the type of the variable in more
            detail, e.g. ``(N,) ndarray`` or ``array_like``.
        long_var_name : {'hi', 'ho'}, optional
            Choices in brackets, default first when optional.
        ...
        """
        var2.| # complete here
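A quick way to check that such a hint is actually picked up is to run the
completion through the public API. This is only a rough sketch: the file name
``example.py`` is arbitrary and the exact completions depend on your Python
and |jedi| versions (the annotation syntax requires Python 3)::

    import jedi

    source = (
        "def myfunction(node: str, foo: int) -> None:\n"
        "    node."
    )
    # Complete at line 2, right after "node." (lines are 1-based, columns 0-based).
    script = jedi.Script(source, 2, len("    node."), "example.py")
    print([c.name for c in script.completions()][:3])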
A little history
----------------
The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit
of the precognition the Jedi have. There's even an awesome `scene
<http://www.youtube.com/watch?v=5BDO3pyavOY>`_ of Monty Python Jedis :-).
But actually the name doesn't have much to do with Star Wars. It's part of my
second name.
After I explained to Guido van Rossum how some parts of my auto-completion
work, he said (we drank a beer or two):
*"Oh, that worries me..."*
When it's finished, I hope he'll like it :-)
I actually started Jedi because there were no good solutions available for VIM.
Most auto-completions just didn't work well. The only good solution was PyCharm.
But I like my good old VIM. Rope was never really intended to be an
auto-completion (and I also really hate project folders for my Python scripts).
It's more of a refactoring suite. So I decided to write my own completion, one
that would execute non-dangerous code. But I soon realized that this wouldn't
work. So I built an extremely recursive thing which understands many of
Python's key features.
By the way, I really tried to program it as understandably as possible. But I
think understanding it might still take quite some time, because of its
recursive nature.

View File

@ -1,83 +0,0 @@
.. include:: ../global.rst
Installation and Configuration
==============================
You can either include |jedi| as a submodule in your text editor plugin (like
jedi-vim_ does by default), or you can install it systemwide.
.. note:: This just installs the |jedi| library, not the :ref:`editor plugins
<editor-plugins>`. For information about how to make it work with your
editor, refer to the corresponding documentation.
The preferred way
-----------------
On any system you can install |jedi| directly from the Python package index
using pip::
sudo pip install jedi
If you want to install the current development version (master branch)::
sudo pip install -e git://github.com/davidhalter/jedi.git#egg=jedi
System-wide installation via a package manager
----------------------------------------------
Arch Linux
~~~~~~~~~~
You can install |jedi| directly from official Arch Linux packages:
- `python-jedi <https://www.archlinux.org/packages/community/any/python-jedi/>`__
(Python 3)
- `python2-jedi <https://www.archlinux.org/packages/community/any/python2-jedi/>`__
(Python 2)
The specified Python version just refers to the *runtime environment* for
|jedi|. Use the Python 2 version if you're running vim (or whatever editor you
use) under Python 2; otherwise, use the Python 3 version. Whichever version
you choose, it is able to complete both Python 2 and 3 *code*.
(There is also a packaged version of the vim plugin available:
`vim-jedi at Arch Linux <https://www.archlinux.org/packages/community/any/vim-jedi/>`__.)
Debian
~~~~~~
Debian packages are available in the `unstable repository
<http://packages.debian.org/search?keywords=python%20jedi>`__.
Others
~~~~~~
We are in discussions about adding |jedi| to the Fedora repositories.
Manual installation from a downloaded package
---------------------------------------------
If you prefer not to use an automated package installer, you can `download
<https://github.com/davidhalter/jedi/archive/master.zip>`__ a current copy of
|jedi| and install it manually.
To install it, navigate in your console to the directory containing `setup.py`
and type::
sudo python setup.py install
Inclusion as a submodule
------------------------
If you use an editor plugin like jedi-vim_, you can simply include |jedi| as a
git submodule of the plugin directory. Vim plugin managers like Vundle_ or
Pathogen_ make it very easy to keep submodules up to date.
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _vundle: https://github.com/gmarik/vundle
.. _pathogen: https://github.com/tpope/vim-pathogen

View File

@ -1,36 +0,0 @@
.. _xxx:
Parser Tree
===========
Usage
-----
.. automodule:: jedi.parser.python
:members:
:undoc-members:
Parser Tree Base Class
----------------------
All nodes and leaves have these methods/properties:
.. autoclass:: jedi.parser.tree.NodeOrLeaf
:members:
:undoc-members:
Python Parser Tree
------------------
.. automodule:: jedi.parser.python.tree
:members:
:undoc-members:
:show-inheritance:
Utility
-------
.. autofunction:: jedi.parser.tree.search_ancestor

View File

@ -1,10 +0,0 @@
.. include:: ../global.rst
.. _plugin-api-classes:
API Return Classes
------------------
.. automodule:: jedi.api.classes
:members:
:undoc-members:

View File

@ -1,100 +0,0 @@
.. include:: ../global.rst
The Plugin API
==============
.. currentmodule:: jedi
Note: This documentation is for plugin developers who want to improve
autocompletion in their editors/IDEs.
If you want to use |jedi|, you first need to ``import jedi``. You then have
direct access to :class:`.Script` and can call the functions documented here.
These functions return :ref:`API classes <plugin-api-classes>`.
Deprecations
------------
The deprecation process is as follows:
1. A deprecation is announced in the next major/minor release.
2. We wait at least a year *and* at least two minor releases before we remove
   the deprecated functionality.
API documentation
-----------------
API Interface
~~~~~~~~~~~~~
.. automodule:: jedi.api
:members:
:undoc-members:
Examples
--------
Completions:
.. sourcecode:: python
>>> import jedi
>>> source = '''import json; json.l'''
>>> script = jedi.Script(source, 1, 19, '')
>>> script
<jedi.api.Script object at 0x2121b10>
>>> completions = script.completions()
>>> completions
[<Completion: load>, <Completion: loads>]
>>> completions[1]
<Completion: loads>
>>> completions[1].complete
'oads'
>>> completions[1].name
'loads'
Definitions / Goto:
.. sourcecode:: python
>>> import jedi
>>> source = '''def my_func():
... print 'called'
...
... alias = my_func
... my_list = [1, None, alias]
... inception = my_list[2]
...
... inception()'''
>>> script = jedi.Script(source, 8, 1, '')
>>>
>>> script.goto_assignments()
[<Definition inception=my_list[2]>]
>>>
>>> script.goto_definitions()
[<Definition def my_func>]
Related names:
.. sourcecode:: python
>>> import jedi
>>> source = '''x = 3
... if 1 == 2:
... x = 4
... else:
... del x'''
>>> script = jedi.Script(source, 5, 8, '')
>>> rns = script.related_names()
>>> rns
[<RelatedName x@3,4>, <RelatedName x@1,0>]
>>> rns[0].start_pos
(3, 4)
>>> rns[0].is_keyword
False
>>> rns[0].text
'x'

View File

@ -1,6 +0,0 @@
.. include:: ../global.rst
Settings
========
.. automodule:: jedi.settings

View File

@ -1,106 +0,0 @@
This file is the start of the documentation describing how static analysis works.
Below is a list of parser node names that are used within nodes_to_execute.
------------ cared for:
global_stmt
exec_stmt # no priority
assert_stmt
if_stmt
while_stmt
for_stmt
try_stmt
(except_clause)
with_stmt
(with_item)
(with_var)
print_stmt
del_stmt
return_stmt
raise_stmt
yield_expr
file_input
funcdef
param
old_lambdef
lambdef
import_name
import_from
(import_as_name)
(dotted_as_name)
(import_as_names)
(dotted_as_names)
(dotted_name)
classdef
comp_for
(comp_if) ?
decorator
----------- add basic
test
or_test
and_test
not_test
expr
xor_expr
and_expr
shift_expr
arith_expr
term
factor
power
atom
comparison
expr_stmt
testlist
testlist1
testlist_safe
----------- special care:
# mostly depends on how we handle the other ones.
testlist_star_expr # should probably just work with expr_stmt
star_expr
exprlist # just ignore? then names are just resolved. Strange anyway, because expr is not really allowed in the list, typically.
----------- ignore:
suite
subscriptlist
subscript
simple_stmt
?? sliceop # can probably just be added.
testlist_comp # prob ignore and care about it with atom.
dictorsetmaker
trailer
decorators
decorated
# always execute function arguments? -> no problem with stars.
# Also arglist and argument are different in different grammars.
arglist
argument
----------- remove:
tname # only exists in current Jedi parser. REMOVE!
tfpdef # python 2: tuple assignment; python 3: annotation
vfpdef # reduced in python 3 and therefore not existing.
tfplist # not in 3
vfplist # not in 3
--------- not existing with parser reductions.
small_stmt
import_stmt
flow_stmt
compound_stmt
stmt
pass_stmt
break_stmt
continue_stmt
comp_op
augassign
old_test
typedargslist # afaik becomes [param]
varargslist # ditto
vname
comp_iter
test_nocond

View File

@ -1,40 +0,0 @@
.. include:: ../global.rst
Jedi Testing
============
The test suite depends on ``tox`` and ``pytest``::
pip install tox pytest
To run the tests for all supported Python versions::
tox
If you want to test only a specific Python version (e.g. Python 2.7), it's as
easy as::
tox -e py27
Tests are also run automatically on `Travis CI
<https://travis-ci.org/davidhalter/jedi/>`_.
You want to add a test for |jedi|? Great! We love that. Normally you should
write your tests as :ref:`Blackbox Tests <blackbox>`. Most tests would
fit right in there.
For specific API testing we're using simple unit tests, with a focus on a
simple and readable testing structure.
.. _blackbox:
Blackbox Tests (run.py)
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: test.run
Refactoring Tests (refactor.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: test.refactor

View File

@ -1,122 +0,0 @@
.. include:: ../global.rst
End User Usage
==============
If you are not an IDE developer, the odds are that you just want to use
|jedi| as a browser plugin or in the shell. Yes, that's :ref:`also possible
<repl-completion>`!
|jedi| is relatively young and can be used in a variety of plugins and
software. If your editor/IDE is not among them, recommend |jedi| to your IDE's
developers.
.. _editor-plugins:
Editor Plugins
--------------
Vim:
- jedi-vim_
- YouCompleteMe_
- deoplete-jedi_
Emacs:
- Jedi.el_
- elpy_
- anaconda-mode_
Sublime Text 2/3:
- SublimeJEDI_ (ST2 & ST3)
- anaconda_ (only ST3)
SynWrite:
- SynJedi_
TextMate:
- Textmate_ (Not sure if it's actually working)
Kate:
- Kate_ version 4.13+ `supports it natively
<https://projects.kde.org/projects/kde/applications/kate/repository/entry/addons/kate/pate/src/plugins/python_autocomplete_jedi.py?rev=KDE%2F4.13>`__;
you have to enable it, though.
Visual Studio Code:
- `Python Extension`_
Atom:
- autocomplete-python-jedi_
SourceLair:
- SourceLair_
GNOME Builder:
- `GNOME Builder`_ `supports it natively
<https://git.gnome.org/browse/gnome-builder/tree/plugins/jedi>`__,
and is enabled by default.
Gedit:
- gedi_
Eric IDE:
- `Eric IDE`_ (Available as a plugin)
Web Debugger:
- wdb_
and many more!
.. _repl-completion:
Tab Completion in the Python Shell
----------------------------------
Starting with IPython `6.0.0`, Jedi is a dependency of IPython. Autocompletion
in IPython is therefore possible without additional configuration.
For the plain Python interpreter there are two different ways to use Jedi
autocompletion: one with a custom ``$HOME/.pythonrc.py`` file and one that
uses ``PYTHONSTARTUP``.
Using ``PYTHONSTARTUP``
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.api.replstartup
Using a custom ``$HOME/.pythonrc.py``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: jedi.utils.setup_readline
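A minimal sketch of such a ``$HOME/.pythonrc.py`` (with ``PYTHONSTARTUP``
pointing at it; the ``rlcompleter`` fallback is optional)::

    # ~/.pythonrc.py
    try:
        from jedi.utils import setup_readline
        setup_readline()
    except ImportError:
        # Fall back to the standard library completer if Jedi is not available.
        import readline
        import rlcompleter
        readline.parse_and_bind("tab: complete")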
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: http://valloric.github.io/YouCompleteMe/
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _SynJedi: http://uvviewsoft.com/synjedi/
.. _wdb: https://github.com/Kozea/wdb
.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
.. _kate: http://kate-editor.org/
.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi
.. _SourceLair: https://www.sourcelair.com
.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder/
.. _gedi: https://github.com/isamert/gedi
.. _Eric IDE: http://eric-ide.python-projects.org
.. _Python Extension: https://marketplace.visualstudio.com/items?itemName=donjayamanne.python

View File

@ -1,3 +0,0 @@
:orphan:
.. |jedi| replace:: *Jedi*

View File

@ -1,40 +0,0 @@
.. include global.rst
Jedi - an awesome autocompletion/static analysis library for Python
===================================================================
Release v\ |release|. (:doc:`Installation <docs/installation>`)
.. automodule:: jedi
Autocompletion can look like this (e.g. VIM plugin):
.. figure:: _screenshots/screenshot_complete.png
.. _toc:
Docs
----
.. toctree::
:maxdepth: 2
docs/usage
docs/installation
docs/features
docs/plugin-api
docs/plugin-api-classes
docs/settings
docs/development
docs/testing
.. _resources:
Resources
---------
- `Source Code on Github <https://github.com/davidhalter/jedi>`_
- `Travis Testing <https://travis-ci.org/davidhalter/jedi>`_
- `Python Package Index <http://pypi.python.org/pypi/jedi/>`_

View File

@ -1,43 +0,0 @@
"""
Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but it now does static analysis as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi uses a very simple API to connect with IDEs. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
To give you a simple example how you can use the Jedi library, here is an
example for the autocompletion feature:
>>> import jedi
>>> source = '''
... import datetime
... datetime.da'''
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
>>> script
<Script: 'example.py'>
>>> completions = script.completions()
>>> completions #doctest: +ELLIPSIS
[<Completion: date>, <Completion: datetime>, ...]
>>> print(completions[0].complete)
te
>>> print(completions[0].name)
date
As you can see, Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""
__version__ = '0.11.0'
from jedi.api import Script, Interpreter, set_debug_function, \
preload_module, names
from jedi import settings

View File

@ -1,48 +0,0 @@
import sys
from os.path import join, dirname, abspath, isdir
def _start_linter():
"""
This is a pre-alpha API. You're not supposed to use it at all, except for
testing. It will very likely change.
"""
import jedi
if '--debug' in sys.argv:
jedi.set_debug_function()
for path in sys.argv[2:]:
if path.startswith('--'):
continue
if isdir(path):
import fnmatch
import os
paths = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.py'):
paths.append(os.path.join(root, filename))
else:
paths = [path]
try:
for path in paths:
for error in jedi.Script(path=path)._analysis():
print(error)
except Exception:
if '--pdb' in sys.argv:
import traceback
traceback.print_exc()
import pdb
pdb.post_mortem()
else:
raise
if len(sys.argv) == 2 and sys.argv[1] == 'repl':
# don't want to use __main__ only for repl yet, maybe we want to use it for
# something else. So just use the keyword ``repl`` for now.
print(join(dirname(abspath(__file__)), 'api', 'replstartup.py'))
elif len(sys.argv) > 1 and sys.argv[1] == 'linter':
_start_linter()

View File

@ -1,299 +0,0 @@
"""
To ensure compatibility from Python ``2.6`` to ``3.3``, a compatibility module
has been created. Clearly there is a huge need to use conforming syntax.
"""
import sys
import imp
import os
import re
import pkgutil
import warnings
try:
import importlib
except ImportError:
pass
# Cannot use sys.version_info.major and .minor names, because in Python 2.6
# version_info is not a namedtuple.
is_py3 = sys.version_info[0] >= 3
is_py33 = is_py3 and sys.version_info[1] >= 3
is_py34 = is_py3 and sys.version_info[1] >= 4
is_py35 = is_py3 and sys.version_info[1] >= 5
is_py26 = not is_py3 and sys.version_info[1] < 7
py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
class DummyFile(object):
def __init__(self, loader, string):
self.loader = loader
self.string = string
def read(self):
return self.loader.get_source(self.string)
def close(self):
del self.loader
def find_module_py34(string, path=None, fullname=None):
implicit_namespace_pkg = False
spec = None
loader = None
spec = importlib.machinery.PathFinder.find_spec(string, path)
if hasattr(spec, 'origin'):
origin = spec.origin
implicit_namespace_pkg = origin == 'namespace'
# We try to disambiguate implicit namespace pkgs from non-implicit namespace pkgs
if implicit_namespace_pkg:
fullname = string if not path else fullname
implicit_ns_info = ImplicitNSInfo(fullname, spec.submodule_search_locations._path)
return None, implicit_ns_info, False
# we have found the tail end of the dotted path
if hasattr(spec, 'loader'):
loader = spec.loader
return find_module_py33(string, path, loader)
def find_module_py33(string, path=None, loader=None, fullname=None):
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
if loader is None and path is None: # Fallback to find builtins
try:
with warnings.catch_warnings(record=True):
# Mute "DeprecationWarning: Use importlib.util.find_spec()
# instead." While we should replace that in the future, it's
# probably good to wait until we deprecate Python 3.3, since
# it was added in Python 3.4 and find_loader hasn't been
# removed in 3.6.
loader = importlib.find_loader(string)
except ValueError as e:
# See #491. Importlib might raise a ValueError, to avoid this, we
# just raise an ImportError to fix the issue.
raise ImportError("Originally " + repr(e))
if loader is None:
raise ImportError("Couldn't find a loader for {0}".format(string))
try:
is_package = loader.is_package(string)
if is_package:
if hasattr(loader, 'path'):
module_path = os.path.dirname(loader.path)
else:
# At least zipimporter does not have path attribute
module_path = os.path.dirname(loader.get_filename(string))
if hasattr(loader, 'archive'):
module_file = DummyFile(loader, string)
else:
module_file = None
else:
module_path = loader.get_filename(string)
module_file = DummyFile(loader, string)
except AttributeError:
# ExtensionLoader has no attribute get_filename; instead it has a
# path attribute that we can use to retrieve the module path.
try:
module_path = loader.path
module_file = DummyFile(loader, string)
except AttributeError:
module_path = string
module_file = None
finally:
is_package = False
if hasattr(loader, 'archive'):
module_path = loader.archive
return module_file, module_path, is_package
def find_module_pre_py33(string, path=None, fullname=None):
try:
module_file, module_path, description = imp.find_module(string, path)
module_type = description[2]
return module_file, module_path, module_type is imp.PKG_DIRECTORY
except ImportError:
pass
if path is None:
path = sys.path
for item in path:
loader = pkgutil.get_importer(item)
if loader:
try:
loader = loader.find_module(string)
if loader:
is_package = loader.is_package(string)
is_archive = hasattr(loader, 'archive')
try:
module_path = loader.get_filename(string)
except AttributeError:
# fallback for py26
try:
module_path = loader._get_filename(string)
except AttributeError:
continue
if is_package:
module_path = os.path.dirname(module_path)
if is_archive:
module_path = loader.archive
file = None
if not is_package or is_archive:
file = DummyFile(loader, string)
return (file, module_path, is_package)
except ImportError:
pass
raise ImportError("No module named {0}".format(string))
find_module = find_module_py33 if is_py33 else find_module_pre_py33
find_module = find_module_py34 if is_py34 else find_module
find_module.__doc__ = """
Provides information about a module.
This function isolates the differences in importing libraries introduced with
Python 3.3 on; it gets a module name and optionally a path. It will return a
tuple containing an open file for the module (if not builtin), the filename
or the name of the module if it is a builtin one, and a boolean indicating
if the module is contained in a package.
"""
class ImplicitNSInfo(object):
"""Stores information returned from an implicit namespace spec"""
def __init__(self, name, paths):
self.name = name
self.paths = paths
# unicode function
try:
unicode = unicode
except NameError:
unicode = str
# exec function
if is_py3:
def exec_function(source, global_map):
exec(source, global_map)
else:
eval(compile("""def exec_function(source, global_map):
exec source in global_map """, 'blub', 'exec'))
# re-raise function
if is_py3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
eval(compile("""
def reraise(exception, traceback):
raise exception, None, traceback
""", 'blub', 'exec'))
reraise.__doc__ = """
Re-raise `exception` with a `traceback` object.
Usage::
reraise(Exception, sys.exc_info()[2])
"""
class Python3Method(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype):
if obj is None:
return lambda *args, **kwargs: self.func(*args, **kwargs)
else:
return lambda *args, **kwargs: self.func(obj, *args, **kwargs)
def use_metaclass(meta, *bases):
""" Create a class with a metaclass. """
if not bases:
bases = (object,)
return meta("HackClass", bases, {})
try:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
except AttributeError:
encoding = 'ascii'
def u(string):
"""Cast to unicode DAMMIT!
Written because Python 2 repr always implicitly casts to a string, so we
have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
if not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
try:
import builtins # module name in python 3
except ImportError:
import __builtin__ as builtins
import ast
def literal_eval(string):
# Python 3.0, 3.1 and 3.2 don't support unicode literals. Support those; I
# don't want to write two versions of the tokenizer.
if is_py3 and sys.version_info.minor < 3:
if re.match('[uU][\'"]', string):
string = string[1:]
return ast.literal_eval(string)
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest # Python 2
try:
FileNotFoundError = FileNotFoundError
except NameError:
FileNotFoundError = IOError
def no_unicode_pprint(dct):
"""
Python 2/3 dict __repr__ may be different, because of unicode differences
(with or without a `u` prefix). Normally in doctests we could use `pprint`
to sort dicts and check for equality, but here we have to write a separate
function to do that.
"""
import pprint
s = pprint.pformat(dct)
print(re.sub("u'", "'", s))
def utf8_repr(func):
"""
``__repr__`` methods in Python 2 don't allow unicode objects to be
returned. Therefore cast them to utf-8 bytes in this decorator.
"""
def wrapper(self):
result = func(self)
if isinstance(result, unicode):
return result.encode('utf-8')
else:
return result
if is_py3:
return func
else:
return wrapper

View File

@ -1,458 +0,0 @@
"""
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
.. warning:: Please, note that Jedi is **not thread safe**.
"""
import os
import sys
import parso
from parso.python import tree
from parso import python_bytes_to_unicode, split_lines
from jedi.parser_utils import get_executable_nodes, get_statement_of_position
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import usages
from jedi.api import helpers
from jedi.api.completion import Completion
from jedi.evaluate import Evaluator
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate.param import try_iter_content
from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf
from jedi.evaluate.sys_path import get_venv_path, dotted_path_in_sys_path
from jedi.evaluate.iterable import unpack_tuple_to_dict
from jedi.evaluate.filters import TreeNameDefinition
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
class Script(object):
"""
A Script is the base for completions, goto or whatever you want to do with
|jedi|.
You can either use the ``source`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The script might be analyzed in a different ``sys.path`` than |jedi|:
- if `sys_path` parameter is not ``None``, it will be used as ``sys.path``
for the script;
- if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment
variable is defined, ``sys.path`` for the specified environment will be
guessed (see :func:`jedi.evaluate.sys_path.get_venv_path`) and used for
the script;
- otherwise ``sys.path`` will match that of |jedi|.
:param source: The source code of the current file, separated by newlines.
:type source: str
:param line: The line to perform actions on (starting with 1).
:type line: int
:param column: The column of the cursor (starting with 0).
:type column: int
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or None
:param encoding: The encoding of ``source``, if it is not a
``unicode`` object (default ``'utf-8'``).
:type encoding: str
:param source_encoding: The encoding of ``source``, if it is not a
``unicode`` object (default ``'utf-8'``).
:type encoding: str
:param sys_path: ``sys.path`` to use during analysis of the script
:type sys_path: list
"""
def __init__(self, source=None, line=None, column=None, path=None,
encoding='utf-8', sys_path=None):
self._orig_path = path
# An empty path (also empty string) should always result in no path.
self.path = os.path.abspath(path) if path else None
if source is None:
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
source = f.read()
# TODO do we really want that?
self._source = python_bytes_to_unicode(source, encoding, errors='replace')
self._code_lines = split_lines(self._source)
line = max(len(self._code_lines), 1) if line is None else line
if not (0 < line <= len(self._code_lines)):
raise ValueError('`line` parameter is not in a valid range.')
line_len = len(self._code_lines[line - 1])
column = line_len if column is None else column
if not (0 <= column <= line_len):
raise ValueError('`column` parameter is not in a valid range.')
self._pos = line, column
self._path = path
cache.clear_time_caches()
debug.reset_time()
# Load the Python grammar of the current interpreter.
self._grammar = parso.load_grammar()
if sys_path is None:
venv = os.getenv('VIRTUAL_ENV')
if venv:
sys_path = list(get_venv_path(venv))
self._evaluator = Evaluator(self._grammar, sys_path=sys_path)
debug.speed('init')
@cache.memoize_method
def _get_module_node(self):
return self._grammar.parse(
code=self._source,
path=self.path,
cache=False, # No disk cache, because the current script often changes.
diff_cache=True,
cache_path=settings.cache_directory
)
@cache.memoize_method
def _get_module(self):
module = er.ModuleContext(
self._evaluator,
self._get_module_node(),
self.path
)
if self.path is not None:
name = dotted_path_in_sys_path(self._evaluator.sys_path, self.path)
if name is not None:
imports.add_module(self._evaluator, name, module)
return module
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path))
def completions(self):
"""
Return :class:`classes.Completion` objects. Those objects contain
information about the completions, more than just names.
:return: Completion objects, sorted by name; names starting with ``__`` come last.
:rtype: list of :class:`classes.Completion`
"""
debug.speed('completions start')
completion = Completion(
self._evaluator, self._get_module(), self._code_lines,
self._pos, self.call_signatures
)
completions = completion.completions()
debug.speed('completions end')
return completions
def goto_definitions(self):
"""
Return the definitions of the path under the cursor (a "goto" function).
This follows complicated paths and returns the end, not the first
definition. The big difference between :meth:`goto_assignments` and
:meth:`goto_definitions` is that :meth:`goto_assignments` doesn't
follow imports and statements. Multiple objects may be returned,
because Python itself is a dynamic language, which means depending on
an option you can have two different versions of a function.
:rtype: list of :class:`classes.Definition`
"""
module_node = self._get_module_node()
leaf = module_node.get_name_of_position(self._pos)
if leaf is None:
leaf = module_node.get_leaf_for_position(self._pos)
if leaf is None:
return []
context = self._evaluator.create_context(self._get_module(), leaf)
definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf)
names = [s.name for s in definitions]
defs = [classes.Definition(self._evaluator, name) for name in names]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))
def goto_assignments(self, follow_imports=False):
"""
Return the first definition found, while optionally following imports.
Multiple objects may be returned, because Python itself is a
dynamic language, which means depending on an option you can have two
different versions of a function.
:rtype: list of :class:`classes.Definition`
"""
def filter_follow_imports(names, check):
for name in names:
if check(name):
for result in filter_follow_imports(name.goto(), check):
yield result
else:
yield name
names = self._goto()
if follow_imports:
def check(name):
if isinstance(name, er.ModuleName):
return False
return name.api_type == 'module'
else:
def check(name):
return isinstance(name, imports.SubModuleName)
names = filter_follow_imports(names, check)
defs = [classes.Definition(self._evaluator, d) for d in set(names)]
return helpers.sorted_definitions(defs)
def _goto(self):
"""
Used for goto_assignments and usages.
"""
name = self._get_module_node().get_name_of_position(self._pos)
if name is None:
return []
context = self._evaluator.create_context(self._get_module(), name)
return list(self._evaluator.goto(context, name))
def usages(self, additional_module_paths=()):
"""
Return :class:`classes.Definition` objects, which contain all
names that point to the definition of the name under the cursor. This
is very useful for refactoring (renaming), or to show all usages of a
variable.
.. todo:: Implement additional_module_paths
:rtype: list of :class:`classes.Definition`
"""
temp, settings.dynamic_flow_information = \
settings.dynamic_flow_information, False
try:
module_node = self._get_module_node()
user_stmt = get_statement_of_position(module_node, self._pos)
definition_names = self._goto()
if not definition_names and isinstance(user_stmt, tree.Import):
# For undefined imports (goto doesn't find anything), we take
# the name as a definition. This is enough, because every name
# points to it.
name = user_stmt.get_name_of_position(self._pos)
if name is None:
# Must be syntax
return []
definition_names = [TreeNameDefinition(self._get_module(), name)]
if not definition_names:
# Without a definition for a name we cannot find references.
return []
definition_names = usages.resolve_potential_imports(self._evaluator,
definition_names)
modules = set([d.get_root_context() for d in definition_names])
modules.add(self._get_module())
definitions = usages.usages(self._evaluator, definition_names, modules)
finally:
settings.dynamic_flow_information = temp
return helpers.sorted_definitions(set(definitions))
def call_signatures(self):
"""
Return the function object of the call you're currently in.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
This would return an empty list.
:rtype: list of :class:`classes.CallSignature`
"""
call_signature_details = \
helpers.get_call_signature_details(self._get_module_node(), self._pos)
if call_signature_details is None:
return []
context = self._evaluator.create_context(
self._get_module(),
call_signature_details.bracket_leaf
)
definitions = helpers.cache_call_signatures(
self._evaluator,
context,
call_signature_details.bracket_leaf,
self._code_lines,
self._pos
)
debug.speed('func_call followed')
return [classes.CallSignature(self._evaluator, d.name,
call_signature_details.bracket_leaf.start_pos,
call_signature_details.call_index,
call_signature_details.keyword_name_str)
for d in definitions if hasattr(d, 'py__call__')]
def _analysis(self):
self._evaluator.is_analysis = True
module_node = self._get_module_node()
self._evaluator.analysis_modules = [module_node]
try:
for node in get_executable_nodes(module_node):
context = self._get_module().create_context(node)
if node.type in ('funcdef', 'classdef'):
# TODO This is stupid, should be private
from jedi.evaluate.finder import _name_to_types
# Resolve the decorators.
_name_to_types(self._evaluator, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():
import_names |= set(path[-1] for path in node.get_paths())
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.eval_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
else:
if node.type == 'name':
defs = self._evaluator.goto_definitions(context, node)
else:
defs = evaluate_call_of_leaf(context, node)
try_iter_content(defs)
self._evaluator.reset_recursion_limitations()
ana = [a for a in self._evaluator.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._evaluator.is_analysis = False
class Interpreter(Script):
"""
Jedi API for Python REPLs.
In addition to completion of simple attribute access, Jedi
supports code completion based on static code analysis.
Jedi can complete attributes of objects which are not initialized
yet.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.completions()[0].name)
upper
"""
def __init__(self, source, namespaces, **kwds):
"""
Parse `source` and mixin interpreted Python objects from `namespaces`.
:type source: str
:arg source: Code to parse.
:type namespaces: list of dict
:arg namespaces: a list of namespace dictionaries such as the one
returned by :func:`locals`.
Other optional arguments are same as the ones for :class:`Script`.
If `line` and `column` are None, they are assumed to be at the end of
`source`.
"""
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
super(Interpreter, self).__init__(source, **kwds)
self.namespaces = namespaces
def _get_module(self):
parser_module = super(Interpreter, self)._get_module_node()
return interpreter.MixedModuleContext(
self._evaluator,
parser_module,
self.namespaces,
path=self.path
)
def names(source=None, path=None, encoding='utf-8', all_scopes=False,
definitions=True, references=False):
"""
Returns a list of `Definition` objects, containing name parts.
This means you can call ``Definition.goto_assignments()`` and get the
reference of a name.
The parameters are the same as in :py:class:`Script`, except for the
following ones:
:param all_scopes: If True lists the names of all scopes instead of only
the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
"""
def def_ref_filter(_def):
is_def = _def._name.tree_name.is_definition()
return definitions and is_def or references and not is_def
# Set line/column to a random position, because they don't matter.
script = Script(source, line=1, column=0, path=path, encoding=encoding)
module_context = script._get_module()
defs = [
classes.Definition(
script._evaluator,
TreeNameDefinition(
module_context.create_context(name if name.parent.type == 'file_input' else name.parent),
name
)
) for name in get_module_names(script._get_module_node(), all_scopes)
]
return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column))
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of parsing
modules lazily. Useful for IDEs, to control which modules to load on startup.
:param modules: different module names, list of strings.
"""
for m in modules:
s = "import %s as x; x." % m
Script(s, 1, len(s), None).completions()
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages, with n params.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed

View File

@ -1,678 +0,0 @@
"""
The :mod:`jedi.api.classes` module contains the return classes of the API.
These classes are the much bigger part of the whole API, because they contain
the interesting information about completion and goto operations.
"""
import re
from parso.cache import parser_cache
from parso.python.tree import search_ancestor
from jedi._compatibility import u
from jedi import settings
from jedi import common
from jedi.cache import memoize_method
from jedi.evaluate import representation as er
from jedi.evaluate import instance
from jedi.evaluate import imports
from jedi.evaluate import compiled
from jedi.evaluate.filters import ParamName
from jedi.evaluate.imports import ImportName
from jedi.api.keywords import KeywordName
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(evaluator, context):
"""
List sub-definitions (e.g., methods in class).
:type scope: Scope
:rtype: list of Definition
"""
filter = next(context.get_filters(search_global=True))
names = [name for name in filter.values()]
return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
class BaseDefinition(object):
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_sqlite3': 'sqlite3',
'__builtin__': '',
'builtins': '',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
def __init__(self, evaluator, name):
self._evaluator = evaluator
self._name = name
"""
An instance of a :class:`parso.representation.Name` subclass.
"""
self.is_keyword = isinstance(self._name, KeywordName)
# generate a path to the definition
self._module = name.get_root_context()
if self.in_builtin_module():
self.module_path = None
else:
self.module_path = self._module.py__file__()
"""Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
@property
def name(self):
"""
Name of variable/function/class/module.
For example, for ``x = None`` it returns ``'x'``.
:rtype: str or None
"""
return self._name.string_name
@property
def type(self):
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. As what is in ``variable`` is unambiguous
to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
definitions for ``keyword``, ``f``, ``C`` and ``x``.
>>> from jedi import Script
>>> source = '''
... import keyword
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... for variable in [keyword, f, C, x]:
... variable'''
>>> script = Script(source)
>>> defs = script.goto_definitions()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs # doctest: +NORMALIZE_WHITESPACE
[<Definition module keyword>, <Definition class C>,
<Definition instance D>, <Definition def f>]
Finally, here is what you can get from :attr:`type`:
>>> defs[0].type
'module'
>>> defs[1].type
'class'
>>> defs[2].type
'instance'
>>> defs[3].type
'function'
"""
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
# TODO move this to their respective names.
definition = tree_name.get_definition()
if definition is not None and definition.type == 'import_from' and \
tree_name.is_definition():
resolve = True
if isinstance(self._name, imports.SubModuleName) or resolve:
for context in self._name.infer():
return context.api_type
return self._name.api_type
def _path(self):
"""The path to a module/class/function definition."""
def to_reverse():
name = self._name
if name.api_type == 'module':
try:
name = list(name.infer())[0].name
except IndexError:
pass
if name.api_type == 'module':
module_contexts = name.infer()
if module_contexts:
module_context, = module_contexts
for n in reversed(module_context.py__name__().split('.')):
yield n
else:
# We don't really know anything about the path here. This
# module is just an import that would lead to an
# ImportError. So simply return the name.
yield name.string_name
return
else:
yield name.string_name
parent_context = name.parent_context
while parent_context is not None:
try:
method = parent_context.py__name__
except AttributeError:
try:
yield parent_context.name.string_name
except AttributeError:
pass
else:
for name in reversed(method().split('.')):
yield name
parent_context = parent_context.parent_context
return reversed(list(to_reverse()))
@property
def module_name(self):
"""
The module name.
>>> from jedi import Script
>>> source = 'import json'
>>> script = Script(source, path='example.py')
>>> d = script.goto_definitions()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
json
"""
return self._module.name.string_name
def in_builtin_module(self):
"""Whether this is a builtin module."""
return isinstance(self._module, compiled.CompiledObject)
@property
def line(self):
"""The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[0]
@property
def column(self):
"""The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[1]
def docstring(self, raw=False, fast=True):
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, 1, len('def f'), 'example.py')
>>> doc = script.goto_definitions()[0].docstring()
>>> print(doc)
f(a, b=1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring. For functions, it is the call signature. If you need the
actual docstring, use ``raw=True`` instead.
>>> print(script.goto_definitions()[0].docstring(raw=True))
Document for function f.
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing `import a` is slow if you use
the ``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
return _Help(self._name).docstring(fast=fast, raw=raw)
@property
def description(self):
"""A textual description of the object."""
return u(self._name.string_name)
@property
def full_name(self):
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up Python manual of the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, 3, len('os.path.join'), 'example.py')
>>> print(script.goto_definitions()[0].full_name)
os.path.join
Notice that it returns ``'os.path.join'`` instead of (for example)
``'posixpath.join'``. This is not strictly correct, since the module's name
would be ``<module 'posixpath' ...>``. However, most users find the
``os.path`` form more practical.
"""
path = list(self._path())
# TODO add further checks, the mapping should only occur on stdlib.
if not path:
return None # for keywords the path is empty
with common.ignored(KeyError):
path[0] = self._mapping[path[0]]
for key, repl in self._tuple_mapping.items():
if tuple(path[:len(key)]) == key:
path = [repl] + path[len(key):]
return '.'.join(path if path[0] else path[1:])
def goto_assignments(self):
if self._name.tree_name is None:
return self
names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
return [Definition(self._evaluator, n) for n in names]
def _goto_definitions(self):
# TODO make this function public.
return [Definition(self._evaluator, d.name) for d in self._name.infer()]
@property
@memoize_method
def params(self):
"""
Raises an ``AttributeError`` if the definition is not callable.
Otherwise returns a list of `Definition` that represents the params.
"""
def get_param_names(context):
param_names = []
if context.api_type == 'function':
param_names = list(context.get_param_names())
if isinstance(context, instance.BoundMethod):
param_names = param_names[1:]
elif isinstance(context, (instance.AbstractInstanceContext, er.ClassContext)):
if isinstance(context, er.ClassContext):
search = '__init__'
else:
search = '__call__'
names = context.get_function_slot_names(search)
if not names:
return []
# Just take the first one here, not optimal, but currently
# there's no better solution.
inferred = names[0].infer()
param_names = get_param_names(next(iter(inferred)))
if isinstance(context, er.ClassContext):
param_names = param_names[1:]
return param_names
elif isinstance(context, compiled.CompiledObject):
return list(context.get_param_names())
return param_names
followed = list(self._name.infer())
if not followed or not hasattr(followed[0], 'py__call__'):
raise AttributeError()
context = followed[0] # only check the first one.
return [Definition(self._evaluator, n) for n in get_param_names(context)]
def parent(self):
context = self._name.parent_context
if context is None:
return None
if isinstance(context, er.FunctionExecutionContext):
# TODO the function context should be a part of the function
# execution context.
context = er.FunctionContext(
self._evaluator, context.parent_context, context.tree_node)
return Definition(self._evaluator, context.name)
def __repr__(self):
return "<%s %s>" % (type(self).__name__, self.description)
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
:return str: Returns the line(s) of code or an empty string if it's a
builtin.
"""
if self.in_builtin_module():
return ''
path = self._name.get_root_context().py__file__()
lines = parser_cache[self._evaluator.grammar._hashed][path].lines
index = self._name.start_pos[0] - 1
start_index = max(index - before, 0)
return ''.join(lines[start_index:index + after + 1])
class Completion(BaseDefinition):
"""
`Completion` objects are returned from :meth:`api.Script.completions`. They
provide additional information about a completion.
"""
def __init__(self, evaluator, name, stack, like_name_length):
super(Completion, self).__init__(evaluator, name)
self._like_name_length = like_name_length
self._stack = stack
# Completion objects with the same name (which means
# duplicate items in the completion list)
self._same_name_completions = []
def _complete(self, like_name):
append = ''
if settings.add_bracket_after_function \
and self.type == 'function':  # api_type values are lowercase, e.g. 'function'
append = '('
if isinstance(self._name, ParamName) and self._stack is not None:
node_names = list(self._stack.get_node_names(self._evaluator.grammar._pgen_grammar))
if 'trailer' in node_names and 'argument' not in node_names:
append += '='
name = self._name.string_name
if like_name:
name = name[self._like_name_length:]
return name + append
@property
def complete(self):
"""
Return the rest of the word, e.g. completing ``isinstance``::
isinstan# <-- Cursor is here
would return the string 'ce'. It also adds additional stuff, depending
on your `settings.py`.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would give a ``Completion`` whose `complete`
would be `am=`.
"""
return self._complete(True)
@property
def name_with_symbols(self):
"""
Similar to :attr:`name`, but it also returns the
symbols, for example assuming the following function definition::
def foo(param=0):
pass
completing ``foo(`` would give a ``Completion`` which
``name_with_symbols`` would be "param=".
"""
return self._complete(False)
def docstring(self, raw=False, fast=True):
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
# wouldn't load like > 100 Python modules anymore.
fast = False
return super(Completion, self).docstring(raw=raw, fast=fast)
@property
def description(self):
"""Provide a description of the completion object."""
# TODO improve the class structure.
return Definition.description.__get__(self)
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.string_name)
@memoize_method
def follow_definition(self):
"""
Return the original definitions. I strongly recommend not using it for
your completions, because it might slow down |jedi|. If you want to
read only a few objects (<=20), it might be useful, especially to get
the original docstrings. The basic problem of this function is that it
follows all results. This means with 1000 completions (e.g. numpy),
it's just PITA-slow.
"""
defs = self._name.infer()
return [Definition(self._evaluator, d.name) for d in defs]
class Definition(BaseDefinition):
"""
*Definition* objects are returned from :meth:`api.Script.goto_assignments`
or :meth:`api.Script.goto_definitions`.
"""
def __init__(self, evaluator, definition):
super(Definition, self).__init__(evaluator, definition)
@property
def description(self):
"""
A description of the :class:`.Definition` object, which is heavily used
in testing. e.g. for ``isinstance`` it returns ``def isinstance``.
Example:
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f if random.choice([0,1]) else C'''
>>> script = Script(source, column=3) # line is maximum by default
>>> defs = script.goto_definitions()
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs
[<Definition def f>, <Definition class C>]
>>> str(defs[0].description) # strip literals in python2
'def f'
>>> str(defs[1].description)
'class C'
"""
typ = self.type
tree_name = self._name.tree_name
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
if typ == 'function':
# For the description we want a short and a pythonic way.
typ = 'def'
return typ + ' ' + u(self._name.string_name)
elif typ == 'param':
code = search_ancestor(tree_name, 'param').get_code(
include_prefix=False,
include_comma=False
)
return typ + ' ' + code
definition = tree_name.get_definition() or tree_name
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
# Delete comments:
txt = re.sub('#[^\n]+\n', ' ', txt)
# Delete multi spaces/newlines
txt = re.sub('\s+', ' ', txt).strip()
return txt
@property
def desc_with_module(self):
"""
In addition to the definition, also return the module.
.. warning:: Don't use this function yet, its behaviour may change. If
you really need it, talk to me.
.. todo:: Add full path. This function should return a
`module.class.function` path.
"""
position = '' if self.in_builtin_module() else '@%s' % (self.line)
return "%s:%s%s" % (self.module_name, self.description, position)
@memoize_method
def defined_names(self):
"""
List sub-definitions (e.g., methods in class).
:rtype: list of Definition
"""
defs = self._name.infer()
return sorted(
common.unite(defined_names(self._evaluator, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
def is_definition(self):
"""
Returns True, if defined as a name in a statement, function or class.
Returns False, if it's a reference to such a definition.
"""
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
def __eq__(self, other):
return self._name.start_pos == other._name.start_pos \
and self.module_path == other.module_path \
and self.name == other.name \
and self._evaluator == other._evaluator
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
class CallSignature(Definition):
"""
`CallSignature` objects are the return value of `Script.call_signatures`.
They know which function you are currently in, e.g. `isinstance(` would
return the `isinstance` function; without `(` it would return nothing.
"""
def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
super(CallSignature, self).__init__(evaluator, executable_name)
self._index = index
self._key_name_str = key_name_str
self._bracket_start_pos = bracket_start_pos
@property
def index(self):
"""
The param index of the current call.
Returns None if the index cannot be found in the current call.
"""
if self._key_name_str is not None:
for i, param in enumerate(self.params):
if self._key_name_str == param.name:
return i
if self.params:
param_name = self.params[-1]._name
if param_name.tree_name is not None:
if param_name.tree_name.get_definition().star_count == 2:
return i
return None
if self._index >= len(self.params):
for i, param in enumerate(self.params):
tree_name = param._name.tree_name
if tree_name is not None:
# *args case
if tree_name.get_definition().star_count == 1:
return i
return None
return self._index
@property
def bracket_start(self):
"""
The start position of the bracket that is responsible for the last
function call.
"""
return self._bracket_start_pos
def __repr__(self):
return '<%s: %s index %s>' % \
(type(self).__name__, self._name.string_name, self.index)
class _Help(object):
"""
Temporary implementation, will be used as `Script.help()` or something similar in
the future.
"""
def __init__(self, definition):
self._name = definition
@memoize_method
def _get_contexts(self, fast):
if isinstance(self._name, ImportName) and fast:
return {}
if self._name.api_type == 'statement':
return {}
return self._name.infer()
def docstring(self, fast=True, raw=True):
"""
The docstring ``__doc__`` for any object.
See :attr:`doc` for example.
"""
# TODO: Use all of the followed objects as output. Possibly dividing
# them by a few dashes.
for context in self._get_contexts(fast=fast):
return context.py__doc__(include_call_signature=not raw)
return ''
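
A minimal usage sketch of the objects defined above (illustrative, not part of the deleted file), assuming the pre-0.12 ``jedi.Script(source, line, column)`` API that this commit removes from the repository:

import jedi
# Cursor right after the bracket: call_signatures() yields a CallSignature
# for `isinstance`, as its docstring above describes.
source = "isinstance("
sig = jedi.Script(source, 1, len(source)).call_signatures()[0]
print(sig.name, sig.index, sig.bracket_start)   # e.g. isinstance 0 (1, 10)
# goto_definitions() returns Definition objects with the properties above.
d = jedi.Script("import os", 1, 8).goto_definitions()[0]
print(d.name, d.type)                           # e.g. os module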

View File

@ -1,291 +0,0 @@
from parso.python import token
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.evaluate import imports
from jedi.api import keywords
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi.evaluate.filters import get_global_filters
from jedi.parser_utils import get_statement_of_position
def get_call_signature_param_names(call_signatures):
# add named params
for call_sig in call_signatures:
for p in call_sig.params:
# Allow protected access, because it's a public API.
tree_name = p._name.tree_name
# Compiled modules typically don't allow keyword arguments.
if tree_name is not None:
# Allow access on _definition here, because it's a
# public API and we don't want to make the internal
# Name object public.
tree_param = tree.search_ancestor(tree_name, 'param')
if tree_param.star_count == 0: # no *args/**kwargs
yield p._name
def filter_names(evaluator, completion_names, stack, like_name):
comp_dct = {}
for name in completion_names:
if settings.case_insensitive_completion \
and name.string_name.lower().startswith(like_name.lower()) \
or name.string_name.startswith(like_name):
new = classes.Completion(
evaluator,
name,
stack,
len(like_name)
)
k = (new.name, new.complete) # key
if k in comp_dct and settings.no_completion_duplicates:
comp_dct[k]._same_name_completions.append(new)
else:
comp_dct[k] = new
yield new
def get_user_scope(module_context, position):
"""
Returns the scope in which the user resides. This includes flows.
"""
user_stmt = get_statement_of_position(module_context.tree_node, position)
if user_stmt is None:
def scan(scope):
for s in scope.children:
if s.start_pos <= position <= s.end_pos:
if isinstance(s, (tree.Scope, tree.Flow)):
return scan(s) or s
elif s.type in ('suite', 'decorated'):
return scan(s)
return None
scanned_node = scan(module_context.tree_node)
if scanned_node:
return module_context.create_context(scanned_node, node_is_context=True)
return module_context
else:
return module_context.create_context(user_stmt)
def get_flow_scope_node(module_node, position):
node = module_node.get_leaf_for_position(position, include_prefixes=True)
while not isinstance(node, (tree.Scope, tree.Flow)):
node = node.parent
return node
class Completion:
def __init__(self, evaluator, module, code_lines, position, call_signatures_method):
self._evaluator = evaluator
self._module_context = module
self._module_node = module.tree_node
self._code_lines = code_lines
# The first step of completions is to get the name
self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
# The actual cursor position is not what we need to calculate
# everything. We want the start of the name we're on.
self._position = position[0], position[1] - len(self._like_name)
self._call_signatures_method = call_signatures_method
def completions(self):
completion_names = self._get_context_completions()
completions = filter_names(self._evaluator, completion_names,
self.stack, self._like_name)
return sorted(completions, key=lambda x: (x.name.startswith('__'),
x.name.startswith('_'),
x.name.lower()))
def _get_context_completions(self):
"""
Analyzes the context that a completion is made in and decides what to
return.
Technically this works by generating a parser stack and analysing the
current stack for possible grammar nodes.
Possible enhancements:
- global/nonlocal search global
- yield from / raise from <- could be only exceptions/generators
- In args: */**: no completion
- In params (also lambda): no completion before =
"""
grammar = self._evaluator.grammar
try:
self.stack = helpers.get_stack_at_position(
grammar, self._code_lines, self._module_node, self._position
)
except helpers.OnErrorLeaf as e:
self.stack = None
if e.error_leaf.value == '.':
# After ErrorLeaf's that are dots, we will not do any
# completions since this probably just confuses the user.
return []
# If we don't have a context, just use global completion.
return self._global_completions()
allowed_keywords, allowed_tokens = \
helpers.get_possible_completion_types(grammar._pgen_grammar, self.stack)
if 'if' in allowed_keywords:
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
previous_leaf = leaf.get_previous_leaf()
indent = self._position[1]
if not (leaf.start_pos <= self._position <= leaf.end_pos):
indent = leaf.start_pos[1]
if previous_leaf is not None:
stmt = previous_leaf
while True:
stmt = search_ancestor(
stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
'error_node',
)
if stmt is None:
break
type_ = stmt.type
if type_ == 'error_node':
first = stmt.children[0]
if isinstance(first, Leaf):
type_ = first.value + '_stmt'
# Compare indents
if stmt.start_pos[1] == indent:
if type_ == 'if_stmt':
allowed_keywords += ['elif', 'else']
elif type_ == 'try_stmt':
allowed_keywords += ['except', 'finally', 'else']
elif type_ == 'for_stmt':
allowed_keywords.append('else')
completion_names = list(self._get_keyword_completion_names(allowed_keywords))
if token.NAME in allowed_tokens or token.INDENT in allowed_tokens:
# This means that we actually have to do type inference.
symbol_names = list(self.stack.get_node_names(grammar._pgen_grammar))
nodes = list(self.stack.get_nodes())
if nodes and nodes[-1] in ('as', 'def', 'class'):
# No completions for ``with x as foo`` and ``import x as foo``.
# Also true for defining names as a class or function.
return list(self._get_class_context_completions(is_function=True))
elif "import_stmt" in symbol_names:
level, names = self._parse_dotted_names(nodes, "import_from" in symbol_names)
only_modules = not ("import_from" in symbol_names and 'import' in nodes)
completion_names += self._get_importer_names(
names,
level,
only_modules=only_modules,
)
elif symbol_names[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
dot = self._module_node.get_leaf_for_position(self._position)
completion_names += self._trailer_completions(dot.get_previous_leaf())
else:
completion_names += self._global_completions()
completion_names += self._get_class_context_completions(is_function=False)
if 'trailer' in symbol_names:
call_signatures = self._call_signatures_method()
completion_names += get_call_signature_param_names(call_signatures)
return completion_names
def _get_keyword_completion_names(self, keywords_):
for k in keywords_:
yield keywords.keyword(self._evaluator, k).name
def _global_completions(self):
context = get_user_scope(self._module_context, self._position)
debug.dbg('global completion scope: %s', context)
flow_scope_node = get_flow_scope_node(self._module_node, self._position)
filters = get_global_filters(
self._evaluator,
context,
self._position,
origin_scope=flow_scope_node
)
completion_names = []
for filter in filters:
completion_names += filter.values()
return completion_names
def _trailer_completions(self, previous_leaf):
user_context = get_user_scope(self._module_context, self._position)
evaluation_context = self._evaluator.create_context(
self._module_context, previous_leaf
)
contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf)
completion_names = []
debug.dbg('trailer completion contexts: %s', contexts)
for context in contexts:
for filter in context.get_filters(
search_global=False, origin_scope=user_context.tree_node):
completion_names += filter.values()
return completion_names
def _parse_dotted_names(self, nodes, is_import_from):
level = 0
names = []
for node in nodes[1:]:
if node in ('.', '...'):
if not names:
level += len(node.value)
elif node.type == 'dotted_name':
names += node.children[::2]
elif node.type == 'name':
names.append(node)
elif node == ',':
if not is_import_from:
names = []
else:
# If the keyword `import` comes along, stop checking
# for names.
break
return level, names
def _get_importer_names(self, names, level=0, only_modules=True):
names = [n.value for n in names]
i = imports.Importer(self._evaluator, names, self._module_context, level)
return i.completion_names(self._evaluator, only_modules=only_modules)
def _get_class_context_completions(self, is_function=True):
"""
Autocomplete inherited methods when overriding in child class.
"""
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
cls = tree.search_ancestor(leaf, 'classdef')
if isinstance(cls, (tree.Class, tree.Function)):
# Complete the methods that are defined in the super classes.
random_context = self._module_context.create_context(
cls,
node_is_context=True
)
else:
return
if cls.start_pos[1] >= leaf.start_pos[1]:
return
filters = random_context.get_filters(search_global=False, is_instance=True)
# The first dict is the dictionary of class itself.
next(filters)
for filter in filters:
for name in filter.values():
if (name.api_type == 'function') == is_function:
yield name
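
A hedged sketch of what the completion machinery above produces through the public API (assuming the pre-0.12 ``jedi.Script(source, line, column)`` signature): ``name`` is the full completion, ``complete`` only the part still missing behind the cursor.

import jedi
for c in jedi.Script("import os\nos.pa", 2, 5).completions():
    print(c.name, repr(c.complete))   # e.g. path 'th', pardir 'rdir'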

View File

@ -1,315 +0,0 @@
"""
Helpers for the API
"""
import re
from collections import namedtuple
from textwrap import dedent
from parso.python.parser import Parser
from parso.python import tree
from parso import split_lines
from jedi._compatibility import u
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi.cache import time_cache
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
def sorted_definitions(defs):
# Note: `or ''` below is required because `module_path` could be None.
return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0))
def get_on_completion_name(module_node, lines, position):
leaf = module_node.get_leaf_for_position(position)
if leaf is None or leaf.type in ('string', 'error_leaf'):
# Completions inside strings are a bit special, we need to parse the
# string. The same is true for comments and error_leafs.
line = lines[position[0] - 1]
# The first step of completions is to get the name
return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
elif leaf.type not in ('name', 'keyword'):
return ''
return leaf.value[:position[1] - leaf.start_pos[1]]
def _get_code(code_lines, start_pos, end_pos):
# Get relevant lines.
lines = code_lines[start_pos[0] - 1:end_pos[0]]
# Remove the parts at the end of the line.
lines[-1] = lines[-1][:end_pos[1]]
# Remove first line indentation.
lines[0] = lines[0][start_pos[1]:]
return '\n'.join(lines)
class OnErrorLeaf(Exception):
@property
def error_leaf(self):
return self.args[0]
def _is_on_comment(leaf, position):
comment_lines = split_lines(leaf.prefix)
difference = leaf.start_pos[0] - position[0]
prefix_start_pos = leaf.get_start_pos_of_prefix()
if difference == 0:
indent = leaf.start_pos[1]
elif position[0] == prefix_start_pos[0]:
indent = prefix_start_pos[1]
else:
indent = 0
line = comment_lines[-difference - 1][:position[1] - indent]
return '#' in line
def _get_code_for_stack(code_lines, module_node, position):
leaf = module_node.get_leaf_for_position(position, include_prefixes=True)
# It might happen that we're on whitespace or on a comment. This means
# that we would not get the right leaf.
if leaf.start_pos >= position:
if _is_on_comment(leaf, position):
return u('')
# If we're not on a comment simply get the previous leaf and proceed.
leaf = leaf.get_previous_leaf()
if leaf is None:
return u('') # At the beginning of the file.
is_after_newline = leaf.type == 'newline'
while leaf.type == 'newline':
leaf = leaf.get_previous_leaf()
if leaf is None:
return u('')
if leaf.type == 'error_leaf' or leaf.type == 'string':
if leaf.start_pos[0] < position[0]:
# On a different line, we just begin anew.
return u('')
# Error leafs cannot be parsed, completion in strings is also
# impossible.
raise OnErrorLeaf(leaf)
else:
user_stmt = leaf
while True:
if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
break
user_stmt = user_stmt.parent
if is_after_newline:
if user_stmt.start_pos[1] > position[1]:
# This means that it's actually a dedent and that means that we
# start without context (part of a suite).
return u('')
# This is basically getting the relevant lines.
return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
def get_stack_at_position(grammar, code_lines, module_node, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
class EndMarkerReached(Exception):
pass
def tokenize_without_endmarker(code):
# TODO: This is not an official parso API for now; it exists purely
# for Jedi.
tokens = grammar._tokenize(code)
for token_ in tokens:
if token_.string == safeword:
raise EndMarkerReached()
else:
yield token_
# The code might be indented; just remove the indentation.
code = dedent(_get_code_for_stack(code_lines, module_node, pos))
# We use a word to tell Jedi when we have reached the start of the
# completion.
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + safeword
p = Parser(grammar._pgen_grammar, error_recovery=True)
try:
p.parse(tokens=tokenize_without_endmarker(code))
except EndMarkerReached:
return Stack(p.pgen_parser.stack)
raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
class Stack(list):
def get_node_names(self, grammar):
for dfa, state, (node_number, nodes) in self:
yield grammar.number2symbol[node_number]
def get_nodes(self):
for dfa, state, (node_number, nodes) in self:
for node in nodes:
yield node
def get_possible_completion_types(pgen_grammar, stack):
def add_results(label_index):
try:
grammar_labels.append(inversed_tokens[label_index])
except KeyError:
try:
keywords.append(inversed_keywords[label_index])
except KeyError:
t, v = pgen_grammar.labels[label_index]
assert t >= 256
# See if it's a symbol and if we're in its first set
itsdfa = pgen_grammar.dfas[t]
itsstates, itsfirst = itsdfa
for first_label_index in itsfirst.keys():
add_results(first_label_index)
inversed_keywords = dict((v, k) for k, v in pgen_grammar.keywords.items())
inversed_tokens = dict((v, k) for k, v in pgen_grammar.tokens.items())
keywords = []
grammar_labels = []
def scan_stack(index):
dfa, state, node = stack[index]
states, first = dfa
arcs = states[state]
for label_index, new_state in arcs:
if label_index == 0:
# An accepting state, check the stack below.
scan_stack(index - 1)
else:
add_results(label_index)
scan_stack(-1)
return keywords, grammar_labels
def evaluate_goto_definition(evaluator, context, leaf):
if leaf.type == 'name':
# In case of a name we can just use goto_definition which does all the
# magic itself.
return evaluator.goto_definitions(context, leaf)
parent = leaf.parent
if parent.type == 'atom':
return context.eval_node(leaf.parent)
elif parent.type == 'trailer':
return evaluate_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
return context.evaluator.eval_atom(context, leaf)
return []
CallSignatureDetails = namedtuple(
'CallSignatureDetails',
['bracket_leaf', 'call_index', 'keyword_name_str']
)
def _get_index_and_key(nodes, position):
"""
Returns the number of commas and the keyword argument string.
"""
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
nodes_before = [c for c in nodes_before[-1].children if c.start_pos < position]
key_str = None
if nodes_before:
last = nodes_before[-1]
if last.type == 'argument' and last.children[1].end_pos <= position:
# Check whether the argument is a keyword argument (cursor is past the '=').
key_str = last.children[0].value
elif last == '=':
key_str = nodes_before[-2].value
return nodes_before.count(','), key_str
def _get_call_signature_details_from_error_node(node, position):
for index, element in reversed(list(enumerate(node.children))):
# `index > 0` means that it's a trailer and not an atom.
if element == '(' and element.end_pos <= position and index > 0:
# It's an error node; we don't want to match too much, just
# up to the parenthesis.
children = node.children[index:]
name = element.get_previous_leaf()
if name is None:
continue
if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
return CallSignatureDetails(
element,
*_get_index_and_key(children, position)
)
def get_call_signature_details(module, position):
leaf = module.get_leaf_for_position(position, include_prefixes=True)
if leaf.start_pos >= position:
# Whitespace / comments after the leaf count towards the previous leaf.
leaf = leaf.get_previous_leaf()
if leaf is None:
return None
if leaf == ')':
if leaf.end_pos == position:
leaf = leaf.get_next_leaf()
# Now that we know where we are in the syntax tree, we start to look at
# parents for possible function definitions.
node = leaf.parent
while node is not None:
if node.type in ('funcdef', 'classdef'):
# Don't show call signatures if there's stuff before it that just
# makes it feel strange to have a call signature.
return None
for n in node.children[::-1]:
if n.start_pos < position and n.type == 'error_node':
result = _get_call_signature_details_from_error_node(n, position)
if result is not None:
return result
if node.type == 'trailer' and node.children[0] == '(':
leaf = node.get_previous_leaf()
if leaf is None:
return None
return CallSignatureDetails(
node.children[0], *_get_index_and_key(node.children, position))
node = node.parent
return None
@time_cache("call_signatures_validity")
def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos):
"""This function calculates the cache key."""
index = user_pos[0] - 1
before_cursor = code_lines[index][:user_pos[1]]
other_lines = code_lines[bracket_leaf.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = context.get_root_context().py__file__()
if module_path is None:
yield None # Don't cache!
else:
yield (module_path, before_bracket, bracket_leaf.start_pos)
yield evaluate_goto_definition(
evaluator,
context,
bracket_leaf.get_previous_leaf()
)
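
A standalone illustration (not jedi code) of the regex used in get_on_completion_name() above to grab the identifier fragment left of the cursor:

import re
line = "    datetime.date.toda"
column = len(line)
fragment = re.search(r'(?!\d)\w+$|$', line[:column]).group(0)
print(fragment)   # -> 'toda'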

View File

@ -1,47 +0,0 @@
"""
TODO Some parts of this module are still not well documented.
"""
from jedi.evaluate.representation import ModuleContext
from jedi.evaluate import compiled
from jedi.evaluate.compiled import mixed
from jedi.evaluate.context import Context
class NamespaceObject(object):
def __init__(self, dct):
self.__dict__ = dct
class MixedModuleContext(Context):
resets_positions = True
type = 'mixed_module'
def __init__(self, evaluator, tree_module, namespaces, path):
self.evaluator = evaluator
self._namespaces = namespaces
self._namespace_objects = [NamespaceObject(n) for n in namespaces]
self._module_context = ModuleContext(evaluator, tree_module, path=path)
self.tree_node = tree_module
def get_node(self):
return self.tree_node
def get_filters(self, *args, **kwargs):
for filter in self._module_context.get_filters(*args, **kwargs):
yield filter
for namespace_obj in self._namespace_objects:
compiled_object = compiled.create(self.evaluator, namespace_obj)
mixed_object = mixed.MixedObject(
self.evaluator,
parent_context=self,
compiled_object=compiled_object,
tree_context=self._module_context
)
for filter in mixed_object.get_filters(*args, **kwargs):
yield filter
def __getattr__(self, name):
return getattr(self._module_context, name)
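
MixedModuleContext above is what backs ``jedi.Interpreter``, which mixes completions from parsed source with live namespace objects. A small, hedged example (pre-0.12 API; the namespace dict is made up):

import jedi
namespace = {'greeting': 'hello world'}
for c in jedi.Interpreter('greeting.up', [namespace]).completions():
    print(c.name)   # expected to include 'upper'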

View File

@ -1,144 +0,0 @@
import pydoc
import keyword
from jedi._compatibility import is_py3, is_py35
from jedi import common
from jedi.evaluate.filters import AbstractNameDefinition
from parso.python.tree import Leaf
try:
from pydoc_data import topics as pydoc_topics
except ImportError:
# Python 2
try:
import pydoc_topics
except ImportError:
# This is for the Python 3 embeddable version, which doesn't have the
# pydoc_data module in its python3x.zip file.
pydoc_topics = None
if is_py3:
if is_py35:
# In Python 3.5 async and await are not proper keywords, but for
# completion purposes they should be treated as though they are.
keys = keyword.kwlist + ["async", "await"]
else:
keys = keyword.kwlist
else:
keys = keyword.kwlist + ['None', 'False', 'True']
def has_inappropriate_leaf_keyword(pos, module):
relevant_errors = filter(
lambda error: error.first_pos[0] == pos[0],
module.error_statement_stacks)
for error in relevant_errors:
if error.next_token in keys:
return True
return False
def completion_names(evaluator, stmt, pos, module):
keyword_list = all_keywords(evaluator)
if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module):
keyword_list = filter(
lambda keyword: not keyword.only_valid_as_leaf,
keyword_list
)
return [keyword.name for keyword in keyword_list]
def all_keywords(evaluator, pos=(0, 0)):
return set([Keyword(evaluator, k, pos) for k in keys])
def keyword(evaluator, string, pos=(0, 0)):
if string in keys:
return Keyword(evaluator, string, pos)
else:
return None
def get_operator(evaluator, string, pos):
return Keyword(evaluator, string, pos)
keywords_only_valid_as_leaf = (
'continue',
'break',
)
class KeywordName(AbstractNameDefinition):
api_type = 'keyword'
def __init__(self, evaluator, name):
self.evaluator = evaluator
self.string_name = name
self.parent_context = evaluator.BUILTINS
def eval(self):
return set()
def infer(self):
return [Keyword(self.evaluator, self.string_name, (0, 0))]
class Keyword(object):
api_type = 'keyword'
def __init__(self, evaluator, name, pos):
self.name = KeywordName(evaluator, name)
self.start_pos = pos
self.parent = evaluator.BUILTINS
@property
def only_valid_as_leaf(self):
return self.name.value in keywords_only_valid_as_leaf
@property
def names(self):
""" For a `parsing.Name` like comparision """
return [self.name]
def py__doc__(self, include_call_signature=False):
return imitate_pydoc(self.name.string_name)
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self.name)
def imitate_pydoc(string):
"""
It's not possible to get pydoc's help texts without starting the annoying
pager.
"""
if pydoc_topics is None:
return ''
# str needed because of possible unicode stuff in py2k (pydoc doesn't work
# with unicode strings)
string = str(string)
h = pydoc.help
with common.ignored(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')
get_target = lambda s: h.topics.get(s, h.keywords.get(s))
while isinstance(string, str):
string = get_target(string)
try:
# is a tuple now
label, related = string
except TypeError:
return ''
try:
return pydoc_topics.topics[label].strip() if pydoc_topics else ''
except KeyError:
return ''
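
A hedged usage sketch of the helper above: with ``pydoc_data`` importable it is expected to return the language-reference text for a keyword, and on any lookup failure it falls back to the empty string.

from jedi.api.keywords import imitate_pydoc
text = imitate_pydoc('for')
print(text[:72] if text else '(no pydoc topics available)')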

View File

@ -1,27 +0,0 @@
"""
To use Jedi completion in the Python interpreter, add the following to your
shell setup (e.g., ``.bashrc``)::
export PYTHONSTARTUP="$(python -m jedi repl)"
Then you will be able to use the Jedi completer in your Python interpreter::
$ python
Python 2.7.2+ (default, Jul 20 2012, 22:15:08)
[GCC 4.6.1] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import os
>>> os.path.join().split().in<TAB> # doctest: +SKIP
os.path.join().split().index os.path.join().split().insert
"""
import jedi.utils
from jedi import __version__ as __jedi_version__
print('REPL completion using Jedi %s' % __jedi_version__)
jedi.utils.setup_readline()
del jedi
# Note: try not to do many things here, as it will contaminate the global
# namespace of the interpreter.

View File

@ -1,75 +0,0 @@
from jedi.api import classes
from parso.python import tree
from jedi.evaluate import imports
from jedi.evaluate.filters import TreeNameDefinition
from jedi.evaluate.representation import ModuleContext
def compare_contexts(c1, c2):
return c1 == c2 or (c1[1] == c2[1] and c1[0].tree_node == c2[0].tree_node)
def usages(evaluator, definition_names, mods):
"""
:param definitions: list of Name
"""
def resolve_names(definition_names):
for name in definition_names:
if name.api_type == 'module':
found = False
for context in name.infer():
if isinstance(context, ModuleContext):
found = True
yield context.name
if not found:
yield name
else:
yield name
def compare_array(definition_names):
""" `definitions` are being compared by module/start_pos, because
sometimes the ids of the objects change (e.g. executions).
"""
return [
(name.get_root_context(), name.start_pos)
for name in resolve_names(definition_names)
]
search_name = list(definition_names)[0].string_name
compare_definitions = compare_array(definition_names)
mods = mods | set([d.get_root_context() for d in definition_names])
definition_names = set(resolve_names(definition_names))
for m in imports.get_modules_containing_name(evaluator, mods, search_name):
if isinstance(m, ModuleContext):
for name_node in m.tree_node.get_used_names().get(search_name, []):
context = evaluator.create_context(m, name_node)
result = evaluator.goto(context, name_node)
if any(compare_contexts(c1, c2)
for c1 in compare_array(result)
for c2 in compare_definitions):
name = TreeNameDefinition(context, name_node)
definition_names.add(name)
# Previous definitions might be imports, so include them
# (because goto might return that import name).
compare_definitions += compare_array([name])
else:
# compiled objects
definition_names.add(m.name)
return [classes.Definition(evaluator, n) for n in definition_names]
def resolve_potential_imports(evaluator, definitions):
""" Adds the modules of the imports """
new = set()
for d in definitions:
if isinstance(d, TreeNameDefinition):
imp_or_stmt = d.tree_name.get_definition()
if isinstance(imp_or_stmt, tree.Import):
new |= resolve_potential_imports(
evaluator,
set(imports.infer_import(
d.parent_context, d.tree_name, is_goto=True
))
)
return set(definitions) | new
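
The public entry point for the function above is ``Script.usages()``; a hedged sketch (pre-0.12 API, the file name is made up):

import jedi
source = "def foo():\n    pass\n\nfoo()\n"
for d in jedi.Script(source, 1, 5, 'example.py').usages():
    print(d.line, d.column, d.description)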

View File

@ -1,124 +0,0 @@
"""
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import inspect
from jedi import settings
from parso.cache import parser_cache
_time_caches = {}
def underscore_memoization(func):
"""
Decorator for methods::
class A(object):
def x(self):
if self._x:
self._x = 10
return self._x
Becomes::
class A(object):
@underscore_memoization
def x(self):
return 10
A now has an attribute ``_x`` written by this decorator.
"""
name = '_' + func.__name__
def wrapper(self):
try:
return getattr(self, name)
except AttributeError:
result = func(self)
if inspect.isgenerator(result):
result = list(result)
setattr(self, name, result)
return result
return wrapper
def clear_time_caches(delete_all=False):
""" Jedi caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
for cache in _time_caches.values():
cache.clear()
parser_cache.clear()
else:
# normally just kill the expired entries, not all
for tc in _time_caches.values():
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
del tc[key]
def time_cache(time_add_setting):
"""
This decorator works as follows: Call it with a setting and after that
use the function with a callable that returns the key.
But: This function is only called if the key is not available. After a
certain amount of time (`time_add_setting`) the cache is invalid.
If the given key is None, the function will not be cached.
"""
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
expiry, value = dct[key]
if expiry > time.time():
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if key is not None:
dct[key] = time.time() + time_add, value
return value
return wrapper
return _temp
def memoize_method(method):
"""A normal memoize function."""
def wrapper(self, *args, **kwargs):
cache_dict = self.__dict__.setdefault('_memoize_method_dct', {})
dct = cache_dict.setdefault(method, {})
key = (args, frozenset(kwargs.items()))
try:
return dct[key]
except KeyError:
result = method(self, *args, **kwargs)
dct[key] = result
return result
return wrapper
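
A minimal sketch of the two-step generator protocol that ``time_cache`` expects, reusing the ``call_signatures_validity`` setting referenced elsewhere in this commit: the decorated generator first yields the cache key (None disables caching), then yields the value to store.

from jedi.cache import time_cache

@time_cache("call_signatures_validity")
def cached_square(n):
    yield n          # first yield: the cache key
    yield n * n      # second yield: computed only on a cache miss

print(cached_square(4))   # 16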

View File

@ -1,111 +0,0 @@
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
from jedi._compatibility import reraise
from jedi import settings
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
`hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
Therefore this class originally had to be derived from `BaseException`
instead of `Exception`. But because I removed relevant `hasattr` from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""
def safe_property(func):
return property(reraise_uncaught(func))
def reraise_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
Usage: Put ``@reraise_uncaught`` in front of a function
which is **not** supposed to raise `AttributeError`.
AttributeError is easily caught by `hasattr` and by another
``except AttributeError`` clause. This becomes a problem when you use
a lot of "dynamic" attributes (e.g., using ``@property``), because you
can't distinguish whether the property really does not exist or some code
inside the "dynamic" attribute threw that error. In well-written
code such errors should not exist, but getting there is very
difficult. This decorator helps us get there by changing
`AttributeError` to `UncaughtAttributeError`, avoiding an unexpected catch.
This helps us notice bugs earlier and facilitates debugging.
.. note:: Treating StopIteration here is easy.
Add that feature when needed.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError:
exc_info = sys.exc_info()
reraise(UncaughtAttributeError(exc_info[1]), exc_info[2])
return wrapper
class PushBackIterator(object):
def __init__(self, iterator):
self.pushes = []
self.iterator = iterator
self.current = None
def push_back(self, value):
self.pushes.append(value)
def __iter__(self):
return self
def next(self):
""" Python 2 Compatibility """
return self.__next__()
def __next__(self):
if self.pushes:
self.current = self.pushes.pop()
else:
self.current = next(self.iterator)
return self.current
def indent_block(text, indention='    '):
"""This function indents a text block with a default of four spaces."""
temp = ''
while text and text[-1] == '\n':
temp += text[-1]
text = text[:-1]
lines = text.split('\n')
return '\n'.join(map(lambda s: indention + s, lines)) + temp
@contextlib.contextmanager
def ignored(*exceptions):
"""
Context manager that ignores all of the specified exceptions. This will
be in the standard library starting with Python 3.4.
"""
try:
yield
except exceptions:
pass
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
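
Two quick, self-contained examples of the helpers above (illustrative only):

from jedi.common import PushBackIterator, unite
print(unite([{1, 2}, {2, 3}]))     # {1, 2, 3}
it = PushBackIterator(iter('abc'))
first = next(it)                   # 'a'
it.push_back(first)
print(next(it), next(it))          # a b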

View File

@ -1,128 +0,0 @@
from jedi._compatibility import encoding, is_py3, u
import os
import time
def _lazy_colorama_init():
"""
Lazily init colorama if necessary, so as not to screw up stdout if debug is
not enabled.
This version of the function does nothing.
"""
pass
_inited = False
try:
if os.name == 'nt':
# Does not work on Windows, as pyreadline and colorama interfere
raise ImportError
else:
# Use colorama for nicer console output.
from colorama import Fore, init
from colorama import initialise
def _lazy_colorama_init():
"""
Lazily init colorama if necessary, so as not to screw up stdout if
debug is not enabled.
This version of the function does init colorama.
"""
global _inited
if not _inited:
# pytest resets the stream at the end - causes troubles. Since
# after every output the stream is reset automatically we don't
# need this.
initialise.atexit_done = True
try:
init()
except Exception:
# Colorama fails with initializing under vim and is buggy in
# version 0.3.6.
pass
_inited = True
except ImportError:
class Fore(object):
RED = ''
GREEN = ''
YELLOW = ''
MAGENTA = ''
RESET = ''
NOTICE = object()
WARNING = object()
SPEED = object()
enable_speed = False
enable_warning = False
enable_notice = False
# callback, interface: level, str
debug_function = None
_debug_indent = 0
_start_time = time.time()
def reset_time():
global _start_time, _debug_indent
_start_time = time.time()
_debug_indent = 0
def increase_indent(func):
"""Decorator for makin """
def wrapper(*args, **kwargs):
global _debug_indent
_debug_indent += 1
try:
return func(*args, **kwargs)
finally:
_debug_indent -= 1
return wrapper
def dbg(message, *args, **kwargs):
""" Looks at the stack, to see if a debug message should be printed. """
# Python 2 compatibility, because it doesn't understand default args
color = kwargs.pop('color', 'GREEN')
assert color
if debug_function and enable_notice:
i = ' ' * _debug_indent
_lazy_colorama_init()
debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
def warning(message, *args, **kwargs):
format = kwargs.pop('format', True)
assert not kwargs
if debug_function and enable_warning:
i = ' ' * _debug_indent
if format:
message = message % tuple(u(repr(a)) for a in args)
debug_function('RED', i + 'warning: ' + message)
def speed(name):
if debug_function and enable_speed:
now = time.time()
i = ' ' * _debug_indent
debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time))
def print_to_stdout(color, str_out):
"""
The default debug function that prints to standard out.
:param str color: A string that is an attribute of ``colorama.Fore``.
"""
col = getattr(Fore, color)
_lazy_colorama_init()
if not is_py3:
str_out = str_out.encode(encoding, 'replace')
print(col + str_out + Fore.RESET)
# debug_function = print_to_stdout
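
A hedged example of wiring up the hooks defined above so that dbg()/warning() messages become visible while experimenting:

import jedi
from jedi import debug
debug.debug_function = debug.print_to_stdout
debug.enable_notice = True
debug.enable_warning = True
jedi.Script("import os\nos.", 2, 3).completions()   # now prints dbg output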

View File

@ -1,629 +0,0 @@
"""
Evaluation of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. If you
don't know about it, google it. That said, the typical entry point for static
analysis is calling ``eval_statement``. There's separate logic for
autocompletion in the API, the evaluator is all about evaluating an expression.
Now you need to understand what follows after ``eval_statement``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_statement`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment.
- ``Evaluator.eval_element`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_element`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_statement`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored. (A short usage sketch follows at
the end of this file listing.)
"""
import copy
import sys
from parso.python import tree
import parso
from jedi import debug
from jedi.common import unite
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate import pep0484
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.instance import AnonymousInstance, BoundMethod
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
from jedi import parser_utils
def _limit_context_infers(func):
"""
This is for now the way how we limit type inference going wild. There are
other ways to ensure recursion limits as well. This is mostly necessary
because of instance (self) access that can be quite tricky to limit.
I'm still not sure this is the way to go, but it looks okay for now and we
can still go another way in the future. Tests are there. ~ dave
"""
def wrapper(evaluator, context, *args, **kwargs):
n = context.tree_node
try:
evaluator.inferred_element_counts[n] += 1
if evaluator.inferred_element_counts[n] > 300:
debug.warning('In context %s there were too many inferences.', n)
return set()
except KeyError:
evaluator.inferred_element_counts[n] = 1
return func(evaluator, context, *args, **kwargs)
return wrapper
class Evaluator(object):
def __init__(self, grammar, sys_path=None):
self.grammar = grammar
self.latest_grammar = parso.load_grammar(version='3.6')
self.memoize_cache = {} # for memoize decorators
# To memorize modules -> equals `sys.modules`.
self.modules = {} # like `sys.modules`.
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.inferred_element_counts = {}
self.mixed_cache = {} # see `evaluate.compiled.mixed._create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.python_version = sys.version_info[:2]
if sys_path is None:
sys_path = sys.path
self.sys_path = copy.copy(sys_path)
try:
self.sys_path.remove('')
except ValueError:
pass
self.reset_recursion_limitations()
# Constants
self.BUILTINS = compiled.get_special_object(self, 'BUILTINS')
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def find_types(self, context, name_or_str, name_context, position=None,
search_global=False, is_goto=False, analysis_errors=True):
"""
This is the search function. The most important part to debug.
`remove_statements` and `filter_statements` really are the core part of
this completion.
:param position: Position of the last statement -> tuple of line, column
:return: List of Names. Their parents are the types.
"""
f = finder.NameFinder(self, context, name_context, name_or_str,
position, analysis_errors=analysis_errors)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
@_limit_context_infers
def eval_statement(self, context, stmt, seek_name=None):
with recursion.execution_allowed(self, stmt) as allowed:
if allowed or context.get_root_context() == self.BUILTINS:
return self._eval_stmt(context, stmt, seek_name)
return set()
#@evaluator_function_cache(default=[])
@debug.increase_indent
def _eval_stmt(self, context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
types = self.eval_element(context, rhs)
if seek_name:
c_node = ContextualizedName(context, seek_name)
types = finder.check_tuple_assignments(self, c_node, types)
first_operator = next(stmt.yield_operators(), None)
if first_operator not in ('=', None) and first_operator.type == 'operator':
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operator)
operator.value = operator.value[:-1]
name = stmt.get_defined_names()[0].value
left = context.py__getattribute__(
name, position=stmt.start_pos, search_global=True)
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and types \
and parser_utils.for_stmt_defines_one_name(for_stmt):
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it, if the variable is not a tuple.
node = for_stmt.get_testlist()
cn = ContextualizedNode(context, node)
ordered = list(iterable.py__iter__(self, cn.infer(), cn))
for lazy_context in ordered:
dct = {for_stmt.children[1].value: lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = self.eval_element(context, rhs)
left = precedence.calculate(self, context, left, operator, t)
types = left
else:
types = precedence.calculate(self, context, left, operator, types)
debug.dbg('eval_statement result %s', types)
return types
def eval_element(self, context, element):
if isinstance(context, iterable.CompForContext):
return self._eval_element_not_cached(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
if if_stmt.type in ('if_stmt', 'for_stmt'):
break
if parser_utils.is_scope(if_stmt):
if_stmt = None
break
predefined_if_name_dict = context.predefined_names.get(if_stmt)
if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt':
if_stmt_test = if_stmt.children[1]
name_dicts = [{}]
# If we already did a check, we don't want to do it again -> If
# context.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = helpers.get_names_of_node(if_stmt_test)
element_names = helpers.get_names_of_node(element)
str_element_names = [e.value for e in element_names]
if any(i.value in str_element_names for i in if_names):
for if_name in if_names:
definitions = self.goto_definitions(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch evaluation %s.', if_stmt)
# There's only a certain amount of branches
# Jedi can evaluate, otherwise it will take too
# long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = set([definition])
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[if_name.value] = definitions
if len(name_dicts) > 1:
result = set()
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= self._eval_element_not_cached(context, element)
return result
else:
return self._eval_element_if_evaluated(context, element)
else:
if predefined_if_name_dict:
return self._eval_element_not_cached(context, element)
else:
return self._eval_element_if_evaluated(context, element)
def _eval_element_if_evaluated(self, context, element):
"""
TODO This function is temporary: Merge with eval_element.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return self._eval_element_not_cached(context, element)
return self._eval_element_cached(context, element)
@evaluator_function_cache(default=set())
def _eval_element_cached(self, context, element):
return self._eval_element_not_cached(context, element)
@debug.increase_indent
@_limit_context_infers
def _eval_element_not_cached(self, context, element):
debug.dbg('eval_element %s@%s', element, element.start_pos)
types = set()
typ = element.type
if typ in ('name', 'number', 'string', 'atom'):
types = self.eval_atom(context, element)
elif typ == 'keyword':
# For False/True/None
if element.value in ('False', 'True', 'None'):
types.add(compiled.builtin_from_name(self, element.value))
# else: print e.g. could be evaluated like this in Python 2.7
elif typ == 'lambdef':
types = set([er.FunctionContext(self, context, element)])
elif typ == 'expr_stmt':
types = self.eval_statement(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
if not (first_child.type == 'keyword' and first_child.value == 'await'):
types = self.eval_atom(context, first_child)
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
right = self.eval_element(context, element.children[2])
types = set(precedence.calculate(self, context, types, trailer, right))
break
types = self.eval_trailer(context, types, trailer)
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
types = set([iterable.SequenceLiteralContext(self, context, element)])
elif typ in ('not_test', 'factor'):
types = self.eval_element(context, element.children[-1])
for operator in element.children[:-1]:
types = set(precedence.factor_calculate(self, types, operator))
elif typ == 'test':
# `x if foo else y` case.
types = (self.eval_element(context, element.children[0]) |
self.eval_element(context, element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not evaluated.
# In Python 2 the ellipsis is coded as three single dot tokens, not
# as one three-dot token.
assert element.value in ('.', '...')
types = set([compiled.create(self, Ellipsis)])
elif typ == 'dotted_name':
types = self.eval_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
types = unite(
typ.py__getattribute__(next_name, name_context=context)
for typ in types
)
elif typ == 'eval_input':
types = self._eval_element_not_cached(context, element.children[0])
elif typ == 'annassign':
types = pep0484._evaluate_for_annotation(context, element.children[1])
else:
types = precedence.calculate_children(self, context, element.children)
debug.dbg('eval_element result %s', types)
return types
def eval_atom(self, context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
if atom.type == 'name':
# This is the first global lookup.
stmt = tree.search_ancestor(
atom, 'expr_stmt', 'lambdef'
) or atom
if stmt.type == 'lambdef':
stmt = atom
return context.py__getattribute__(
name_or_str=atom,
position=stmt.start_pos,
search_global=True
)
elif isinstance(atom, tree.Literal):
string = parser_utils.safe_literal_eval(atom.value)
return set([compiled.create(self, string)])
else:
c = atom.children
if c[0].type == 'string':
# Will be one string.
types = self.eval_atom(context, c[0])
for string in c[1:]:
right = self.eval_atom(context, string)
types = precedence.calculate(self, context, types, '+', right)
return types
# Parentheses without commas are not tuples.
elif c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return self.eval_element(context, c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type == 'comp_for':
return set([iterable.Comprehension.from_atom(self, context, atom)])
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
context = iterable.DictLiteralContext(self, context, atom)
else:
context = iterable.SequenceLiteralContext(self, context, atom)
return set([context])
def eval_trailer(self, context, types, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = ()
new_types = set()
if trailer_op == '[':
new_types |= iterable.py__getitem__(self, context, types, trailer)
else:
for typ in types:
debug.dbg('eval_trailer: %s in scope %s', trailer, typ)
if trailer_op == '.':
new_types |= typ.py__getattribute__(
name_context=context,
name_or_str=node
)
elif trailer_op == '(':
arguments = param.TreeArguments(self, context, node, trailer)
new_types |= self.execute(typ, arguments)
return new_types
@debug.increase_indent
def execute(self, obj, arguments):
if self.is_analysis:
arguments.eval_all()
debug.dbg('execute: %s %s', obj, arguments)
try:
# Some stdlib functions like super(), namedtuple(), etc. have been
# hard-coded in Jedi to support them.
return stdlib.execute(self, obj, arguments)
except stdlib.NotInStdLib:
pass
try:
func = obj.py__call__
except AttributeError:
debug.warning("no execution possible %s", obj)
return set()
else:
types = func(arguments)
debug.dbg('execute result: %s in %s', types, obj)
return types
def goto_definitions(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
type_ = def_.type
if type_ == 'classdef':
return [er.ClassContext(self, name.parent, context)]
elif type_ == 'funcdef':
return [er.FunctionContext(self, context, name.parent)]
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return self.eval_statement(context, def_, name)
if type_ == 'for_stmt':
container_types = self.eval_element(context, def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterable.py__iter__types(self, container_types, cn)
c_node = ContextualizedName(context, name)
return finder.check_tuple_assignments(self, c_node, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
return helpers.evaluate_call_of_leaf(context, name)
def goto(self, context, name):
definition = name.get_definition(import_name_always=True)
if definition is not None:
type_ = definition.type
if type_ == 'expr_stmt':
# Only take the parent, because if it's more complicated than just
# a name it's something you can "goto" again.
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return [TreeNameDefinition(context, name)]
elif type_ == 'param':
return [ParamName(context, name)]
elif type_ in ('funcdef', 'classdef'):
return [TreeNameDefinition(context, name)]
elif type_ in ('import_from', 'import_name'):
module_names = imports.infer_import(context, name, is_goto=True)
return module_names
par = name.parent
typ = par.type
if typ == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
types = self.eval_element(context, trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_evaluate = trailer.parent.children[:i]
types = self.eval_element(context, to_evaluate[0])
for trailer in to_evaluate[1:]:
types = self.eval_trailer(context, types, trailer)
param_names = []
for typ in types:
try:
get_param_names = typ.get_param_names
except AttributeError:
pass
else:
for param_name in get_param_names():
if param_name.string_name == name.value:
param_names.append(param_name)
return param_names
elif typ == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = self.eval_element(context, new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
if typ == 'trailer' and par.children[0] == '.':
values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
else:
stmt = tree.search_ancestor(
name, 'expr_stmt', 'lambdef'
) or name
if stmt.type == 'lambdef':
stmt = name
return context.py__getattribute__(
name,
position=stmt.start_pos,
search_global=True, is_goto=True
)
def create_context(self, base_context, node, node_is_context=False, node_is_object=False):
def parent_scope(node):
while True:
node = node.parent
if parser_utils.is_scope(node):
return node
elif node.type in ('argument', 'testlist_comp'):
if node.children[1].type == 'comp_for':
return node.children[1]
elif node.type == 'dictorsetmaker':
for n in node.children[1:4]:
# In dictionaries it can be pretty much anything.
if n.type == 'comp_for':
return n
def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False):
if scope_node == base_node:
return base_context
is_funcdef = scope_node.type in ('funcdef', 'lambdef')
parent_scope = parser_utils.get_parent_scope(scope_node)
parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)
if is_funcdef:
if isinstance(parent_context, AnonymousInstance):
func = BoundMethod(
self, parent_context, parent_context.class_context,
parent_context.parent_context, scope_node
)
else:
func = er.FunctionContext(
self,
parent_context,
scope_node
)
if is_nested and not node_is_object:
return func.get_function_execution()
return func
elif scope_node.type == 'classdef':
class_context = er.ClassContext(self, scope_node, parent_context)
if child_is_funcdef:
# anonymous instance
return AnonymousInstance(self, parent_context, class_context)
else:
return class_context
elif scope_node.type == 'comp_for':
if node.start_pos >= scope_node.children[-1].start_pos:
return parent_context
return iterable.CompForContext.from_comp_for(parent_context, scope_node)
raise Exception("There's a scope that was not managed.")
base_node = base_context.tree_node
if node_is_context and parser_utils.is_scope(node):
scope_node = node
else:
if node.parent.type in ('funcdef', 'classdef') and node.parent.name == node:
# When we're on class/function names/leafs that define the
# object itself and not its contents.
node = node.parent
scope_node = parent_scope(node)
return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)
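
A short usage sketch matching the module docstring at the top of this file listing (assuming the pre-0.12 Script API): with the cursor on ``date``, goto_definitions() ends up in Evaluator.eval_element / find_types and resolves the ``date`` class.

import jedi
source = "import datetime\ndatetime.date.toda"
defs = jedi.Script(source, 2, 11).goto_definitions()
print([d.name for d in defs])   # expected: ['date']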

View File

@ -1,214 +0,0 @@
"""
Module for static analysis.
"""
from jedi import debug
from parso.python import tree
from jedi.evaluate.compiled import CompiledObject
CODES = {
'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
'name-error': (2, NameError, 'Potential NameError.'),
'import-error': (3, ImportError, 'Potential ImportError.'),
'type-error-too-many-arguments': (4, TypeError, None),
'type-error-too-few-arguments': (5, TypeError, None),
'type-error-keyword-argument': (6, TypeError, None),
'type-error-multiple-values': (7, TypeError, None),
'type-error-star-star': (8, TypeError, None),
'type-error-star': (9, TypeError, None),
'type-error-operation': (10, TypeError, None),
'type-error-not-iterable': (11, TypeError, None),
'type-error-isinstance': (12, TypeError, None),
'type-error-not-subscriptable': (13, TypeError, None),
'value-error-too-many-values': (14, ValueError, None),
'value-error-too-few-values': (15, ValueError, None),
}
class Error(object):
def __init__(self, name, module_path, start_pos, message=None):
self.path = module_path
self._start_pos = start_pos
self.name = name
if message is None:
message = CODES[self.name][2]
self.message = message
@property
def line(self):
return self._start_pos[0]
@property
def column(self):
return self._start_pos[1]
@property
def code(self):
# The first letter of the class name ('E' for Error, 'W' for Warning).
first = self.__class__.__name__[0]
return first + str(CODES[self.name][0])
def __unicode__(self):
return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
self.code, self.message)
def __str__(self):
return self.__unicode__()
def __eq__(self, other):
return (self.path == other.path and self.name == other.name and
self._start_pos == other._start_pos)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.path, self._start_pos, self.name))
def __repr__(self):
return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
self.name, self.path,
self._start_pos[0], self._start_pos[1])
class Warning(Error):
pass
def add(node_context, error_name, node, message=None, typ=Error, payload=None):
exception = CODES[error_name][1]
if _check_for_exception_catch(node_context, node, exception, payload):
return
# TODO this path is probably not right
module_context = node_context.get_root_context()
module_path = module_context.py__file__()
instance = typ(error_name, module_path, node.start_pos, message)
debug.warning(str(instance), format=False)
node_context.evaluator.analysis.append(instance)
def _check_for_setattr(instance):
"""
Check if there's any setattr method inside an instance. If so, return True.
"""
from jedi.evaluate.representation import ModuleContext
module = instance.get_root_context()
if not isinstance(module, ModuleContext):
return False
node = module.tree_node
try:
stmts = node.get_used_names()['setattr']
except KeyError:
return False
return any(node.start_pos < stmt.start_pos < node.end_pos
for stmt in stmts)
def add_attribute_error(name_context, lookup_context, name):
message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name))
from jedi.evaluate.instance import AbstractInstanceContext, CompiledInstanceName
# Check for __getattr__/__getattribute__ existence and issue a warning
# instead of an error, if that happens.
typ = Error
if isinstance(lookup_context, AbstractInstanceContext):
slot_names = lookup_context.get_function_slot_names('__getattr__') + \
lookup_context.get_function_slot_names('__getattribute__')
for n in slot_names:
if isinstance(name, CompiledInstanceName) and \
n.parent_context.obj == object:
typ = Warning
break
if _check_for_setattr(lookup_context):
typ = Warning
payload = lookup_context, name
add(name_context, 'attribute-error', name, message, typ, payload)
def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
"""
Checks if a jedi object (e.g. `Statement`) sits inside a try/except and
therefore doesn't count as an error (if equal to `exception`).
Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
it.
Returns True if the exception was caught.
"""
def check_match(cls, exception):
try:
return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj)
except TypeError:
return False
def check_try_for_except(obj, exception):
# Only nodes in try
iterator = iter(obj.children)
for branch_type in iterator:
colon = next(iterator)
suite = next(iterator)
if branch_type == 'try' \
and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
return False
for node in obj.get_except_clause_tests():
if node is None:
return True # An exception block that catches everything.
else:
except_classes = node_context.eval_node(node)
for cls in except_classes:
from jedi.evaluate import iterable
if isinstance(cls, iterable.AbstractSequence) and \
cls.array_type == 'tuple':
# multiple exceptions
for lazy_context in cls.py__iter__():
for typ in lazy_context.infer():
if check_match(typ, exception):
return True
else:
if check_match(cls, exception):
return True
def check_hasattr(node, suite):
try:
assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
assert node.type in ('power', 'atom_expr')
base = node.children[0]
assert base.type == 'name' and base.value == 'hasattr'
trailer = node.children[1]
assert trailer.type == 'trailer'
arglist = trailer.children[1]
assert arglist.type == 'arglist'
from jedi.evaluate.param import TreeArguments
args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack())
# Arguments should be very simple
assert len(args) == 2
# Check name
key, lazy_context = args[1]
names = list(lazy_context.infer())
assert len(names) == 1 and isinstance(names[0], CompiledObject)
assert names[0].obj == payload[1].value
# Check objects
key, lazy_context = args[0]
objects = lazy_context.infer()
return payload[0] in objects
except AssertionError:
return False
obj = jedi_name
while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
if isinstance(obj, tree.Flow):
# try/except catch check
if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
return True
# hasattr check
if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
if check_hasattr(obj.children[1], obj.children[3]):
return True
obj = obj.parent
return False
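For context, the removed analysis module only reports a potential error when the user has not already handled it: _check_for_exception_catch looks for an enclosing try/except that catches the exception, and check_hasattr looks for a hasattr() guard on the same attribute. A small sketch of the two source patterns it recognizes (read_attr and might_not_exist are illustrative names, not from the removed file):

def read_attr(obj):
    try:
        return obj.might_not_exist       # inside try/except -> no 'attribute-error' reported
    except AttributeError:
        pass
    if hasattr(obj, 'might_not_exist'):  # hasattr guard, also recognized
        return obj.might_not_exist
    return None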

View File

@@ -1,81 +0,0 @@
"""
- the popular ``_memoize_default`` works like a typical memoize and returns the
default otherwise.
- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
"""
import inspect
_NO_DEFAULT = object()
def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False):
""" This is a typical memoization decorator, BUT there is one difference:
To prevent recursion it sets defaults.
Preventing recursion is in this case a much bigger win than speed. I
don't think that there is a big speed difference, but there are many cases
where recursion could happen (think about `a = b; b = a`).
"""
def func(function):
def wrapper(obj, *args, **kwargs):
# TODO These checks are kind of ugly and slow.
if evaluator_is_first_arg:
cache = obj.memoize_cache
elif second_arg_is_evaluator:
cache = args[0].memoize_cache # needed for meta classes
else:
cache = obj.evaluator.memoize_cache
try:
memo = cache[function]
except KeyError:
memo = {}
cache[function] = memo
key = (obj, args, frozenset(kwargs.items()))
if key in memo:
return memo[key]
else:
if default is not _NO_DEFAULT:
memo[key] = default
rv = function(obj, *args, **kwargs)
if inspect.isgenerator(rv):
rv = list(rv)
memo[key] = rv
return rv
return wrapper
return func
def evaluator_function_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default, evaluator_is_first_arg=True)(func)
return decorator
def evaluator_method_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default)(func)
return decorator
def _memoize_meta_class():
def decorator(call):
return _memoize_default(second_arg_is_evaluator=True)(call)
return decorator
class CachedMetaClass(type):
"""
This is basically the same as the decorator above; it just caches
class initializations. Either you do it this way or with decorators, but
with decorators you lose class access (isinstance, etc).
"""
@_memoize_meta_class()
def __call__(self, *args, **kwargs):
return super(CachedMetaClass, self).__call__(*args, **kwargs)
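For context, the memoizer removed above pre-seeds the cache with a default before evaluating, so that mutually recursive definitions (the `a = b; b = a` case from the docstring) terminate instead of recursing forever. A stand-alone sketch of that idea (simplified; memoize_default, chain_length and the GRAPH example are illustrative, not from the removed file):

_SENTINEL = object()

def memoize_default(default=_SENTINEL):
    def decorator(function):
        cache = {}
        def wrapper(*args):
            if args in cache:
                return cache[args]
            if default is not _SENTINEL:
                cache[args] = default    # pre-seed: recursive calls see this value
            result = function(*args)
            cache[args] = result
            return result
        return wrapper
    return decorator

GRAPH = {'a': 'b', 'b': 'a'}             # a cycle, like a = b; b = a

@memoize_default(default=0)
def chain_length(name):
    return chain_length(GRAPH[name]) + 1

print(chain_length('a'))                 # 2, instead of a RecursionError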

View File

@@ -1,637 +0,0 @@
"""
Imitate the parser representation.
"""
import inspect
import re
import sys
import os
import types
from functools import partial
from jedi._compatibility import builtins as _builtins, unicode, py_version
from jedi import debug
from jedi.cache import underscore_memoization, memoize_method
from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
ContextNameMixin
from jedi.evaluate.context import Context, LazyKnownContext
from jedi.evaluate.compiled.getattr_static import getattr_static
from . import fake
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
# Those types don't exist in typing.
MethodDescriptorType = type(str.replace)
WrapperDescriptorType = type(set.__iter__)
# `object.__subclasshook__` is an already executed descriptor.
object_class_dict = type.__dict__["__dict__"].__get__(object)
ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
ALLOWED_DESCRIPTOR_ACCESS = (
types.FunctionType,
types.GetSetDescriptorType,
types.MemberDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
ClassMethodDescriptorType,
staticmethod,
classmethod,
)
class CheckAttribute(object):
"""Raises an AttributeError if the attribute X isn't available."""
def __init__(self, func):
self.func = func
# Remove the py in front of e.g. py__call__.
self.check_name = func.__name__[2:]
def __get__(self, instance, owner):
# This might raise an AttributeError. That's wanted.
if self.check_name == '__iter__':
# Python iterators are a bit strange, because there's no need for
# the __iter__ function as long as __getitem__ is defined (it will
# just start with __getitem__(0)). This is especially true for
# Python 2 strings, where `str.__iter__` is not even defined.
try:
iter(instance.obj)
except TypeError:
raise AttributeError
else:
getattr(instance.obj, self.check_name)
return partial(self.func, instance)
class CompiledObject(Context):
path = None # modules have this attribute - set it to None.
used_names = lambda self: {} # To be consistent with modules.
def __init__(self, evaluator, obj, parent_context=None, faked_class=None):
super(CompiledObject, self).__init__(evaluator, parent_context)
self.obj = obj
# This attribute will not be set for most classes, except for fakes.
self.tree_node = faked_class
def get_root_node(self):
# To make things a bit easier with filters we add this method here.
return self.get_root_context()
@CheckAttribute
def py__call__(self, params):
if inspect.isclass(self.obj):
from jedi.evaluate.instance import CompiledInstance
return set([CompiledInstance(self.evaluator, self.parent_context, self, params)])
else:
return set(self._execute_function(params))
@CheckAttribute
def py__class__(self):
return create(self.evaluator, self.obj.__class__)
@CheckAttribute
def py__mro__(self):
return (self,) + tuple(create(self.evaluator, cls) for cls in self.obj.__mro__[1:])
@CheckAttribute
def py__bases__(self):
return tuple(create(self.evaluator, cls) for cls in self.obj.__bases__)
def py__bool__(self):
return bool(self.obj)
def py__file__(self):
try:
return self.obj.__file__
except AttributeError:
return None
def is_class(self):
return inspect.isclass(self.obj)
def py__doc__(self, include_call_signature=False):
return inspect.getdoc(self.obj) or ''
def get_param_names(self):
obj = self.obj
try:
if py_version < 33:
raise ValueError("inspect.signature was introduced in 3.3")
if py_version == 34:
# In 3.4 inspect.signature is wrong for str and int. This has
# been fixed in 3.5. The signature of object is returned,
# because no signature was found for str. Here we imitate 3.5
# logic and just ignore the signature if the magic methods
# don't match object.
# 3.3 doesn't even have the logic and returns nothing for str
# and classes that inherit from object.
user_def = inspect._signature_get_user_defined_method
if (inspect.isclass(obj)
and not user_def(type(obj), '__init__')
and not user_def(type(obj), '__new__')
and (obj.__init__ != object.__init__
or obj.__new__ != object.__new__)):
raise ValueError
signature = inspect.signature(obj)
except ValueError: # Has no signature
params_str, ret = self._parse_function_doc()
tokens = params_str.split(',')
if inspect.ismethoddescriptor(obj):
tokens.insert(0, 'self')
for p in tokens:
parts = p.strip().split('=')
yield UnresolvableParamName(self, parts[0])
else:
for signature_param in signature.parameters.values():
yield SignatureParamName(self, signature_param)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, repr(self.obj))
@underscore_memoization
def _parse_function_doc(self):
doc = self.py__doc__()
if doc is None:
return '', ''
return _parse_function_doc(doc)
@property
def api_type(self):
obj = self.obj
if inspect.isclass(obj):
return 'class'
elif inspect.ismodule(obj):
return 'module'
elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
return 'function'
# Everything else...
return 'instance'
@property
def type(self):
"""Imitate the tree.Node.type values."""
cls = self._get_class()
if inspect.isclass(cls):
return 'classdef'
elif inspect.ismodule(cls):
return 'file_input'
elif inspect.isbuiltin(cls) or inspect.ismethod(cls) or \
inspect.ismethoddescriptor(cls):
return 'funcdef'
@underscore_memoization
def _cls(self):
"""
We used to limit the lookups for instantiated objects like list(), but
this is not the case anymore. Python itself
"""
# Ensures that a CompiledObject is returned that is not an instance (like list)
return self
def _get_class(self):
if not fake.is_class_instance(self.obj) or \
inspect.ismethoddescriptor(self.obj): # slots
return self.obj
try:
return self.obj.__class__
except AttributeError:
# Happens with numpy.core.umath._UFUNC_API (you get it
# automatically by doing `import numpy`).
return type
def get_filters(self, search_global=False, is_instance=False,
until_position=None, origin_scope=None):
yield self._ensure_one_filter(is_instance)
@memoize_method
def _ensure_one_filter(self, is_instance):
"""
search_global shouldn't change the fact that there's one dict; this way
there's only one `object`.
"""
return CompiledObjectFilter(self.evaluator, self, is_instance)
@CheckAttribute
def py__getitem__(self, index):
if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
# Get rid of side effects, we won't call custom `__getitem__`s.
return set()
return set([create(self.evaluator, self.obj[index])])
@CheckAttribute
def py__iter__(self):
if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
# Get rid of side effects, we won't call custom `__getitem__`s.
return
for i, part in enumerate(self.obj):
if i > 20:
# Should not go crazy with large iterators
break
yield LazyKnownContext(create(self.evaluator, part))
def py__name__(self):
try:
return self._get_class().__name__
except AttributeError:
return None
@property
def name(self):
try:
name = self._get_class().__name__
except AttributeError:
name = repr(self.obj)
return CompiledContextName(self, name)
def _execute_function(self, params):
from jedi.evaluate import docstrings
if self.type != 'funcdef':
return
for name in self._parse_function_doc()[1].split():
try:
bltn_obj = getattr(_builtins, name)
except AttributeError:
continue
else:
if bltn_obj is None:
# We want to evaluate everything except None.
# TODO do we?
continue
bltn_obj = create(self.evaluator, bltn_obj)
for result in self.evaluator.execute(bltn_obj, params):
yield result
for type_ in docstrings.infer_return_types(self):
yield type_
def get_self_attributes(self):
return [] # Instance compatibility
def get_imports(self):
return [] # Builtins don't have imports
def dict_values(self):
return set(create(self.evaluator, v) for v in self.obj.values())
class CompiledName(AbstractNameDefinition):
def __init__(self, evaluator, parent_context, name):
self._evaluator = evaluator
self.parent_context = parent_context
self.string_name = name
def __repr__(self):
try:
name = self.parent_context.name # __name__ is not defined all the time
except AttributeError:
name = None
return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name)
@property
def api_type(self):
return next(iter(self.infer())).api_type
@underscore_memoization
def infer(self):
module = self.parent_context.get_root_context()
return [_create_from_name(self._evaluator, module, self.parent_context, self.string_name)]
class SignatureParamName(AbstractNameDefinition):
api_type = 'param'
def __init__(self, compiled_obj, signature_param):
self.parent_context = compiled_obj.parent_context
self._signature_param = signature_param
@property
def string_name(self):
return self._signature_param.name
def infer(self):
p = self._signature_param
evaluator = self.parent_context.evaluator
types = set()
if p.default is not p.empty:
types.add(create(evaluator, p.default))
if p.annotation is not p.empty:
annotation = create(evaluator, p.annotation)
types |= annotation.execute_evaluated()
return types
class UnresolvableParamName(AbstractNameDefinition):
api_type = 'param'
def __init__(self, compiled_obj, name):
self.parent_context = compiled_obj.parent_context
self.string_name = name
def infer(self):
return set()
class CompiledContextName(ContextNameMixin, AbstractNameDefinition):
def __init__(self, context, name):
self.string_name = name
self._context = context
self.parent_context = context.parent_context
class EmptyCompiledName(AbstractNameDefinition):
"""
Accessing some names will raise an exception. So that completions are still
available, give Jedi the option to return this object. It infers to
nothing.
"""
def __init__(self, evaluator, name):
self.parent_context = evaluator.BUILTINS
self.string_name = name
def infer(self):
return []
class CompiledObjectFilter(AbstractFilter):
name_class = CompiledName
def __init__(self, evaluator, compiled_object, is_instance=False):
self._evaluator = evaluator
self._compiled_object = compiled_object
self._is_instance = is_instance
@memoize_method
def get(self, name):
name = str(name)
obj = self._compiled_object.obj
try:
attr, is_get_descriptor = getattr_static(obj, name)
except AttributeError:
return []
else:
if is_get_descriptor \
and not type(attr) in ALLOWED_DESCRIPTOR_ACCESS:
# In case of descriptors that have get methods we cannot return
# its value, because that would mean code execution.
return [EmptyCompiledName(self._evaluator, name)]
if self._is_instance and name not in dir(obj):
return []
return [self._create_name(name)]
def values(self):
obj = self._compiled_object.obj
names = []
for name in dir(obj):
names += self.get(name)
is_instance = self._is_instance or fake.is_class_instance(obj)
# ``dir`` doesn't include the type names.
if not inspect.ismodule(obj) and (obj is not type) and not is_instance:
for filter in create(self._evaluator, type).get_filters():
names += filter.values()
return names
def _create_name(self, name):
return self.name_class(self._evaluator, self._compiled_object, name)
def dotted_from_fs_path(fs_path, sys_path):
"""
Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e.
compares the path with sys.path and then returns the dotted_path. If the
path is not in sys.path, it just returns None.
"""
if os.path.basename(fs_path).startswith('__init__.'):
# We are calculating the path. __init__ files are not interesting.
fs_path = os.path.dirname(fs_path)
# prefer
# - UNIX
# /path/to/pythonX.Y/lib-dynload
# /path/to/pythonX.Y/site-packages
# - Windows
# C:\path\to\DLLs
# C:\path\to\Lib\site-packages
# over
# - UNIX
# /path/to/pythonX.Y
# - Windows
# C:\path\to\Lib
path = ''
for s in sys_path:
if (fs_path.startswith(s) and len(path) < len(s)):
path = s
# - Windows
# X:\path\to\lib-dynload/datetime.pyd => datetime
module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/')
# - Windows
# Replace like X:\path\to\something/foo/bar.py
return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')
def load_module(evaluator, path=None, name=None):
sys_path = evaluator.sys_path
if path is not None:
dotted_path = dotted_from_fs_path(path, sys_path=sys_path)
else:
dotted_path = name
if dotted_path is None:
p, _, dotted_path = path.partition(os.path.sep)
sys_path.insert(0, p)
temp, sys.path = sys.path, sys_path
try:
__import__(dotted_path)
except RuntimeError:
if 'PySide' in dotted_path or 'PyQt' in dotted_path:
# RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap
# the QObject class.
# See https://github.com/davidhalter/jedi/pull/483
return None
raise
except ImportError:
# If a module is "corrupt" or not really a Python module or whatever.
debug.warning('Module %s not importable in path %s.', dotted_path, path)
return None
finally:
sys.path = temp
# Just access the cache after import, because of #59 as well as the very
# complicated import structure of Python.
module = sys.modules[dotted_path]
return create(evaluator, module)
docstr_defaults = {
'floating point number': 'float',
'character': 'str',
'integer': 'int',
'dictionary': 'dict',
'string': 'str',
}
def _parse_function_doc(doc):
"""
Takes a function and returns the params and return value as a tuple.
This is nothing more than a docstring parser.
TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
TODO docstrings like 'tuple of integers'
"""
# parse round parentheses: def func(a, (b,c))
try:
count = 0
start = doc.index('(')
for i, s in enumerate(doc[start:]):
if s == '(':
count += 1
elif s == ')':
count -= 1
if count == 0:
end = start + i
break
param_str = doc[start + 1:end]
except (ValueError, UnboundLocalError):
# ValueError for doc.index
# UnboundLocalError for undefined end in last line
debug.dbg('no brackets found - no param')
end = 0
param_str = ''
else:
# remove square brackets that mark an optional param (= None)
def change_options(m):
args = m.group(1).split(',')
for i, a in enumerate(args):
if a and '=' not in a:
args[i] += '=None'
return ','.join(args)
while True:
param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
change_options, param_str)
if changes == 0:
break
param_str = param_str.replace('-', '_') # see: isinstance.__doc__
# parse return value
r = re.search('-[>-]* ', doc[end:end + 7])
if r is None:
ret = ''
else:
index = end + r.end()
# get result type, which can contain newlines
pattern = re.compile(r'(,\n|[^\n-])+')
ret_str = pattern.match(doc, index).group(0).strip()
# New object -> object()
ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
ret = docstr_defaults.get(ret_str, ret_str)
return param_str, ret
def _create_from_name(evaluator, module, compiled_object, name):
obj = compiled_object.obj
faked = None
try:
faked = fake.get_faked(evaluator, module, obj, parent_context=compiled_object, name=name)
if faked.type == 'funcdef':
from jedi.evaluate.representation import FunctionContext
return FunctionContext(evaluator, compiled_object, faked)
except fake.FakeDoesNotExist:
pass
try:
obj = getattr(obj, name)
except AttributeError:
# Happens e.g. in properties of
# PyQt4.QtGui.QStyleOptionComboBox.currentText
# -> just set it to None
obj = None
return create(evaluator, obj, parent_context=compiled_object, faked=faked)
def builtin_from_name(evaluator, string):
bltn_obj = getattr(_builtins, string)
return create(evaluator, bltn_obj)
def _a_generator(foo):
"""Used to have an object to return for generators."""
yield 42
yield foo
_SPECIAL_OBJECTS = {
'FUNCTION_CLASS': type(load_module),
'METHOD_CLASS': type(CompiledObject.is_class),
'MODULE_CLASS': type(os),
'GENERATOR_OBJECT': _a_generator(1.0),
'BUILTINS': _builtins,
}
def get_special_object(evaluator, identifier):
obj = _SPECIAL_OBJECTS[identifier]
return create(evaluator, obj, parent_context=create(evaluator, _builtins))
def compiled_objects_cache(attribute_name):
def decorator(func):
"""
This decorator caches just the ids, as opposed to caching the object itself.
Caching the id has the advantage that an object doesn't need to be
hashable.
"""
def wrapper(evaluator, obj, parent_context=None, module=None, faked=None):
cache = getattr(evaluator, attribute_name)
# Do a very cheap form of caching here.
key = id(obj), id(parent_context)
try:
return cache[key][0]
except KeyError:
# TODO this whole decorator is way too ugly
result = func(evaluator, obj, parent_context, module, faked)
# Need to cache all of them, otherwise the id could be overwritten.
cache[key] = result, obj, parent_context, module, faked
return result
return wrapper
return decorator
@compiled_objects_cache('compiled_cache')
def create(evaluator, obj, parent_context=None, module=None, faked=None):
"""
A very weird interface class to this module. The more options provided, the
more accurate the loading of compiled objects is.
"""
if inspect.ismodule(obj):
if parent_context is not None:
# Modules don't have parents, be careful with caching: recurse.
return create(evaluator, obj)
else:
if parent_context is None and obj is not _builtins:
return create(evaluator, obj, create(evaluator, _builtins))
try:
faked = fake.get_faked(evaluator, module, obj, parent_context=parent_context)
if faked.type == 'funcdef':
from jedi.evaluate.representation import FunctionContext
return FunctionContext(evaluator, parent_context, faked)
except fake.FakeDoesNotExist:
pass
return CompiledObject(evaluator, obj, parent_context, faked)
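For context, when inspect.signature fails, the removed module falls back to parsing the first line of a builtin's docstring; the change_options helper rewrites optional parameters written as `[, x]` into `, x=None`. A small sketch of that rewrite on its own (normalize_doc_params is a hypothetical name, not from the removed file):

import re

def normalize_doc_params(param_str):
    # '[, x]' groups in a docstring signature become ', x=None'.
    def change_options(m):
        args = m.group(1).split(',')
        return ','.join(a if not a or '=' in a else a + '=None' for a in args)
    while True:
        param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', change_options, param_str)
        if changes == 0:
            return param_str

print(normalize_doc_params('pattern, string[, count]'))   # pattern, string, count=None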

View File

@@ -1,213 +0,0 @@
"""
Loads functions that are mixed into the standard library. E.g. builtins are
written in C (binaries), but my autocompletion only understands Python code. By
mixing in Python code, the autocompletion should work much better for builtins.
"""
import os
import inspect
import types
from itertools import chain
from parso.python import tree
from jedi._compatibility import is_py3, builtins, unicode, is_py34
modules = {}
MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
# a __class__ attribute.
NOT_CLASS_TYPES = (
types.BuiltinFunctionType,
types.CodeType,
types.FrameType,
types.FunctionType,
types.GeneratorType,
types.GetSetDescriptorType,
types.LambdaType,
types.MemberDescriptorType,
types.MethodType,
types.ModuleType,
types.TracebackType,
MethodDescriptorType
)
if is_py3:
NOT_CLASS_TYPES += (
types.MappingProxyType,
types.SimpleNamespace
)
if is_py34:
NOT_CLASS_TYPES += (types.DynamicClassAttribute,)
class FakeDoesNotExist(Exception):
pass
def _load_faked_module(grammar, module):
module_name = module.__name__
if module_name == '__builtin__' and not is_py3:
module_name = 'builtins'
try:
return modules[module_name]
except KeyError:
path = os.path.dirname(os.path.abspath(__file__))
try:
with open(os.path.join(path, 'fake', module_name) + '.pym') as f:
source = f.read()
except IOError:
modules[module_name] = None
return
modules[module_name] = m = grammar.parse(unicode(source))
if module_name == 'builtins' and not is_py3:
# There are two implementations of `open`, one for Python 2 and one for Python 3.
# -> Rename the Python 2 version (see `fake/builtins.pym`).
open_func = _search_scope(m, 'open')
open_func.children[1].value = 'open_python3'
open_func = _search_scope(m, 'open_python2')
open_func.children[1].value = 'open'
return m
def _search_scope(scope, obj_name):
for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
if s.name.value == obj_name:
return s
def get_module(obj):
if inspect.ismodule(obj):
return obj
try:
obj = obj.__objclass__
except AttributeError:
pass
try:
imp_plz = obj.__module__
except AttributeError:
# Unfortunately in some cases like `int` there's no __module__
return builtins
else:
if imp_plz is None:
# Happens for example in `(_ for _ in []).send.__module__`.
return builtins
else:
try:
return __import__(imp_plz)
except ImportError:
# __module__ can be something arbitrary that doesn't exist.
return builtins
def _faked(grammar, module, obj, name):
# Crazy underscore actions to try to escape all the internal madness.
if module is None:
module = get_module(obj)
faked_mod = _load_faked_module(grammar, module)
if faked_mod is None:
return None, None
# Having the module as a `parso.python.tree.Module`, we need to scan
# for methods.
if name is None:
if inspect.isbuiltin(obj) or inspect.isclass(obj):
return _search_scope(faked_mod, obj.__name__), faked_mod
elif not inspect.isclass(obj):
# object is a method or descriptor
try:
objclass = obj.__objclass__
except AttributeError:
return None, None
else:
cls = _search_scope(faked_mod, objclass.__name__)
if cls is None:
return None, None
return _search_scope(cls, obj.__name__), faked_mod
else:
if obj is module:
return _search_scope(faked_mod, name), faked_mod
else:
try:
cls_name = obj.__name__
except AttributeError:
return None, None
cls = _search_scope(faked_mod, cls_name)
if cls is None:
return None, None
return _search_scope(cls, name), faked_mod
return None, None
def memoize_faked(obj):
"""
A typical memoize function that ignores issues with non-hashable results.
"""
cache = obj.cache = {}
def memoizer(*args, **kwargs):
key = (obj, args, frozenset(kwargs.items()))
try:
result = cache[key]
except (TypeError, ValueError):
return obj(*args, **kwargs)
except KeyError:
result = obj(*args, **kwargs)
if result is not None:
cache[key] = result  # cache the value that is actually returned
return result
else:
return result
return memoizer
@memoize_faked
def _get_faked(grammar, module, obj, name=None):
result, fake_module = _faked(grammar, module, obj, name)
if result is None:
# We're not interested in classes. What we want is functions.
raise FakeDoesNotExist
elif result.type == 'classdef':
return result, fake_module
else:
# Set the docstr which was previously not set (faked modules don't
# contain it).
assert result.type == 'funcdef'
doc = '"""%s"""' % obj.__doc__ # TODO need escapes.
suite = result.children[-1]
string = tree.String(doc, (0, 0), '')
new_line = tree.Newline('\n', (0, 0))
docstr_node = tree.PythonNode('simple_stmt', [string, new_line])
suite.children.insert(1, docstr_node)
return result, fake_module
def get_faked(evaluator, module, obj, name=None, parent_context=None):
if parent_context and parent_context.tree_node is not None:
# Try to search in already clearly defined stuff.
found = _search_scope(parent_context.tree_node, name)
if found is not None:
return found
else:
raise FakeDoesNotExist
faked, fake_module = _get_faked(evaluator.latest_grammar, module and module.obj, obj, name)
if module is not None:
module.get_used_names = fake_module.get_used_names
return faked
def is_class_instance(obj):
"""Like inspect.* methods."""
try:
cls = obj.__class__
except AttributeError:
return False
else:
return cls != type and not issubclass(cls, NOT_CLASS_TYPES)
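For context, the fake machinery removed above parses small hand-written `.pym` stubs with parso and then looks up class/function definitions by name (_search_scope). A minimal sketch of that lookup, assuming parso is installed (the FAKE_SOURCE stub and the search_scope name are illustrative, not from the removed file):

from itertools import chain

import parso

FAKE_SOURCE = '''
def open(file, mode='r'):
    return file
'''

def search_scope(scope, obj_name):
    # Same lookup idea as the removed _search_scope helper.
    for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
        if s.name.value == obj_name:
            return s

grammar = parso.load_grammar()
module = grammar.parse(FAKE_SOURCE)
print(search_scope(module, 'open'))   # a parso funcdef node for open()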

View File

@@ -1,9 +0,0 @@
class partial():
def __init__(self, func, *args, **keywords):
self.__func = func
self.__args = args
self.__keywords = keywords
def __call__(self, *args, **kwargs):
# TODO should be **dict(self.__keywords, **kwargs)
return self.__func(*(self.__args + args), **self.__keywords)

View File

@@ -1,26 +0,0 @@
def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None):
return Connection()
class Connection():
def cursor(self):
return Cursor()
class Cursor():
def cursor(self):
return Cursor()
def fetchone(self):
return Row()
def fetchmany(self, size=cursor.arraysize):
return [self.fetchone()]
def fetchall(self):
return [self.fetchone()]
class Row():
def keys(self):
return ['']

View File

@@ -1,99 +0,0 @@
def compile():
class SRE_Match():
endpos = int()
lastgroup = int()
lastindex = int()
pos = int()
string = str()
regs = ((int(), int()),)
def __init__(self, pattern):
self.re = pattern
def start(self):
return int()
def end(self):
return int()
def span(self):
return int(), int()
def expand(self):
return str()
def group(self, nr):
return str()
def groupdict(self):
return {str(): str()}
def groups(self):
return (str(),)
class SRE_Pattern():
flags = int()
groupindex = {}
groups = int()
pattern = str()
def findall(self, string, pos=None, endpos=None):
"""
findall(string[, pos[, endpos]]) --> list.
Return a list of all non-overlapping matches of pattern in string.
"""
return [str()]
def finditer(self, string, pos=None, endpos=None):
"""
finditer(string[, pos[, endpos]]) --> iterator.
Return an iterator over all non-overlapping matches for the
RE pattern in string. For each match, the iterator returns a
match object.
"""
yield SRE_Match(self)
def match(self, string, pos=None, endpos=None):
"""
match(string[, pos[, endpos]]) --> match object or None.
Matches zero or more characters at the beginning of the string
pattern
"""
return SRE_Match(self)
def scanner(self, string, pos=None, endpos=None):
pass
def search(self, string, pos=None, endpos=None):
"""
search(string[, pos[, endpos]]) --> match object or None.
Scan through string looking for a match, and return a corresponding
MatchObject instance. Return None if no position in the string matches.
"""
return SRE_Match(self)
def split(self, string, maxsplit=0):
"""
split(string[, maxsplit = 0]) --> list.
Split string by the occurrences of pattern.
"""
return [str()]
def sub(self, repl, string, count=0):
"""
sub(repl, string[, count = 0]) --> newstring
Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl.
"""
return str()
def subn(self, repl, string, count=0):
"""
subn(repl, string[, count = 0]) --> (newstring, number of subs)
Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the
replacement repl.
"""
return (str(), int())
return SRE_Pattern()

View File

@@ -1,9 +0,0 @@
def proxy(object, callback=None):
return object
class ref():
def __init__(self, object, callback=None):
self.__object = object
def __call__(self):
return self.__object

View File

@@ -1,274 +0,0 @@
"""
Pure Python implementation of some builtins.
This code is not going to be executed anywhere.
These implementations are not always correct, but should work as well as
possible for the auto completion.
"""
def next(iterator, default=None):
if random.choice([0, 1]):
if hasattr("next"):
return iterator.next()
else:
return iterator.__next__()
else:
if default is not None:
return default
def iter(collection, sentinel=None):
if sentinel:
yield collection()
else:
for c in collection:
yield c
def range(start, stop=None, step=1):
return [0]
class file():
def __iter__(self):
yield ''
def next(self):
return ''
def readlines(self):
return ['']
def __enter__(self):
return self
class xrange():
# Attention: this class doesn't exist in Py3k (there it is called range).
def __iter__(self):
yield 1
def count(self):
return 1
def index(self):
return 1
def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
import io
return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd)
def open_python2(name, mode=None, buffering=None):
return file(name, mode, buffering)
#--------------------------------------------------------
# descriptors
#--------------------------------------------------------
class property():
def __init__(self, fget, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
self.__doc__ = doc
def __get__(self, obj, cls):
return self.fget(obj)
def __set__(self, obj, value):
self.fset(obj, value)
def __delete__(self, obj):
self.fdel(obj)
def setter(self, func):
self.fset = func
return self
def getter(self, func):
self.fget = func
return self
def deleter(self, func):
self.fdel = func
return self
class staticmethod():
def __init__(self, func):
self.__func = func
def __get__(self, obj, cls):
return self.__func
class classmethod():
def __init__(self, func):
self.__func = func
def __get__(self, obj, cls):
def _method(*args, **kwargs):
return self.__func(cls, *args, **kwargs)
return _method
#--------------------------------------------------------
# array stuff
#--------------------------------------------------------
class list():
def __init__(self, iterable=[]):
self.__iterable = []
for i in iterable:
self.__iterable += [i]
def __iter__(self):
for i in self.__iterable:
yield i
def __getitem__(self, y):
return self.__iterable[y]
def pop(self):
return self.__iterable[int()]
class tuple():
def __init__(self, iterable=[]):
self.__iterable = []
for i in iterable:
self.__iterable += [i]
def __iter__(self):
for i in self.__iterable:
yield i
def __getitem__(self, y):
return self.__iterable[y]
def index(self):
return 1
def count(self):
return 1
class set():
def __init__(self, iterable=[]):
self.__iterable = iterable
def __iter__(self):
for i in self.__iterable:
yield i
def pop(self):
return list(self.__iterable)[-1]
def copy(self):
return self
def difference(self, other):
return self - other
def intersection(self, other):
return self & other
def symmetric_difference(self, other):
return self ^ other
def union(self, other):
return self | other
class frozenset():
def __init__(self, iterable=[]):
self.__iterable = iterable
def __iter__(self):
for i in self.__iterable:
yield i
def copy(self):
return self
class dict():
def __init__(self, **elements):
self.__elements = elements
def clear(self):
# has a strange docstr
pass
def get(self, k, d=None):
# TODO implement
try:
#return self.__elements[k]
pass
except KeyError:
return d
def values(self):
return self.__elements.values()
def setdefault(self, k, d):
# TODO maybe also return the content
return d
class enumerate():
def __init__(self, sequence, start=0):
self.__sequence = sequence
def __iter__(self):
for i in self.__sequence:
yield 1, i
def __next__(self):
return next(self.__iter__())
def next(self):
return next(self.__iter__())
class reversed():
def __init__(self, sequence):
self.__sequence = sequence
def __iter__(self):
for i in self.__sequence:
yield i
def __next__(self):
return next(self.__iter__())
def next(self):
return next(self.__iter__())
def sorted(iterable, cmp=None, key=None, reverse=False):
return iterable
#--------------------------------------------------------
# basic types
#--------------------------------------------------------
class int():
def __init__(self, x, base=None):
pass
class str():
def __init__(self, obj):
pass
def strip(self):
return str()
def split(self):
return [str()]
class type():
def mro():
return [object]
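For context, the removed builtins.pym models property, staticmethod and classmethod as ordinary Python classes that implement the descriptor protocol, which is all the completion engine needs. A tiny runnable sketch of the same descriptor shape (the prop and Point names are illustrative, not from the removed file):

class prop(object):
    def __init__(self, fget):
        self.fget = fget
    def __get__(self, obj, cls):
        return self.fget(obj)            # attribute access triggers fget

class Point(object):
    def __init__(self, x):
        self._x = x
    x = prop(lambda self: self._x)

assert Point(3).x == 3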

View File

@@ -1,4 +0,0 @@
class datetime():
@staticmethod
def now():
return datetime()

Some files were not shown because too many files have changed in this diff.