This commit is contained in:
Vitor Santos Costa
2018-01-05 16:57:38 +00:00
parent 814aa2bd4c
commit 9c862c21bc
271 changed files with 43711 additions and 6129 deletions

View File

@@ -1,4 +1,3 @@
set (EXTRAS
MANIFEST.in
YAP_KERNEL.md
@@ -60,21 +59,14 @@
yap_kernel/pylab/config.py
)
configure_file(setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
configure_file(${CMAKE_SOURCE_DIR}/misc/editors/prolog.js.in ${CMAKE_CURRENT_BINARY_DIR}/prolog.js )
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources )
file( COPY ${CMAKE_SOURCE_DIR}/docs/icons/yap_32x32x32.png
DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/ )
file( RENAME ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/yap_32x32x32.png ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/logo-32x32.png )
file( COPY ${CMAKE_SOURCE_DIR}/docs/icons/yap_64x64x32.png DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources )
file( RENAME ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/yap_64x64x32.png ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/logo-64x64.png )
file( COPY ${CMAKE_CURRENT_SOURCE_DIR}/kernel.js DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/ )
file( COPY ${CMAKE_SOURCE_DIR}/misc/editors/prolog.js DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/yap_kernel/resources/)
set(SETUP_PY ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
add_custom_target( YAPKernel ALL
COMMAND ${PYTHON_EXECUTABLE} ${SETUP_PY} build sdist bdist
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS YAP4PY
)
@@ -83,4 +75,3 @@
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})")
install(FILES ${PL_SOURCES} DESTINATION ${libpl} )

View File

@@ -1,204 +1,9 @@
define( function() {
return {onload:function() {
console.info('Kernel specific javascript loaded');
// do more things here, like define a codemirror mode
require(['codemirror/lib/codemirror', 'codemirror/mode/meta'], function(CodeMirror) {
CodeMirror.mI = [
{name: "APL", mime: "text/apl", mode: "apl", ext: ["dyalog", "apl"]},
{name: "PGP", mimes: ["application/pgp", "application/pgp-keys", "application/pgp-signature"], mode: "asciiarmor", ext: ["pgp"]},
{name: "ASN.1", mime: "text/x-ttcn-asn", mode: "asn.1", ext: ["asn", "asn1"]},
{name: "Asterisk", mime: "text/x-asterisk", mode: "asterisk", file: /^extensions\.conf$/i},
{name: "Brainfuck", mime: "text/x-brainfuck", mode: "brainfuck", ext: ["b", "bf"]},
{name: "C", mime: "text/x-csrc", mode: "clike", ext: ["c", "h"]},
{name: "C++", mime: "text/x-c++src", mode: "clike", ext: ["cpp", "c++", "cc", "cxx", "hpp", "h++", "hh", "hxx"], alias: ["cpp"]},
{name: "Cobol", mime: "text/x-cobol", mode: "cobol", ext: ["cob", "cpy"]},
{name: "C#", mime: "text/x-csharp", mode: "clike", ext: ["cs"], alias: ["csharp"]},
{name: "Clojure", mime: "text/x-clojure", mode: "clojure", ext: ["clj", "cljc", "cljx"]},
{name: "ClojureScript", mime: "text/x-clojurescript", mode: "clojure", ext: ["cljs"]},
{name: "Closure Stylesheets (GSS)", mime: "text/x-gss", mode: "css", ext: ["gss"]},
{name: "CMake", mime: "text/x-cmake", mode: "cmake", ext: ["cmake", "cmake.in"], file: /^CMakeLists.txt$/},
{name: "CoffeeScript", mime: "text/x-coffeescript", mode: "coffeescript", ext: ["coffee"], alias: ["coffee", "coffee-script"]},
{name: "Common Lisp", mime: "text/x-common-lisp", mode: "commonlisp", ext: ["cl", "lisp", "el"], alias: ["lisp"]},
{name: "Cypher", mime: "application/x-cypher-query", mode: "cypher", ext: ["cyp", "cypher"]},
{name: "Cython", mime: "text/x-cython", mode: "python", ext: ["pyx", "pxd", "pxi"]},
{name: "Crystal", mime: "text/x-crystal", mode: "crystal", ext: ["cr"]},
{name: "CSS", mime: "text/css", mode: "css", ext: ["css"]},
{name: "CQL", mime: "text/x-cassandra", mode: "sql", ext: ["cql"]},
{name: "D", mime: "text/x-d", mode: "d", ext: ["d"]},
{name: "Dart", mimes: ["application/dart", "text/x-dart"], mode: "dart", ext: ["dart"]},
{name: "diff", mime: "text/x-diff", mode: "diff", ext: ["diff", "patch"]},
{name: "Django", mime: "text/x-django", mode: "django"},
{name: "Dockerfile", mime: "text/x-dockerfile", mode: "dockerfile", file: /^Dockerfile$/},
{name: "DTD", mime: "application/xml-dtd", mode: "dtd", ext: ["dtd"]},
{name: "Dylan", mime: "text/x-dylan", mode: "dylan", ext: ["dylan", "dyl", "intr"]},
{name: "EBNF", mime: "text/x-ebnf", mode: "ebnf"},
{name: "ECL", mime: "text/x-ecl", mode: "ecl", ext: ["ecl"]},
{name: "edn", mime: "application/edn", mode: "clojure", ext: ["edn"]},
{name: "Eiffel", mime: "text/x-eiffel", mode: "eiffel", ext: ["e"]},
{name: "Elm", mime: "text/x-elm", mode: "elm", ext: ["elm"]},
{name: "Embedded Javascript", mime: "application/x-ejs", mode: "htmlembedded", ext: ["ejs"]},
{name: "Embedded Ruby", mime: "application/x-erb", mode: "htmlembedded", ext: ["erb"]},
{name: "Erlang", mime: "text/x-erlang", mode: "erlang", ext: ["erl"]},
{name: "Factor", mime: "text/x-factor", mode: "factor", ext: ["factor"]},
{name: "FCL", mime: "text/x-fcl", mode: "fcl"},
{name: "Forth", mime: "text/x-forth", mode: "forth", ext: ["forth", "fth", "4th"]},
{name: "Fortran", mime: "text/x-fortran", mode: "fortran", ext: ["f", "for", "f77", "f90"]},
{name: "F#", mime: "text/x-fsharp", mode: "mllike", ext: ["fs"], alias: ["fsharp"]},
{name: "Gas", mime: "text/x-gas", mode: "gas", ext: ["s"]},
{name: "Gherkin", mime: "text/x-feature", mode: "gherkin", ext: ["feature"]},
{name: "GitHub Flavored Markdown", mime: "text/x-gfm", mode: "gfm", file: /^(readme|contributing|history).md$/i},
{name: "Go", mime: "text/x-go", mode: "go", ext: ["go"]},
{name: "Groovy", mime: "text/x-groovy", mode: "groovy", ext: ["groovy", "gradle"], file: /^Jenkinsfile$/},
{name: "HAML", mime: "text/x-haml", mode: "haml", ext: ["haml"]},
{name: "Haskell", mime: "text/x-haskell", mode: "haskell", ext: ["hs"]},
{name: "Haskell (Literate)", mime: "text/x-literate-haskell", mode: "haskell-literate", ext: ["lhs"]},
{name: "Haxe", mime: "text/x-haxe", mode: "haxe", ext: ["hx"]},
{name: "HXML", mime: "text/x-hxml", mode: "haxe", ext: ["hxml"]},
{name: "ASP.NET", mime: "application/x-aspx", mode: "htmlembedded", ext: ["aspx"], alias: ["asp", "aspx"]},
{name: "HTML", mime: "text/html", mode: "htmlmixed", ext: ["html", "htm"], alias: ["xhtml"]},
{name: "HTTP", mime: "message/http", mode: "http"},
{name: "IDL", mime: "text/x-idl", mode: "idl", ext: ["pro"]},
{name: "Pug", mime: "text/x-pug", mode: "pug", ext: ["jade", "pug"], alias: ["jade"]},
{name: "Java", mime: "text/x-java", mode: "clike", ext: ["java"]},
{name: "Java Server Pages", mime: "application/x-jsp", mode: "htmlembedded", ext: ["jsp"], alias: ["jsp"]},
{name: "JavaScript", mimes: ["text/javascript", "text/ecmascript", "application/javascript", "application/x-javascript", "application/ecmascript"],
mode: "javascript", ext: ["js"], alias: ["ecmascript", "js", "node"]},
{name: "JSON", mimes: ["application/json", "application/x-json"], mode: "javascript", ext: ["json", "map"], alias: ["json5"]},
{name: "JSON-LD", mime: "application/ld+json", mode: "javascript", ext: ["jsonld"], alias: ["jsonld"]},
{name: "JSX", mime: "text/jsx", mode: "jsx", ext: ["jsx"]},
{name: "Jinja2", mime: "null", mode: "jinja2"},
{name: "Julia", mime: "text/x-julia", mode: "julia", ext: ["jl"]},
{name: "Kotlin", mime: "text/x-kotlin", mode: "clike", ext: ["kt"]},
{name: "LESS", mime: "text/x-less", mode: "css", ext: ["less"]},
{name: "LiveScript", mime: "text/x-livescript", mode: "livescript", ext: ["ls"], alias: ["ls"]},
{name: "Lua", mime: "text/x-lua", mode: "lua", ext: ["lua"]},
{name: "Markdown", mime: "text/x-markdown", mode: "markdown", ext: ["markdown", "md", "mkd"]},
{name: "mIRC", mime: "text/mirc", mode: "mirc"},
{name: "MariaDB SQL", mime: "text/x-mariadb", mode: "sql"},
{name: "Mathematica", mime: "text/x-mathematica", mode: "mathematica", ext: ["m", "nb"]},
{name: "Modelica", mime: "text/x-modelica", mode: "modelica", ext: ["mo"]},
{name: "MUMPS", mime: "text/x-mumps", mode: "mumps", ext: ["mps"]},
{name: "MS SQL", mime: "text/x-mssql", mode: "sql"},
{name: "mbox", mime: "application/mbox", mode: "mbox", ext: ["mbox"]},
{name: "MySQL", mime: "text/x-mysql", mode: "sql"},
{name: "Nginx", mime: "text/x-nginx-conf", mode: "nginx", file: /nginx.*\.conf$/i},
{name: "NSIS", mime: "text/x-nsis", mode: "nsis", ext: ["nsh", "nsi"]},
{name: "NTriples", mime: "text/n-triples", mode: "ntriples", ext: ["nt"]},
{name: "Objective C", mime: "text/x-objectivec", mode: "clike", ext: ["m", "mm"], alias: ["objective-c", "objc"]},
{name: "OCaml", mime: "text/x-ocaml", mode: "mllike", ext: ["ml", "mli", "mll", "mly"]},
{name: "Octave", mime: "text/x-octave", mode: "octave", ext: ["m"]},
{name: "Oz", mime: "text/x-oz", mode: "oz", ext: ["oz"]},
{name: "Pascal", mime: "text/x-pascal", mode: "pascal", ext: ["p", "pas"]},
{name: "PEG.js", mime: "null", mode: "pegjs", ext: ["jsonld"]},
{name: "Perl", mime: "text/x-perl", mode: "perl", ext: ["pl", "pm"]},
{name: "PHP", mime: "application/x-httpd-php", mode: "php", ext: ["php", "php3", "php4", "php5", "phtml"]},
{name: "Pig", mime: "text/x-pig", mode: "pig", ext: ["pig"]},
{name: "Plain Text", mime: "text/plain", mode: "null", ext: ["txt", "text", "conf", "def", "list", "log"]},
{name: "PLSQL", mime: "text/x-plsql", mode: "sql", ext: ["pls"]},
{name: "PowerShell", mime: "application/x-powershell", mode: "powershell", ext: ["ps1", "psd1", "psm1"]},
{name: "Properties files", mime: "text/x-properties", mode: "properties", ext: ["properties", "ini", "in"], alias: ["ini", "properties"]},
{ name: "Prolog", mime: "text/x-prolog",
mode: "prolog", ext: ["pl", "yap", "yss", "P"] },
{name: "ProtoBuf", mime: "text/x-protobuf", mode: "protobuf", ext: ["proto"]},
< {name: "Python", mime: "text/x-python", mode: "python", ext: ["BUILD", "bzl", "py", "pyw"], file: /^(BUCK|BUILD)$/},
{name: "Puppet", mime: "text/x-puppet", mode: "puppet", ext: ["pp"]},
{name: "Q", mime: "text/x-q", mode: "q", ext: ["q"]},
{name: "R", mime: "text/x-rsrc", mode: "r", ext: ["r", "R"], alias: ["rscript"]},
{name: "reStructuredText", mime: "text/x-rst", mode: "rst", ext: ["rst"], alias: ["rst"]},
{name: "RPM Changes", mime: "text/x-rpm-changes", mode: "rpm"},
{name: "RPM Spec", mime: "text/x-rpm-spec", mode: "rpm", ext: ["spec"]},
{name: "Ruby", mime: "text/x-ruby", mode: "ruby", ext: ["rb"], alias: ["jruby", "macruby", "rake", "rb", "rbx"]},
{name: "Rust", mime: "text/x-rustsrc", mode: "rust", ext: ["rs"]},
{name: "SAS", mime: "text/x-sas", mode: "sas", ext: ["sas"]},
{name: "Sass", mime: "text/x-sass", mode: "sass", ext: ["sass"]},
{name: "Scala", mime: "text/x-scala", mode: "clike", ext: ["scala"]},
{name: "Scheme", mime: "text/x-scheme", mode: "scheme", ext: ["scm", "ss"]},
{name: "SCSS", mime: "text/x-scss", mode: "css", ext: ["scss"]},
{name: "Shell", mime: "text/x-sh", mode: "shell", ext: ["sh", "ksh", "bash"], alias: ["bash", "sh", "zsh"], file: /^PKGBUILD$/},
{name: "Sieve", mime: "application/sieve", mode: "sieve", ext: ["siv", "sieve"]},
{name: "Slim", mimes: ["text/x-slim", "application/x-slim"], mode: "slim", ext: ["slim"]},
{name: "Smalltalk", mime: "text/x-stsrc", mode: "smalltalk", ext: ["st"]},
{name: "Smarty", mime: "text/x-smarty", mode: "smarty", ext: ["tpl"]},
{name: "Solr", mime: "text/x-solr", mode: "solr"},
{name: "Soy", mime: "text/x-soy", mode: "soy", ext: ["soy"], alias: ["closure template"]},
{name: "SPARQL", mime: "application/sparql-query", mode: "sparql", ext: ["rq", "sparql"], alias: ["sparul"]},
{name: "Spreadsheet", mime: "text/x-spreadsheet", mode: "spreadsheet", alias: ["excel", "formula"]},
{name: "SQL", mime: "text/x-sql", mode: "sql", ext: ["sql"]},
{name: "Squirrel", mime: "text/x-squirrel", mode: "clike", ext: ["nut"]},
{name: "Stylus", mime: "text/x-styl", mode: "stylus", ext: ["styl"]},
{name: "Swift", mime: "text/x-swift", mode: "swift", ext: ["swift"]},
{name: "sTeX", mime: "text/x-stex", mode: "stex"},
{name: "LaTeX", mime: "text/x-latex", mode: "stex", ext: ["text", "ltx"], alias: ["tex"]},
{name: "SystemVerilog", mime: "text/x-systemverilog", mode: "verilog", ext: ["v"]},
{name: "Tcl", mime: "text/x-tcl", mode: "tcl", ext: ["tcl"]},
{name: "Textile", mime: "text/x-textile", mode: "textile", ext: ["textile"]},
{name: "TiddlyWiki ", mime: "text/x-tiddlywiki", mode: "tiddlywiki"},
{name: "Tiki wiki", mime: "text/tiki", mode: "tiki"},
{name: "TOML", mime: "text/x-toml", mode: "toml", ext: ["toml"]},
{name: "Tornado", mime: "text/x-tornado", mode: "tornado"},
{name: "troff", mime: "text/troff", mode: "troff", ext: ["1", "2", "3", "4", "5", "6", "7", "8", "9"]},
{name: "TTCN", mime: "text/x-ttcn", mode: "ttcn", ext: ["ttcn", "ttcn3", "ttcnpp"]},
{name: "TTCN_CFG", mime: "text/x-ttcn-cfg", mode: "ttcn-cfg", ext: ["cfg"]},
{name: "Turtle", mime: "text/turtle", mode: "turtle", ext: ["ttl"]},
{name: "TypeScript", mime: "application/typescript", mode: "javascript", ext: ["ts"], alias: ["ts"]},
{name: "Twig", mime: "text/x-twig", mode: "twig"},
{name: "Web IDL", mime: "text/x-webidl", mode: "webidl", ext: ["webidl"]},
{name: "VB.NET", mime: "text/x-vb", mode: "vb", ext: ["vb"]},
{name: "VBScript", mime: "text/vbscript", mode: "vbscript", ext: ["vbs"]},
{name: "Velocity", mime: "text/velocity", mode: "velocity", ext: ["vtl"]},
{name: "Verilog", mime: "text/x-verilog", mode: "verilog", ext: ["v"]},
{name: "VHDL", mime: "text/x-vhdl", mode: "vhdl", ext: ["vhd", "vhdl"]},
{name: "Vue.js Component", mimes: ["script/x-vue", "text/x-vue"], mode: "vue", ext: ["vue"]},
{name: "XML", mimes: ["application/xml", "text/xml"], mode: "xml", ext: ["xml", "xsl", "xsd"], alias: ["rss", "wsdl", "xsd"]},
{name: "XQuery", mime: "application/xquery", mode: "xquery", ext: ["xy", "xquery"]},
{name: "Yacas", mime: "text/x-yacas", mode: "yacas", ext: ["ys"]},
{name: "YAML", mimes: ["text/x-yaml", "text/yaml"], mode: "yaml", ext: ["yaml", "yml"], alias: ["yml"]},
{name: "Z80", mime: "text/x-z80", mode: "z80", ext: ["z80"]},
{name: "mscgen", mime: "text/x-mscgen", mode: "mscgen", ext: ["mscgen", "mscin", "msc"]},
{name: "xu", mime: "text/x-xu", mode: "mscgen", ext: ["xu"]},
{name: "msgenny", mime: "text/x-msgenny", mode: "mscgen", ext: ["msgenny"]}
];
var onload = function(){
console.log("I am being loaded");
};
CodeMirror.findModeByMIME = function(mime) {
mime = mime.toLowerCase();
for (var i = 0; i < CodeMirror.mI.length; i++) {
var info = CodeMirror.mI[i];
if (info.mime == mime) return info;
if (info.mimes) for (var j = 0; j < info.mimes.length; j++)
if (info.mimes[j] == mime) return info;
}
};
CodeMirror.findModeByExtension = function(ext) {
for (var i = 0; i < CodeMirror.mI.length; i++) {
var info = CodeMirror.mI[i];
if (info.ext) for (var j = 0; j < info.ext.length; j++)
if (info.ext[j] == ext) return info;
}
};
CodeMirror.findModeByFileName = function(filename) {
for (var i = 0; i < CodeMirror.mI.length; i++) {
var info = CodeMirror.mI[i];
if (info.file && info.file.test(filename)) return info;
}
var dot = filename.lastIndexOf(".");
var ext = dot > -1 && filename.substring(dot + 1, filename.length);
if (ext) return CodeMirror.findModeByExtension(ext);
};
CodeMirror.findModeByName = function(name) {
name = name.toLowerCase();
for (var i = 0; i < CodeMirror.mI.length; i++) {
var info = CodeMirror.mI[i];
if (info.name.toLowerCase() == name) return info;
if (info.alias) for (var j = 0; j < info.alias.length; j++)
if (info.alias[j].toLowerCase() == name) return info;
}
};
});
}};
});

View File

@@ -1,133 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
# the name of the package
name = 'yap_kernel'
#-----------------------------------------------------------------------------
# Minimal Python version sanity check
#-----------------------------------------------------------------------------
import sys
import sysconfig
import setuptools
v = sys.version_info
if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
error = "ERROR: %s requires Python version 3.3 or above." % name
print(error, file=sys.stderr)
sys.exit(1)
PY3 = (sys.version_info[0] >= 3)
#-----------------------------------------------------------------------------
# get on with it
#-----------------------------------------------------------------------------
from glob import glob
import os
import shutil
from distutils.core import setup
pjoin = os.path.join
here = os.path.abspath(os.path.dirname(__file__))
# pkg_root = pjoin(here, name)
packages = setuptools.find_packages('/home/vsc/github/yap-6.3/packages/python/yap_kernel')
# for d, _, _ in os.walk(pjoin(here, name)):
# if os.path.exists(pjoin(d, '__init__.py')):
# packages.append(d[len(here)+1:].replace(os.path.sep, '.'))
sys.path.insert(0, "/home/vsc/github/yap-6.3/packages/python/yap_kernel")
package_data = {
'yap_ipython': ['prolog/*.*'],
'yap_kernel': ['resources/*.*']
}
version_ns = {}
with open(pjoin('/home/vsc/github/yap-6.3/packages/python/yap_kernel', name, '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = dict(
name = name,
version = version_ns['__version__'],
scripts = glob(pjoin('scripts', '*')),
packages = packages,
py_modules = ['yap_kernel_launcher'],
package_data = package_data,
package_dir = {'':"/home/vsc/github/yap-6.3/packages/python/yap_kernel"},
description = "YAP Kernel for Jupyter",
author = 'YAP Development Team',
author_email = 'YAP-dev@scipy.org',
url = 'http://ipython.org',
license = 'BSD',
platforms = "Linux, Mac OS X, Windows",
keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Prolog',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
)
if 'develop' in sys.argv or any(a.startswith('bdist') for a in sys.argv):
import setuptools
setuptools_args = {}
install_requires = setuptools_args['install_requires'] = [
]
if any(a.startswith(('bdist', 'build', 'install')) for a in sys.argv):
from yap_kernel.kernelspec import write_kernel_spec, make_yap_kernel_cmd, KERNEL_NAME
argv = make_yap_kernel_cmd(executable='python')
dest = os.path.join(here, 'resources')
if not os.path.exists(dest):
os.makedirs( dest )
shutil.copy('/home/vsc/github/yap-6.3/docs/icons/yap_32x32x32.png',os.path.join(dest,'logo-32x32.png'))
shutil.copy('/home/vsc/github/yap-6.3/docs/icons/yap_64x64x32.png',os.path.join(dest,'logo-64x64.png'))
try:
write_kernel_spec(dest, overrides={'argv': argv})
except:
pass
# shutil.copy('/home/vsc/github/yap-6.3/packages/python/yap_kernel/kernel.js',dest)
# shutil.copy('/home/vsc/github/yap-6.3/misc/editors/prolog.js',dest)
setup_args['data_files'] = [
(pjoin('share', 'jupyter', 'kernels', KERNEL_NAME), glob(pjoin(dest, '*'))),
]
mode_loc = pjoin( sysconfig.get_path('platlib'), 'notebook', 'static', 'components', 'codemirror', 'mode', 'prolog')
custom_loc = pjoin( sysconfig.get_path('platlib'), 'notebook', 'static', 'custom')
try:
shutil.copy( pjoin( custom_loc, "custom.js") , pjoin( custom_loc, "custom.js.orig"))
shutil.copy( "/home/vsc/github/yap-6.3/packages/python/yap_kernel/custom.js" , pjoin( custom_loc, "custom.js"))
if not os.path.exists(mode_loc):
os.makedirs(mode_loc)
shutil.copy( "/home/vsc/github/yap-6.3/misc/editors/prolog.js" , mode_loc)
except:
pass
extras_require = setuptools_args['extras_require'] = {
'test:python_version=="2.7"': ['mock'],
'test': ['nose_warnings_filters', 'nose-timer'],
}
if 'setuptools' in sys.modules:
setup_args.update(setuptools_args)
if __name__ == '__main__':
setup(**setup_args)
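# Sketch of the kernelspec this setup is expected to install (illustrative
# only; the exact contents come from yap_kernel.kernelspec.write_kernel_spec).
# The data_files entry above places it under share/jupyter/kernels/<KERNEL_NAME>/,
# next to the logo-32x32.png and logo-64x64.png copied from docs/icons.
# A kernel.json of roughly this shape lets Jupyter launch the kernel:
#
#   {
#     "argv": ["python", "-m", "yap_kernel_launcher", "-f", "{connection_file}"],
#     "display_name": "YAP Kernel",
#     "language": "prolog"
#   }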

View File

@@ -100,26 +100,24 @@ if any(a.startswith(('bdist', 'build', 'install')) for a in sys.argv):
try:
shutil.rmtree(dest)
os.makedirs( dest )
shutil.copy2('${CMAKE_SOURCE_DIR}/docs/icons/yap_32x32x32.png',dest)
shutil.copy2('${CMAKE_SOURCE_DIR}/docs/icons/yap_64x64x32.png',dest)
shutil.copy2('${CMAKE_SOURCE_DIR}/docs/icons/yap_32x32x32.png',pjoin(dest,"logo_32x32.png"))
shutil.copy2('${CMAKE_SOURCE_DIR}/docs/icons/yap_64x64x32.png',pjoin(dest,"logo_64x64.png"))
write_kernel_spec(dest, overrides={'argv': argv})
except:
pass
# shutil.copy('${CMAKE_CURRENT_SOURCE_DIR}/kernel.js',dest)
# shutil.copy('${CMAKE_SOURCE_DIR}/misc/editors/prolog.js',dest)
setup_args['data_files'] = [
(pjoin('share', 'jupyter', 'kernels', KERNEL_NAME), glob(pjoin(dest, '*'))),
]
setup_args['data_files'] = [(pjoin('share', 'jupyter', 'kernels', KERNEL_NAME), glob(pjoin(dest, '*')))]
mode_loc = pjoin( sysconfig.get_path('platlib'), 'notebook', 'static', 'components', 'codemirror', 'mode', 'prolog')
custom_loc = pjoin( sysconfig.get_path('platlib'), 'notebook', 'static', 'custom')
try:
shutil.copy( pjoin( custom_loc, "custom.js") , pjoin( custom_loc, "custom.js.orig"))
shutil.copy( "${CMAKE_CURRENT_SOURCE_DIR}/custom.js" , pjoin( custom_loc, "custom.js"))
if not os.path.exists(mode_loc):
os.makedirs(mode_loc)
shutil.copy( "${CMAKE_SOURCE_DIR}/misc/editors/prolog.js" , mode_loc)
except:
pass
# try:
# shutil.copy( pjoin( custom_loc, "custom.js") , pjoin( custom_loc, "custom.js.orig"))
# shutil.copy( "${CMAKE_CURRENT_SOURCE_DIR}/custom.js" , pjoin( custom_loc, "custom.js"))
# if not os.path.exists(mode_loc):
# os.makedirs(mode_loc)
# shutil.copy( "${CMAKE_SOURCE_DIR}/misc/editors/prolog.js" , mode_loc)
# except:
# pass
extras_require = setuptools_args['extras_require'] = {
'test:python_version=="2.7"': ['mock'],

View File

@@ -1,11 +1,11 @@
# encoding: utf-8
"""
IPython: tools for interactive and parallel computing in Python.
yap_ipython: tools for interactive and parallel computing in Python.
http://ipython.org
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2011, IPython Development Team.
# Copyright (c) 2008-2011, yap_ipython Development Team.
# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
@@ -29,19 +29,19 @@ import sys
# Don't forget to also update setup.py when this changes!
if sys.version_info < (3,3):
raise ImportError(
"""
IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2.
When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
Beginning with IPython 6.0, Python 3.3 and above is required.
See IPython `README.rst` file for more information:
https://github.com/ipython/ipython/blob/master/README.rst
""")
"""
yap_ipython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2.
When using Python 2.7, please install yap_ipython 5.x LTS Long Term Support version.
Beginning with yap_ipython 6.0, Python 3.3 and above is required.
See yap_ipython `README.rst` file for more information:
https://github.com/ipython/ipython/blob/master/README.rst
""")
# Make it easy to import extensions - they are always directly on pythonpath.
# Therefore, non-IPython modules can be added to extensions directory.
# Therefore, non-yap_ipython modules can be added to extensions directory.
# This should probably be in ipapp.py.
sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
@@ -51,13 +51,13 @@ sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
from .core.getipython import get_ipython
from .core import release
from IPython.core.application import Application
from IPython.terminal.embed import embed
from .core.application import Application
from .terminal.embed import embed
from .core.interactiveshell import YAPInteractive as InteractiveShell
from IPython.testing import test
from IPython.utils.sysinfo import sys_info
from IPython.utils.frame import extract_module_locals
from .core.interactiveshell import InteractiveShell
from .testing import test
from .utils.sysinfo import sys_info
from .utils.frame import extract_module_locals
# Release data
__author__ = '%s <%s>' % (release.author, release.author_email)
@@ -66,86 +66,86 @@ __version__ = release.version
version_info = release.version_info
def embed_kernel(module=None, local_ns=None, **kwargs):
"""Embed and start an IPython kernel in a given scope.
"""Embed and start an yap_ipython kernel in a given scope.
If you don't want the kernel to initialize the namespace
from the scope of the surrounding function,
and/or you want to load full IPython configuration,
you probably want `IPython.start_kernel()` instead.
and/or you want to load full yap_ipython configuration,
you probably want `yap_ipython.start_kernel()` instead.
Parameters
----------
module : ModuleType, optional
The module to load into IPython globals (default: caller)
The module to load into yap_ipython globals (default: caller)
local_ns : dict, optional
The namespace to load into IPython user namespace (default: caller)
The namespace to load into yap_ipython user namespace (default: caller)
kwargs : various, optional
Further keyword args are relayed to the IPKernelApp constructor,
Further keyword args are relayed to the YAPKernelApp constructor,
allowing configuration of the Kernel. Will only have an effect
on the first embed_kernel call for a given process.
"""
(caller_module, caller_locals) = extract_module_locals(1)
if module is None:
module = caller_module
if local_ns is None:
local_ns = caller_locals
# Only import .zmq when we really need it
from ipykernel.embed import embed_kernel as real_embed_kernel
from yap_kernel.embed import embed_kernel as real_embed_kernel
real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
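# A minimal usage sketch of embed_kernel (illustrative only; the local
# variable names are made up): embedding a kernel inside an ordinary function
# so a frontend can inspect that function's locals.
def _embed_kernel_example():
    import yap_ipython
    goal, bindings = 'a(X)', {}
    # Blocks here serving a kernel whose user namespace holds `goal` and
    # `bindings`; attach to it with e.g. `jupyter console --existing`.
    yap_ipython.embed_kernel(local_ns=locals())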
def start_ipython(argv=None, **kwargs):
"""Launch a normal IPython instance (as opposed to embedded)
`IPython.embed()` puts a shell in a particular calling scope,
"""Launch a normal yap_ipython instance (as opposed to embedded)
`yap_ipython.embed()` puts a shell in a particular calling scope,
such as a function or method for debugging purposes,
which is often not desirable.
`start_ipython()` does full, regular IPython initialization,
`start_ipython()` does full, regular yap_ipython initialization,
including loading startup files, configuration, etc.
much of which is skipped by `embed()`.
This is a public API method, and will survive implementation changes.
Parameters
----------
argv : list or None, optional
If unspecified or None, IPython will parse command-line options from sys.argv.
If unspecified or None, yap_ipython will parse command-line options from sys.argv.
To prevent any command-line parsing, pass an empty list: `argv=[]`.
user_ns : dict, optional
specify this dictionary to initialize the IPython user namespace with particular values.
specify this dictionary to initialize the yap_ipython user namespace with particular values.
kwargs : various, optional
Any other kwargs will be passed to the Application constructor,
such as `config`.
"""
from IPython.terminal.ipapp import launch_new_instance
from yap_ipython.terminal.ipapp import launch_new_instance
return launch_new_instance(argv=argv, **kwargs)
def start_kernel(argv=None, **kwargs):
"""Launch a normal IPython kernel instance (as opposed to embedded)
`IPython.embed_kernel()` puts a shell in a particular calling scope,
"""Launch a normal yap_ipython kernel instance (as opposed to embedded)
`yap_ipython.embed_kernel()` puts a shell in a particular calling scope,
such as a function or method for debugging purposes,
which is often not desirable.
`start_kernel()` does full, regular IPython initialization,
`start_kernel()` does full, regular yap_ipython initialization,
including loading startup files, configuration, etc.
much of which is skipped by `embed()`.
Parameters
----------
argv : list or None, optional
If unspecified or None, IPython will parse command-line options from sys.argv.
If unspecified or None, yap_ipython will parse command-line options from sys.argv.
To prevent any command-line parsing, pass an empty list: `argv=[]`.
user_ns : dict, optional
specify this dictionary to initialize the IPython user namespace with particular values.
specify this dictionary to initialize the yap_ipython user namespace with particular values.
kwargs : various, optional
Any other kwargs will be passed to the Application constructor,
such as `config`.
"""
from IPython.kernel.zmq.kernelapp import launch_new_instance
from yap_ipython.kernel.zmq.kernelapp import launch_new_instance
return launch_new_instance(argv=argv, **kwargs)
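# A short sketch of the two non-embedded entry points above (illustrative
# only; the `user_ns` contents are made up):
def _start_examples():
    import yap_ipython
    # Full interactive shell: skip sys.argv parsing and pre-seed the namespace.
    yap_ipython.start_ipython(argv=[], user_ns={'counter': 0})
    # Alternatively, serve a kernel instead of a terminal shell (blocks until
    # the kernel is shut down):
    # yap_ipython.start_kernel(argv=[], user_ns={'counter': 0})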

View File

@@ -0,0 +1,14 @@
# encoding: utf-8
"""Terminal-based yap_ipython entry point.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, yap_ipython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yap_ipython import start_ipython
start_ipython()

View File

@@ -0,0 +1,19 @@
"""
Shim to maintain backwards compatibility with old yap_ipython.config imports.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from yap_ipython.utils.shimmodule import ShimModule, ShimWarning
warn("The `yap_ipython.config` package has been deprecated since yap_ipython 4.0. "
"You should import from traitlets.config instead.", ShimWarning)
# Unconditionally insert the shim into sys.modules so that further import calls
# trigger the custom attribute access above
sys.modules['yap_ipython.config'] = ShimModule(src='yap_ipython.config', mirror='traitlets.config')
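# Sketch of the shim's effect (illustrative only): old-style imports keep
# working, but attribute access is served by traitlets.config.
def _shim_example():
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # silence the ShimWarning issued above
        from yap_ipython.config import Configurable
    from traitlets.config import Configurable as TraitletsConfigurable
    assert Configurable is TraitletsConfigurable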

View File

@@ -0,0 +1,12 @@
"""
Shim to maintain backwards compatibility with old yap_ipython.consoleapp imports.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
from warnings import warn
warn("The `yap_ipython.consoleapp` package has been deprecated. "
"You should import from jupyter_client.consoleapp instead.")
from jupyter_client.consoleapp import *

View File

@@ -0,0 +1,256 @@
# encoding: utf-8
"""
System command aliases.
Authors:
* Fernando Perez
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import sys
from traitlets.config.configurable import Configurable
from yap_ipython.core.error import UsageError
from traitlets import List, Instance
from logging import error
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# This is used as the pattern for calls to split_user_input.
shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
def default_aliases():
"""Return list of shell aliases to auto-define.
"""
# Note: the aliases defined here should be safe to use on a kernel
# regardless of what frontend it is attached to. Frontends that use a
# kernel in-process can define additional aliases that will only work in
# their case. For example, things like 'less' or 'clear' that manipulate
# the terminal should NOT be declared here, as they will only work if the
# kernel is running inside a true terminal, and not over the network.
if os.name == 'posix':
default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
('cat', 'cat'),
]
# Useful set of ls aliases. The GNU and BSD options are a little
# different, so we make aliases that provide as similar as possible
# behavior in ipython, by passing the right flags for each platform
if sys.platform.startswith('linux'):
ls_aliases = [('ls', 'ls -F --color'),
# long ls
('ll', 'ls -F -o --color'),
# ls normal files only
('lf', 'ls -F -o --color %l | grep ^-'),
# ls symbolic links
('lk', 'ls -F -o --color %l | grep ^l'),
# directories or links to directories,
('ldir', 'ls -F -o --color %l | grep /$'),
# things which are executable
('lx', 'ls -F -o --color %l | grep ^-..x'),
]
elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
# OpenBSD, NetBSD. The ls implementation on these platforms do not support
# the -G switch and lack the ability to use colorized output.
ls_aliases = [('ls', 'ls -F'),
# long ls
('ll', 'ls -F -l'),
# ls normal files only
('lf', 'ls -F -l %l | grep ^-'),
# ls symbolic links
('lk', 'ls -F -l %l | grep ^l'),
# directories or links to directories,
('ldir', 'ls -F -l %l | grep /$'),
# things which are executable
('lx', 'ls -F -l %l | grep ^-..x'),
]
else:
# BSD, OSX, etc.
ls_aliases = [('ls', 'ls -F -G'),
# long ls
('ll', 'ls -F -l -G'),
# ls normal files only
('lf', 'ls -F -l -G %l | grep ^-'),
# ls symbolic links
('lk', 'ls -F -l -G %l | grep ^l'),
# directories or links to directories,
('ldir', 'ls -F -G -l %l | grep /$'),
# things which are executable
('lx', 'ls -F -l -G %l | grep ^-..x'),
]
default_aliases = default_aliases + ls_aliases
elif os.name in ['nt', 'dos']:
default_aliases = [('ls', 'dir /on'),
('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
]
else:
default_aliases = []
return default_aliases
class AliasError(Exception):
pass
class InvalidAliasError(AliasError):
pass
class Alias(object):
"""Callable object storing the details of one alias.
Instances are registered as magic functions to allow use of aliases.
"""
# Prepare blacklist
blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
def __init__(self, shell, name, cmd):
self.shell = shell
self.name = name
self.cmd = cmd
self.__doc__ = "Alias for `!{}`".format(cmd)
self.nargs = self.validate()
def validate(self):
"""Validate the alias, and return the number of arguments."""
if self.name in self.blacklist:
raise InvalidAliasError("The name %s can't be aliased "
"because it is a keyword or builtin." % self.name)
try:
caller = self.shell.magics_manager.magics['line'][self.name]
except KeyError:
pass
else:
if not isinstance(caller, Alias):
raise InvalidAliasError("The name %s can't be aliased "
"because it is another magic command." % self.name)
if not (isinstance(self.cmd, str)):
raise InvalidAliasError("An alias command must be a string, "
"got: %r" % self.cmd)
nargs = self.cmd.count('%s') - self.cmd.count('%%s')
if (nargs > 0) and (self.cmd.find('%l') >= 0):
raise InvalidAliasError('The %s and %l specifiers are mutually '
'exclusive in alias definitions.')
return nargs
def __repr__(self):
return "<alias {} for {!r}>".format(self.name, self.cmd)
def __call__(self, rest=''):
cmd = self.cmd
nargs = self.nargs
# Expand the %l special to be the user's input line
if cmd.find('%l') >= 0:
cmd = cmd.replace('%l', rest)
rest = ''
if nargs==0:
if cmd.find('%%s') >= 1:
cmd = cmd.replace('%%s', '%s')
# Simple, argument-less aliases
cmd = '%s %s' % (cmd, rest)
else:
# Handle aliases with positional arguments
args = rest.split(None, nargs)
if len(args) < nargs:
raise UsageError('Alias <%s> requires %s arguments, %s given.' %
(self.name, nargs, len(args)))
cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
self.shell.system(cmd)
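# Worked example of the expansion rules above (illustrative alias commands):
# - 'ls -F -o --color %l | grep ^-' has nargs == 0; '%l' receives the whole
#   rest of the line, so `lf *.pl` runs `ls -F -o --color *.pl | grep ^-`.
# - 'cp %s %s' has nargs == 2; fewer than two arguments raises UsageError.
def _alias_nargs_example():
    cmd = 'cp %s %s'
    nargs = cmd.count('%s') - cmd.count('%%s')   # '%%s' is an escaped literal
    assert nargs == 2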
#-----------------------------------------------------------------------------
# Main AliasManager class
#-----------------------------------------------------------------------------
class AliasManager(Configurable):
default_aliases = List(default_aliases()).tag(config=True)
user_aliases = List(default_value=[]).tag(config=True)
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(AliasManager, self).__init__(shell=shell, **kwargs)
# For convenient access
self.linemagics = self.shell.magics_manager.magics['line']
self.init_aliases()
def init_aliases(self):
# Load default & user aliases
for name, cmd in self.default_aliases + self.user_aliases:
self.soft_define_alias(name, cmd)
@property
def aliases(self):
return [(n, func.cmd) for (n, func) in self.linemagics.items()
if isinstance(func, Alias)]
def soft_define_alias(self, name, cmd):
"""Define an alias, but don't raise on an AliasError."""
try:
self.define_alias(name, cmd)
except AliasError as e:
error("Invalid alias: %s" % e)
def define_alias(self, name, cmd):
"""Define a new alias after validating it.
This will raise an :exc:`AliasError` if there are validation
problems.
"""
caller = Alias(shell=self.shell, name=name, cmd=cmd)
self.shell.magics_manager.register_function(caller, magic_kind='line',
magic_name=name)
def get_alias(self, name):
"""Return an alias, or None if no alias by that name exists."""
aname = self.linemagics.get(name, None)
return aname if isinstance(aname, Alias) else None
def is_alias(self, name):
"""Return whether or not a given name has been defined as an alias"""
return self.get_alias(name) is not None
def undefine_alias(self, name):
if self.is_alias(name):
del self.linemagics[name]
else:
raise ValueError('%s is not an alias' % name)
def clear_aliases(self):
for name, cmd in self.aliases:
self.undefine_alias(name)
def retrieve_alias(self, name):
"""Retrieve the command to which an alias expands."""
caller = self.get_alias(name)
if caller:
return caller.cmd
else:
raise ValueError('%s is not an alias' % name)
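# A minimal AliasManager sketch (illustrative; assumes a running yap_ipython
# shell instance `ip`, and 'consult' is a made-up alias name):
def _alias_manager_example(ip):
    am = AliasManager(shell=ip)
    am.define_alias('consult', 'cat %s')
    assert am.is_alias('consult')
    assert am.retrieve_alias('consult') == 'cat %s'
    am.undefine_alias('consult')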

View File

@@ -0,0 +1,462 @@
# encoding: utf-8
"""
An application for yap_ipython.
All top-level applications should use the classes in this module for
handling configuration and creating configurables.
The job of an :class:`Application` is to create the master configuration
object and then create the configurable objects, passing the config to them.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
from copy import deepcopy
import glob
import logging
import os
import shutil
import sys
from traitlets.config.application import Application, catch_config_error
from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
from yap_ipython.core import release, crashhandler
from yap_ipython.core.profiledir import ProfileDir, ProfileDirError
from yap_ipython.paths import get_ipython_dir, get_ipython_package_dir
from yap_ipython.utils.path import ensure_dir_exists
from traitlets import (
List, Unicode, Type, Bool, Set, Instance, Undefined,
default, observe,
)
if os.name == 'nt':
programdata = os.environ.get('PROGRAMDATA', None)
if programdata:
SYSTEM_CONFIG_DIRS = [os.path.join(programdata, 'ipython')]
else: # PROGRAMDATA is not defined by default on XP.
SYSTEM_CONFIG_DIRS = []
else:
SYSTEM_CONFIG_DIRS = [
"/usr/local/etc/ipython",
"/etc/ipython",
]
ENV_CONFIG_DIRS = []
_env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
if _env_config_dir not in SYSTEM_CONFIG_DIRS:
# only add ENV_CONFIG if sys.prefix is not already included
ENV_CONFIG_DIRS.append(_env_config_dir)
_envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS')
if _envvar in {None, ''}:
IPYTHON_SUPPRESS_CONFIG_ERRORS = None
else:
if _envvar.lower() in {'1','true'}:
IPYTHON_SUPPRESS_CONFIG_ERRORS = True
elif _envvar.lower() in {'0','false'} :
IPYTHON_SUPPRESS_CONFIG_ERRORS = False
else:
sys.exit("Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar )
# aliases and flags
base_aliases = {
'profile-dir' : 'ProfileDir.location',
'profile' : 'BaseYAPApplication.profile',
'ipython-dir' : 'BaseYAPApplication.ipython_dir',
'log-level' : 'Application.log_level',
'config' : 'BaseYAPApplication.extra_config_file',
}
base_flags = dict(
debug = ({'Application' : {'log_level' : logging.DEBUG}},
"set log level to logging.DEBUG (maximize logging output)"),
quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
"set log level to logging.CRITICAL (minimize logging output)"),
init = ({'BaseYAPApplication' : {
'copy_config_files' : True,
'auto_create' : True}
}, """Initialize profile with default config files. This is equivalent
to running `ipython profile create <profile>` prior to startup.
""")
)
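# Sketch of how these tables are consumed (illustrative command line): with the
# aliases and flags above, an invocation carrying
#   --profile=demo --log-level=DEBUG --init
# is translated by traitlets into BaseYAPApplication.profile = 'demo',
# Application.log_level = DEBUG, and the copy_config_files/auto_create
# settings listed under the `init` flag.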
class ProfileAwareConfigLoader(PyFileConfigLoader):
"""A Python file config loader that is aware of yap_ipython profiles."""
def load_subconfig(self, fname, path=None, profile=None):
if profile is not None:
try:
profile_dir = ProfileDir.find_profile_dir_by_name(
get_ipython_dir(),
profile,
)
except ProfileDirError:
return
path = profile_dir.location
return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
class BaseYAPApplication(Application):
name = u'ipython'
description = Unicode(u'yap_ipython: an enhanced interactive Python shell.')
version = Unicode(release.version)
aliases = base_aliases
flags = base_flags
classes = List([ProfileDir])
# enable `load_subconfig('cfg.py', profile='name')`
python_config_loader_class = ProfileAwareConfigLoader
# Track whether the config_file has changed,
# because some logic happens only if we aren't using the default.
config_file_specified = Set()
config_file_name = Unicode()
@default('config_file_name')
def _config_file_name_default(self):
return self.name.replace('-','_') + u'_config.py'
@observe('config_file_name')
def _config_file_name_changed(self, change):
if change['new'] != change['old']:
self.config_file_specified.add(change['new'])
# The directory that contains yap_ipython's builtin profiles.
builtin_profile_dir = Unicode(
os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
)
config_file_paths = List(Unicode())
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()]
extra_config_file = Unicode(
help="""Path to an extra config file to load.
If specified, load this config file in addition to any other yap_ipython config.
""").tag(config=True)
@observe('extra_config_file')
def _extra_config_file_changed(self, change):
old = change['old']
new = change['new']
try:
self.config_files.remove(old)
except ValueError:
pass
self.config_file_specified.add(new)
self.config_files.append(new)
profile = Unicode(u'default',
help="""The yap_ipython profile to use."""
).tag(config=True)
@observe('profile')
def _profile_changed(self, change):
self.builtin_profile_dir = os.path.join(
get_ipython_package_dir(), u'config', u'profile', change['new']
)
ipython_dir = Unicode(
help="""
The name of the yap_ipython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
is usually $HOME/.ipython. This option can also be specified through
the environment variable IPYTHONDIR.
"""
).tag(config=True)
@default('ipython_dir')
def _ipython_dir_default(self):
d = get_ipython_dir()
self._ipython_dir_changed({
'name': 'ipython_dir',
'old': d,
'new': d,
})
return d
_in_init_profile_dir = False
profile_dir = Instance(ProfileDir, allow_none=True)
@default('profile_dir')
def _profile_dir_default(self):
# avoid recursion
if self._in_init_profile_dir:
return
# profile_dir requested early, force initialization
self.init_profile_dir()
return self.profile_dir
overwrite = Bool(False,
help="""Whether to overwrite existing config files when copying"""
).tag(config=True)
auto_create = Bool(False,
help="""Whether to create profile dir if it doesn't exist"""
).tag(config=True)
config_files = List(Unicode())
@default('config_files')
def _config_files_default(self):
return [self.config_file_name]
copy_config_files = Bool(False,
help="""Whether to install the default config files into the profile dir.
If a new profile is being created, and yap_ipython contains config files for that
profile, then they will be staged into the new directory. Otherwise,
default config files will be automatically generated.
""").tag(config=True)
verbose_crash = Bool(False,
help="""Create a massive crash report when yap_ipython encounters what may be an
internal error. The default is to append a short message to the
usual traceback""").tag(config=True)
# The class to use as the crash handler.
crash_handler_class = Type(crashhandler.CrashHandler)
@catch_config_error
def __init__(self, **kwargs):
super(BaseYAPApplication, self).__init__(**kwargs)
# ensure current working directory exists
try:
os.getcwd()
except:
# exit if cwd doesn't exist
self.log.error("Current working directory doesn't exist.")
self.exit(1)
#-------------------------------------------------------------------------
# Various stages of Application creation
#-------------------------------------------------------------------------
deprecated_subcommands = {}
def initialize_subcommand(self, subc, argv=None):
if subc in self.deprecated_subcommands:
self.log.warning("Subcommand `ipython {sub}` is deprecated and will be removed "
"in future versions.".format(sub=subc))
self.log.warning("You likely want to use `jupyter {sub}` in the "
"future".format(sub=subc))
return super(BaseYAPApplication, self).initialize_subcommand(subc, argv)
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler)
def excepthook(self, etype, evalue, tb):
"""this is sys.excepthook after init_crashhandler
set self.verbose_crash=True to use our full crashhandler, instead of
a regular traceback with a short message (crash_handler_lite)
"""
if self.verbose_crash:
return self.crash_handler(etype, evalue, tb)
else:
return crashhandler.crash_handler_lite(etype, evalue, tb)
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
old = change['old']
new = change['new']
if old is not Undefined:
str_old = os.path.abspath(old)
if str_old in sys.path:
sys.path.remove(str_old)
str_path = os.path.abspath(new)
sys.path.append(str_path)
ensure_dir_exists(new)
readme = os.path.join(new, 'README')
readme_src = os.path.join(get_ipython_package_dir(), u'config', u'profile', 'README')
if not os.path.exists(readme) and os.path.exists(readme_src):
shutil.copy(readme_src, readme)
for d in ('extensions', 'nbextensions'):
path = os.path.join(new, d)
try:
ensure_dir_exists(path)
except OSError as e:
# this will not be EEXIST
self.log.error("couldn't create path %s: %s", path, e)
self.log.debug("IPYTHONDIR set to: %s" % new)
def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
"""Load the config file.
By default, errors in loading config are handled, and a warning
printed on screen. For testing, the suppress_errors option is set
to False, so errors will make tests fail.
`suppress_errors` defaults to `None`, in which case the
behavior defaults to that of `traitlets.Application`.
The default value can be set:
- to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive).
- to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
- to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.
Any other value is invalid, and will make yap_ipython exit with a non-zero return code.
"""
self.log.debug("Searching path %s for config files", self.config_file_paths)
base_config = 'ipython_config.py'
self.log.debug("Attempting to load config file: %s" %
base_config)
try:
if suppress_errors is not None:
old_value = Application.raise_config_file_errors
Application.raise_config_file_errors = not suppress_errors
Application.load_config_file(
self,
base_config,
path=self.config_file_paths
)
except ConfigFileNotFound:
# ignore errors loading parent
self.log.debug("Config file %s not found", base_config)
pass
if suppress_errors is not None:
Application.raise_config_file_errors = old_value
for config_file_name in self.config_files:
if not config_file_name or config_file_name == base_config:
continue
self.log.debug("Attempting to load config file: %s" %
self.config_file_name)
try:
Application.load_config_file(
self,
config_file_name,
path=self.config_file_paths
)
except ConfigFileNotFound:
# Only warn if the default config file was NOT being used.
if config_file_name in self.config_file_specified:
msg = self.log.warning
else:
msg = self.log.debug
msg("Config file not found, skipping: %s", config_file_name)
except Exception:
# For testing purposes.
if not suppress_errors:
raise
self.log.warning("Error loading config file: %s" %
self.config_file_name, exc_info=True)
def init_profile_dir(self):
"""initialize the profile dir"""
self._in_init_profile_dir = True
if self.profile_dir is not None:
# already ran
return
if 'ProfileDir.location' not in self.config:
# location not specified, find by profile name
try:
p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
# not found, maybe create it (always create default profile)
if self.auto_create or self.profile == 'default':
try:
p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile: %r"%self.profile)
self.exit(1)
else:
self.log.info("Created profile dir: %r"%p.location)
else:
self.log.fatal("Profile %r not found."%self.profile)
self.exit(1)
else:
self.log.debug("Using existing profile dir: %r"%p.location)
else:
location = self.config.ProfileDir.location
# location is fully specified
try:
p = ProfileDir.find_profile_dir(location, self.config)
except ProfileDirError:
# not found, maybe create it
if self.auto_create:
try:
p = ProfileDir.create_profile_dir(location, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile directory: %r"%location)
self.exit(1)
else:
self.log.debug("Creating new profile dir: %r"%location)
else:
self.log.fatal("Profile directory %r not found."%location)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%location)
# if profile_dir is specified explicitly, set profile name
dir_name = os.path.basename(p.location)
if dir_name.startswith('profile_'):
self.profile = dir_name[8:]
self.profile_dir = p
self.config_file_paths.append(p.location)
self._in_init_profile_dir = False
def init_config_files(self):
"""[optionally] copy default config files into profile dir."""
self.config_file_paths.extend(ENV_CONFIG_DIRS)
self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
# copy config files
path = self.builtin_profile_dir
if self.copy_config_files:
src = self.profile
cfg = self.config_file_name
if path and os.path.exists(os.path.join(path, cfg)):
self.log.warning("Staging %r from %s into %r [overwrite=%s]"%(
cfg, src, self.profile_dir.location, self.overwrite)
)
self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
else:
self.stage_default_config_file()
else:
# Still stage *bundled* config files, but not generated ones
# This is necessary for `ipython profile=sympy` to load the profile
# on the first go
files = glob.glob(os.path.join(path, '*.py'))
for fullpath in files:
cfg = os.path.basename(fullpath)
if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
# file was copied
self.log.warning("Staging bundled %s from %s into %r"%(
cfg, self.profile, self.profile_dir.location)
)
def stage_default_config_file(self):
"""auto generate default config file, and stage it into the profile."""
s = self.generate_config_file()
fname = os.path.join(self.profile_dir.location, self.config_file_name)
if self.overwrite or not os.path.exists(fname):
self.log.warning("Generating default config file: %r"%(fname))
with open(fname, 'w') as f:
f.write(s)
@catch_config_error
def initialize(self, argv=None):
# don't hook up crash handler before parsing command-line
self.parse_command_line(argv)
self.init_crash_handler()
if self.subapp is not None:
# stop here if subapp is taking over
return
# save a copy of CLI config to re-load after config files
# so that it has highest priority
cl_config = deepcopy(self.config)
self.init_profile_dir()
self.init_config_files()
self.load_config_file()
# enforce cl-opts override configfile opts:
self.update_config(cl_config)

View File

@@ -0,0 +1,70 @@
# encoding: utf-8
"""
Autocall capabilities for yap_ipython.core.
Authors:
* Brian Granger
* Fernando Perez
* Thomas Kluyver
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class IPyAutocall(object):
""" Instances of this class are always autocalled
This happens regardless of 'autocall' variable state. Use this to
develop macro-like mechanisms.
"""
_ip = None
rewrite = True
def __init__(self, ip=None):
self._ip = ip
def set_ip(self, ip):
""" Will be used to set _ip point to current ipython instance b/f call
Override this method if you don't want this to happen.
"""
self._ip = ip
class ExitAutocall(IPyAutocall):
"""An autocallable object which will be added to the user namespace so that
exit, exit(), quit or quit() are all valid ways to close the shell."""
rewrite = False
def __call__(self):
self._ip.ask_exit()
class ZMQExitAutocall(ExitAutocall):
"""Exit yap_ipython. Autocallable, so it needn't be explicitly called.
Parameters
----------
keep_kernel : bool
If True, leave the kernel alive. Otherwise, tell the kernel to exit too
(default).
"""
def __call__(self, keep_kernel=False):
self._ip.keepkernel_on_exit = keep_kernel
self._ip.ask_exit()
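# A macro-like autocall sketch (illustrative only; the Prolog goal is made up):
# instances of IPyAutocall subclasses placed in the user namespace are called
# as soon as their bare name is typed, and the shell is expected to inject
# itself via set_ip() beforehand.
class _ReconsultExample(IPyAutocall):
    def __call__(self):
        self._ip.run_cell("consult('program.pl')")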

View File

@@ -0,0 +1,86 @@
"""
A context manager for managing things injected into :mod:`builtins`.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import builtins as builtin_mod
from traitlets.config.configurable import Configurable
from traitlets import Instance
class __BuiltinUndefined(object): pass
BuiltinUndefined = __BuiltinUndefined()
class __HideBuiltin(object): pass
HideBuiltin = __HideBuiltin()
class BuiltinTrap(Configurable):
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
def __init__(self, shell=None):
super(BuiltinTrap, self).__init__(shell=shell, config=None)
self._orig_builtins = {}
# We define this to track if a single BuiltinTrap is nested.
# Only turn off the trap when the outermost call to __exit__ is made.
self._nested_level = 0
self.shell = shell
# builtins we always add - if set to HideBuiltin, they will just
# be removed instead of being replaced by something else
self.auto_builtins = {'exit': HideBuiltin,
'quit': HideBuiltin,
'get_ipython': self.shell.get_ipython,
}
def __enter__(self):
if self._nested_level == 0:
self.activate()
self._nested_level += 1
# I return self, so callers can use add_builtin in a with clause.
return self
def __exit__(self, type, value, traceback):
if self._nested_level == 1:
self.deactivate()
self._nested_level -= 1
# Returning False will cause exceptions to propagate
return False
def add_builtin(self, key, value):
"""Add a builtin and save the original."""
bdict = builtin_mod.__dict__
orig = bdict.get(key, BuiltinUndefined)
if value is HideBuiltin:
if orig is not BuiltinUndefined: #same as 'key in bdict'
self._orig_builtins[key] = orig
del bdict[key]
else:
self._orig_builtins[key] = orig
bdict[key] = value
def remove_builtin(self, key, orig):
"""Remove an added builtin and re-set the original."""
if orig is BuiltinUndefined:
del builtin_mod.__dict__[key]
else:
builtin_mod.__dict__[key] = orig
def activate(self):
"""Store ipython references in the __builtin__ namespace."""
add_builtin = self.add_builtin
for name, func in self.auto_builtins.items():
add_builtin(name, func)
def deactivate(self):
"""Remove any builtins which might have been added by add_builtins, or
restore overwritten ones to their previous values."""
remove_builtin = self.remove_builtin
for key, val in self._orig_builtins.items():
remove_builtin(key, val)
self._orig_builtins.clear()
self._builtins_added = False
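# A minimal usage sketch (illustrative; assumes an InteractiveShell instance `ip`):
def _builtin_trap_example(ip):
    trap = BuiltinTrap(shell=ip)
    with trap:
        # Inside the outermost `with`, get_ipython() is installed as a builtin
        # and exit/quit are hidden, per auto_builtins above.
        pass
    # Leaving the outermost `with` restores the original builtins.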

View File

@@ -0,0 +1,143 @@
"""Compiler tools with improved interactive support.
Provides compilation machinery similar to codeop, but with caching support so
we can provide interactive tracebacks.
Authors
-------
* Robert Kern
* Fernando Perez
* Thomas Kluyver
"""
# Note: though it might be more natural to name this module 'compiler', that
# name is in the stdlib and name collisions with the stdlib tend to produce
# weird problems (often with third-party tools).
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The yap_ipython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
import __future__
from ast import PyCF_ONLY_AST
import codeop
import functools
import hashlib
import linecache
import operator
import time
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
# this is used as a bitmask to extract future-related code flags.
PyCF_MASK = functools.reduce(operator.or_,
(getattr(__future__, fname).compiler_flag
for fname in __future__.all_feature_names))
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def code_name(code, number=0):
""" Compute a (probably) unique name for code for caching.
This now expects code to be unicode.
"""
hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
# Include the number and 12 characters of the hash in the name. It's
# pretty much impossible that in a single session we'll have collisions
# even with truncated hashes, and the full one makes tracebacks too long
return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class CachingCompiler(codeop.Compile):
"""A compiler that caches code compiled from interactive statements.
"""
def __init__(self):
codeop.Compile.__init__(self)
# This is ugly, but it must be done this way to allow multiple
# simultaneous ipython instances to coexist. Since Python itself
# directly accesses the data structures in the linecache module, and
# the cache therein is global, we must work with that data structure.
# We must hold a reference to the original checkcache routine and call
# that in our own check_cache() below, but the special yap_ipython cache
# must also be shared by all yap_ipython instances. If we were to hold
# separate caches (one in each CachingCompiler instance), any call made
# by Python itself to linecache.checkcache() would obliterate the
# cached data from the other yap_ipython instances.
if not hasattr(linecache, '_ipython_cache'):
linecache._ipython_cache = {}
if not hasattr(linecache, '_checkcache_ori'):
linecache._checkcache_ori = linecache.checkcache
# Now, we must monkeypatch the linecache directly so that parts of the
# stdlib that call it outside our control go through our codepath
# (otherwise we'd lose our tracebacks).
linecache.checkcache = check_linecache_ipython
def ast_parse(self, source, filename='<unknown>', symbol='exec'):
"""Parse code to an AST with the current compiler flags active.
Arguments are exactly the same as ast.parse (in the standard library),
and are passed to the built-in compile function."""
return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
def reset_compiler_flags(self):
"""Reset compiler flags to default state."""
# This value is copied from codeop.Compile.__init__, so if that ever
# changes, it will need to be updated.
self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
@property
def compiler_flags(self):
"""Flags currently active in the compilation process.
"""
return self.flags
def cache(self, code, number=0):
"""Make a name for a block of code, and cache the code.
Parameters
----------
code : str
The Python source code to cache.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up.
"""
name = code_name(code, number)
entry = (len(code), time.time(),
[line+'\n' for line in code.splitlines()], name)
linecache.cache[name] = entry
linecache._ipython_cache[name] = entry
return name
def check_linecache_ipython(*args):
"""Call linecache.checkcache() safely protecting our cached values.
"""
# First call the original checkcache as intended
linecache._checkcache_ori(*args)
# Then, update back the cache with our data, so that tracebacks related
# to our compiled codes can be produced.
linecache.cache.update(linecache._ipython_cache)
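# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the pseudo filename
# returned by cache() is registered in linecache, so later tracebacks can
# recover the interactive source.
if __name__ == '__main__':
    cc = CachingCompiler()
    src = 'x = 1\ny = x + 1\n'
    fname = cc.cache(src, number=1)      # e.g. '<ipython-input-1-...>'
    code = cc(src, fname, 'exec')        # codeop.Compile instances are callable
    ns = {}
    exec(code, ns)
    print(ns['y'], '<-', linecache.getline(fname, 2).strip())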

File diff suppressed because it is too large

View File

@@ -0,0 +1,354 @@
# encoding: utf-8
"""Implementations for various useful completers.
These are all loaded by default by yap_ipython.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The yap_ipython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
import glob
import inspect
import os
import re
import sys
from importlib import import_module
from importlib.machinery import all_suffixes
# Third-party imports
from time import time
from zipimport import zipimporter
# Our own imports
from yap_ipython.core.completer import expand_user, compress_user
from yap_ipython.core.error import TryNext
from yap_ipython.utils._process_common import arg_split
# FIXME: this should be pulled in with the right call via the component system
from yap_ipython import get_ipython
from typing import List
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
_suffixes = all_suffixes()
# Time in seconds after which the rootmodules will be stored permanently in the
# ipython ip.db database (kept in the user's .ipython dir).
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
r'(?P<package>[/\\]__init__)?'
r'(?P<suffix>%s)$' %
r'|'.join(re.escape(s) for s in _suffixes))
# RE for the ipython %run command (python + ipython scripts)
magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def module_list(path):
"""
Return the list containing the names of the modules available in the given
folder.
"""
# sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
if path == '':
path = '.'
# A few local constants to be used in loops below
pjoin = os.path.join
if os.path.isdir(path):
# Build a list of all files in the directory and all files
# in its subdirectories. For performance reasons, do not
# recurse more than one level into subdirectories.
files = []
for root, dirs, nondirs in os.walk(path, followlinks=True):
subdir = root[len(path)+1:]
if subdir:
files.extend(pjoin(subdir, f) for f in nondirs)
dirs[:] = [] # Do not recurse into additional subdirectories.
else:
files.extend(nondirs)
else:
try:
files = list(zipimporter(path)._files.keys())
except:
files = []
# Build a list of modules which match the import_re regex.
modules = []
for f in files:
m = import_re.match(f)
if m:
modules.append(m.group('name'))
return list(set(modules))
def get_root_modules():
"""
Returns a list containing the names of all the modules available in the
folders of the pythonpath.
ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
"""
ip = get_ipython()
if ip is None:
# No global shell instance to store cached list of modules.
# Don't try to scan for modules every time.
return list(sys.builtin_module_names)
rootmodules_cache = ip.db.get('rootmodules_cache', {})
rootmodules = list(sys.builtin_module_names)
start_time = time()
store = False
for path in sys.path:
try:
modules = rootmodules_cache[path]
except KeyError:
modules = module_list(path)
try:
modules.remove('__init__')
except ValueError:
pass
if path not in ('', '.'): # cwd modules should not be cached
rootmodules_cache[path] = modules
if time() - start_time > TIMEOUT_STORAGE and not store:
store = True
print("\nCaching the list of root modules, please wait!")
print("(This will only be done once - type '%rehashx' to "
"reset cache!)\n")
sys.stdout.flush()
if time() - start_time > TIMEOUT_GIVEUP:
print("This is taking too long, we give up.\n")
return []
rootmodules.extend(modules)
if store:
ip.db['rootmodules_cache'] = rootmodules_cache
rootmodules = list(set(rootmodules))
return rootmodules
def is_importable(module, attr, only_modules):
if only_modules:
return inspect.ismodule(getattr(module, attr))
else:
return not(attr[:2] == '__' and attr[-2:] == '__')
def try_import(mod: str, only_modules=False) -> List[str]:
"""
Try to import given module and return list of potential completions.
"""
mod = mod.rstrip('.')
try:
m = import_module(mod)
except:
return []
m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
completions = []
if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
completions.extend( [attr for attr in dir(m) if
is_importable(m, attr, only_modules)])
completions.extend(getattr(m, '__all__', []))
if m_is_init:
completions.extend(module_list(os.path.dirname(m.__file__)))
completions_set = {c for c in completions if isinstance(c, str)}
completions_set.discard('__init__')
return list(completions_set)
#-----------------------------------------------------------------------------
# Completion-related functions.
#-----------------------------------------------------------------------------
def quick_completer(cmd, completions):
""" Easily create a trivial completer for a command.
Takes either a list of completions, or a string of completions (which will
be split on whitespace).
Example::
[d:\ipython]|1> import ipy_completers
[d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
[d:\ipython]|3> foo b<TAB>
bar baz
[d:\ipython]|3> foo ba
"""
if isinstance(completions, str):
completions = completions.split()
def do_complete(self, event):
return completions
get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
def module_completion(line):
"""
Returns a list containing the completion possibilities for an import line.
The line looks like this:
'import xml.d'
'from xml.dom import'
"""
words = line.split(' ')
nwords = len(words)
# from whatever <tab> -> 'import '
if nwords == 3 and words[0] == 'from':
return ['import ']
# 'from xy<tab>' or 'import xy<tab>'
if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
if nwords == 1:
return get_root_modules()
mod = words[1].split('.')
if len(mod) < 2:
return get_root_modules()
completion_list = try_import('.'.join(mod[:-1]), True)
return ['.'.join(mod[:-1] + [el]) for el in completion_list]
# 'from xyz import abc<tab>'
if nwords >= 3 and words[0] == 'from':
mod = words[1]
return try_import(mod)
#-----------------------------------------------------------------------------
# Completers
#-----------------------------------------------------------------------------
# These all have the func(self, event) signature to be used as custom
# completers
def module_completer(self,event):
"""Give completions after user has typed 'import ...' or 'from ...'"""
# This works in all versions of python. While 2.5 has
# pkgutil.walk_packages(), that particular routine is fairly dangerous,
# since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
# of possibly problematic side effects.
# This searches the folders on sys.path for available modules.
return module_completion(event.line)
# FIXME: there's a lot of logic common to the run, cd and builtin file
# completers, that is currently reimplemented in each.
def magic_run_completer(self, event):
"""Complete files that end in .py or .ipy or .ipynb for the %run command.
"""
comps = arg_split(event.line, strict=False)
# relpath should be the current token that we need to complete.
if (len(comps) > 1) and (not event.line.endswith(' ')):
relpath = comps[-1].strip("'\"")
else:
relpath = ''
#print("\nev=", event) # dbg
#print("rp=", relpath) # dbg
#print('comps=', comps) # dbg
lglob = glob.glob
isdir = os.path.isdir
relpath, tilde_expand, tilde_val = expand_user(relpath)
# Find if the user has already typed the first filename, after which we
# should complete on all files, since after the first one other files may
# be arguments to the input script.
if any(magic_run_re.match(c) for c in comps):
matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
for f in lglob(relpath+'*')]
else:
dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
pys = [f.replace('\\','/')
for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
matches = dirs + pys
#print('run comp:', dirs+pys) # dbg
return [compress_user(p, tilde_expand, tilde_val) for p in matches]
def cd_completer(self, event):
"""Completer function for cd, which only returns directories."""
ip = get_ipython()
relpath = event.symbol
#print(event) # dbg
if event.line.endswith('-b') or ' -b ' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', None)
if bkms:
return bkms.keys()
else:
return []
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh +'d [%s]'
ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
if len(ents) > 1:
return ents
return []
if event.symbol.startswith('--'):
return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
# Expand ~ in path and normalize directory separators.
relpath, tilde_expand, tilde_val = expand_user(relpath)
relpath = relpath.replace('\\','/')
found = []
for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
if os.path.isdir(f)]:
if ' ' in d:
# we don't want to deal with any of that, complex code
# for this is elsewhere
raise TryNext
found.append(d)
if not found:
if os.path.isdir(relpath):
return [compress_user(relpath, tilde_expand, tilde_val)]
# if no completions so far, try bookmarks
bks = self.db.get('bookmarks',{})
bkmatches = [s for s in bks if s.startswith(event.symbol)]
if bkmatches:
return bkmatches
raise TryNext
return [compress_user(p, tilde_expand, tilde_val) for p in found]
def reset_completer(self, event):
"A completer for %reset magic"
return '-f -s in out array dhist'.split()
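# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). Outside a running
# shell get_ipython() is None, so get_root_modules() falls back to the
# builtin module names; module_completion() still works on that subset.
if __name__ == '__main__':
    print(module_completion('import sy')[:5])        # root-module candidates
    print(module_completion('from os import ')[:5])  # attributes of os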

View File

@@ -0,0 +1,215 @@
# encoding: utf-8
"""sys.excepthook for yap_ipython itself, leaves a detailed report on disk.
Authors:
* Fernando Perez
* Brian E. Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import traceback
from pprint import pformat
from yap_ipython.core import ultratb
from yap_ipython.core.release import author_email
from yap_ipython.utils.sysinfo import sys_info
from yap_ipython.utils.py3compat import input
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Template for the user message.
_default_message_template = """\
Oops, {app_name} crashed. We do our best to make it stable, but...
A crash report was automatically generated with the following information:
- A verbatim copy of the crash traceback.
- A copy of your input history during this session.
- Data on your current {app_name} configuration.
It was left in the file named:
\t'{crash_report_fname}'
If you can email this file to the developers, the information in it will help
them in understanding and correcting the problem.
You can mail it to: {contact_name} at {contact_email}
with the subject '{app_name} Crash Report'.
If you want to do it now, the following command will work (under Unix):
mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
To ensure accurate tracking of this issue, please file a report about it at:
{bug_tracker}
"""
_lite_message_template = """
If you suspect this is an yap_ipython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at {email}
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
{config}Application.verbose_crash=True
"""
class CrashHandler(object):
"""Customizable crash handlers for yap_ipython applications.
Instances of this class provide a :meth:`__call__` method which can be
used as a ``sys.excepthook``. The :meth:`__call__` signature is::
def __call__(self, etype, evalue, etb)
"""
message_template = _default_message_template
section_sep = '\n\n'+'*'*75+'\n\n'
def __init__(self, app, contact_name=None, contact_email=None,
bug_tracker=None, show_crash_traceback=True, call_pdb=False):
"""Create a new crash handler
Parameters
----------
app : Application
A running :class:`Application` instance, which will be queried at
crash time for internal information.
contact_name : str
A string with the name of the person to contact.
contact_email : str
A string with the email address of the contact.
bug_tracker : str
A string with the URL for your project's bug tracker.
show_crash_traceback : bool
If false, don't print the crash traceback on stderr, only generate
the on-disk report
Non-argument instance attributes:
These instances contain some non-argument attributes which allow for
further customization of the crash handler's behavior. Please see the
source for further details.
"""
self.crash_report_fname = "Crash_report_%s.txt" % app.name
self.app = app
self.call_pdb = call_pdb
#self.call_pdb = True # dbg
self.show_crash_traceback = show_crash_traceback
self.info = dict(app_name = app.name,
contact_name = contact_name,
contact_email = contact_email,
bug_tracker = bug_tracker,
crash_report_fname = self.crash_report_fname)
def __call__(self, etype, evalue, etb):
"""Handle an exception, call for compatible with sys.excepthook"""
# do not allow the crash handler to be called twice without reinstalling it
# this prevents unlikely errors in the crash handling from entering an
# infinite loop.
sys.excepthook = sys.__excepthook__
# Report tracebacks shouldn't use color in general (safer for users)
color_scheme = 'NoColor'
# Use this ONLY for developer debugging (keep commented out for release)
#color_scheme = 'Linux' # dbg
try:
rptdir = self.app.ipython_dir
except:
rptdir = os.getcwd()
if rptdir is None or not os.path.isdir(rptdir):
rptdir = os.getcwd()
report_name = os.path.join(rptdir,self.crash_report_fname)
# write the report filename into the instance dict so it can get
# properly expanded out in the user message template
self.crash_report_fname = report_name
self.info['crash_report_fname'] = report_name
TBhandler = ultratb.VerboseTB(
color_scheme=color_scheme,
long_header=1,
call_pdb=self.call_pdb,
)
if self.call_pdb:
TBhandler(etype,evalue,etb)
return
else:
traceback = TBhandler.text(etype,evalue,etb,context=31)
# print traceback to screen
if self.show_crash_traceback:
print(traceback, file=sys.stderr)
# and generate a complete report on disk
try:
report = open(report_name,'w')
except:
print('Could not create crash report on disk.', file=sys.stderr)
return
# Inform user on stderr of what happened
print('\n'+'*'*70+'\n', file=sys.stderr)
print(self.message_template.format(**self.info), file=sys.stderr)
# Construct report on disk
report.write(self.make_report(traceback))
report.close()
input("Hit <Enter> to quit (your terminal may close):")
def make_report(self,traceback):
"""Return a string containing a crash report."""
sec_sep = self.section_sep
report = ['*'*75+'\n\n'+'yap_ipython post-mortem report\n\n']
rpt_add = report.append
rpt_add(sys_info())
try:
config = pformat(self.app.config)
rpt_add(sec_sep)
rpt_add('Application name: %s\n\n' % self.app.name)
rpt_add('Current user configuration structure:\n\n')
rpt_add(config)
except:
pass
rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
return ''.join(report)
def crash_handler_lite(etype, evalue, tb):
"""a light excepthook, adding a small message to the usual traceback"""
traceback.print_exception(etype, evalue, tb)
from yap_ipython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
# we are in a Shell environment, give %magic example
config = "%config "
else:
# we are not in a shell, show generic config
config = "c."
print(_lite_message_template.format(email=author_email, config=config), file=sys.stderr)
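# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): crash_handler_lite
# has the sys.excepthook signature, so it can be installed directly as a
# lightweight replacement hook (it needs yap_ipython to be importable in
# order to detect a running shell).
if __name__ == '__main__':
    sys.excepthook = crash_handler_lite
    try:
        1 / 0
    except ZeroDivisionError:
        crash_handler_lite(*sys.exc_info())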

View File

@@ -0,0 +1,645 @@
# -*- coding: utf-8 -*-
"""
Pdb debugger class.
Modified from the standard pdb.Pdb class to avoid including readline, so that
the command line completion of other programs which include this isn't
damaged.
In the future, this class will be expanded with improvements over the standard
pdb.
The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
changes. Licensing should therefore be under the standard Python terms. For
details on the PSF (Python Software Foundation) standard license, see:
https://docs.python.org/2/license.html
"""
#*****************************************************************************
#
# This file is licensed under the PSF license.
#
# Copyright (C) 2001 Python Software Foundation, www.python.org
# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
#
#
#*****************************************************************************
import bdb
import functools
import inspect
import linecache
import sys
import warnings
import re
from yap_ipython import get_ipython
from yap_ipython.utils import PyColorize
from yap_ipython.utils import coloransi, py3compat
from yap_ipython.core.excolors import exception_colors
from yap_ipython.testing.skipdoctest import skip_doctest
prompt = 'ipdb> '
#We have to check this directly from sys.argv, config struct not yet available
from pdb import Pdb as OldPdb
# Allow the set_trace code to operate outside of an ipython instance, even if
# it does so with some limitations. The rest of this support is implemented in
# the Tracer constructor.
def make_arrow(pad):
"""generate the leading arrow in front of traceback or debugger"""
if pad >= 2:
return '-'*(pad-2) + '> '
elif pad == 1:
return '>'
return ''
def BdbQuit_excepthook(et, ev, tb, excepthook=None):
"""Exception hook which handles `BdbQuit` exceptions.
All other exceptions are processed using the `excepthook`
parameter.
"""
warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
DeprecationWarning, stacklevel=2)
if et==bdb.BdbQuit:
print('Exiting Debugger.')
elif excepthook is not None:
excepthook(et, ev, tb)
else:
# Backwards compatibility. Raise deprecation warning?
BdbQuit_excepthook.excepthook_ori(et,ev,tb)
def BdbQuit_IPython_excepthook(self,et,ev,tb,tb_offset=None):
warnings.warn(
"`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
DeprecationWarning, stacklevel=2)
print('Exiting Debugger.')
class Tracer(object):
"""
DEPRECATED
Class for local debugging, similar to pdb.set_trace.
Instances of this class, when called, behave like pdb.set_trace, but
providing yap_ipython's enhanced capabilities.
This is implemented as a class which must be initialized in your own code
and not as a standalone function because we need to detect at runtime
whether yap_ipython is already active or not. That detection is done in the
constructor, ensuring that this code plays nicely with a running yap_ipython,
while functioning acceptably (though with limitations) if outside of it.
"""
@skip_doctest
def __init__(self, colors=None):
"""
DEPRECATED
Create a local debugger instance.
Parameters
----------
colors : str, optional
The name of the color scheme to use, it must be one of yap_ipython's
valid color schemes. If not given, the function will default to
the current yap_ipython scheme when running inside yap_ipython, and to
'NoColor' otherwise.
Examples
--------
::
from yap_ipython.core.debugger import Tracer; debug_here = Tracer()
Later in your code::
debug_here() # -> will open up the debugger at that point.
Once the debugger activates, you can use all of its regular commands to
step through code, set breakpoints, etc. See the pdb documentation
from the Python standard library for usage details.
"""
warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
"`yap_ipython.core.debugger.Pdb.set_trace()`",
DeprecationWarning, stacklevel=2)
ip = get_ipython()
if ip is None:
# Outside of ipython, we set our own exception hook manually
sys.excepthook = functools.partial(BdbQuit_excepthook,
excepthook=sys.excepthook)
def_colors = 'NoColor'
else:
# In ipython, we use its custom exception handler mechanism
def_colors = ip.colors
ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
if colors is None:
colors = def_colors
# The stdlib debugger internally uses a modified repr from the `repr`
# module, that limits the length of printed strings to a hardcoded
# limit of 30 characters. That much trimming is too aggressive, let's
# at least raise that limit to 80 chars, which should be enough for
# most interactive uses.
try:
try:
from reprlib import aRepr # Py 3
except ImportError:
from repr import aRepr # Py 2
aRepr.maxstring = 80
except:
# This is only a user-facing convenience, so any error we encounter
# here can be warned about but can be otherwise ignored. These
# printouts will tell us about problems if this API changes
import traceback
traceback.print_exc()
self.debugger = Pdb(colors)
def __call__(self):
"""Starts an interactive debugger at the point where called.
This is similar to the pdb.set_trace() function from the std lib, but
using yap_ipython's enhanced debugger."""
self.debugger.set_trace(sys._getframe().f_back)
RGX_EXTRA_INDENT = re.compile('(?<=\n)\s+')
def strip_indentation(multiline_string):
return RGX_EXTRA_INDENT.sub('', multiline_string)
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
"""Make new_fn have old_fn's doc string. This is particularly useful
for the ``do_...`` commands that hook into the help system.
Adapted from a comp.lang.python posting
by Duncan Booth."""
def wrapper(*args, **kw):
return new_fn(*args, **kw)
if old_fn.__doc__:
wrapper.__doc__ = strip_indentation(old_fn.__doc__) + additional_text
return wrapper
def _file_lines(fname):
"""Return the contents of a named file as a list of lines.
This function never raises an IOError exception: if the file can't be
read, it simply returns an empty list."""
try:
outfile = open(fname)
except IOError:
return []
else:
out = outfile.readlines()
outfile.close()
return out
class Pdb(OldPdb):
"""Modified Pdb class, does not load readline.
for a standalone version that uses prompt_toolkit, see
`yap_ipython.terminal.debugger.TerminalPdb` and
`yap_ipython.terminal.debugger.set_trace()`
"""
def __init__(self, color_scheme=None, completekey=None,
stdin=None, stdout=None, context=5):
# Parent constructor:
try:
self.context = int(context)
if self.context <= 0:
raise ValueError("Context must be a positive integer")
except (TypeError, ValueError):
raise ValueError("Context must be a positive integer")
OldPdb.__init__(self, completekey, stdin, stdout)
# yap_ipython changes...
self.shell = get_ipython()
if self.shell is None:
save_main = sys.modules['__main__']
# No yap_ipython instance running, we must create one
from yap_ipython.terminal.interactiveshell import \
TerminalInteractiveShell
self.shell = TerminalInteractiveShell.instance()
# needed by any code which calls __import__("__main__") after
# the debugger was entered. See also #9941.
sys.modules['__main__'] = save_main
if color_scheme is not None:
warnings.warn(
"The `color_scheme` argument is deprecated since version 5.1",
DeprecationWarning, stacklevel=2)
else:
color_scheme = self.shell.colors
self.aliases = {}
# Create color table: we copy the default one from the traceback
# module and add a few attributes needed for debugging
self.color_scheme_table = exception_colors()
# shorthands
C = coloransi.TermColors
cst = self.color_scheme_table
cst['NoColor'].colors.prompt = C.NoColor
cst['NoColor'].colors.breakpoint_enabled = C.NoColor
cst['NoColor'].colors.breakpoint_disabled = C.NoColor
cst['Linux'].colors.prompt = C.Green
cst['Linux'].colors.breakpoint_enabled = C.LightRed
cst['Linux'].colors.breakpoint_disabled = C.Red
cst['LightBG'].colors.prompt = C.Blue
cst['LightBG'].colors.breakpoint_enabled = C.LightRed
cst['LightBG'].colors.breakpoint_disabled = C.Red
cst['Neutral'].colors.prompt = C.Blue
cst['Neutral'].colors.breakpoint_enabled = C.LightRed
cst['Neutral'].colors.breakpoint_disabled = C.Red
# Add a python parser so we can syntax highlight source while
# debugging.
self.parser = PyColorize.Parser(style=color_scheme)
self.set_colors(color_scheme)
# Set the prompt - the default prompt is '(Pdb)'
self.prompt = prompt
def set_colors(self, scheme):
"""Shorthand access to the color table scheme selector method."""
self.color_scheme_table.set_active_scheme(scheme)
self.parser.style = scheme
def interaction(self, frame, traceback):
try:
OldPdb.interaction(self, frame, traceback)
except KeyboardInterrupt:
sys.stdout.write('\n' + self.shell.get_exception_only())
def new_do_up(self, arg):
OldPdb.do_up(self, arg)
do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
def new_do_down(self, arg):
OldPdb.do_down(self, arg)
do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
def new_do_frame(self, arg):
OldPdb.do_frame(self, arg)
def new_do_quit(self, arg):
if hasattr(self, 'old_all_completions'):
self.shell.Completer.all_completions=self.old_all_completions
return OldPdb.do_quit(self, arg)
do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
def new_do_restart(self, arg):
"""Restart command. In the context of ipython this is exactly the same
thing as 'quit'."""
self.msg("Restart doesn't make sense here. Using 'quit' instead.")
return self.do_quit(arg)
def print_stack_trace(self, context=None):
if context is None:
context = self.context
try:
context=int(context)
if context <= 0:
raise ValueError("Context must be a positive integer")
except (TypeError, ValueError):
raise ValueError("Context must be a positive integer")
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno, context=context)
except KeyboardInterrupt:
pass
def print_stack_entry(self,frame_lineno, prompt_prefix='\n-> ',
context=None):
if context is None:
context = self.context
try:
context=int(context)
if context <= 0:
raise ValueError("Context must be a positive integer")
except (TypeError, ValueError):
raise ValueError("Context must be a positive integer")
print(self.format_stack_entry(frame_lineno, '', context))
# vds: >>
frame, lineno = frame_lineno
filename = frame.f_code.co_filename
self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
# vds: <<
def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
if context is None:
context = self.context
try:
context=int(context)
if context <= 0:
print("Context must be a positive integer")
except (TypeError, ValueError):
print("Context must be a positive integer")
try:
import reprlib # Py 3
except ImportError:
import repr as reprlib # Py 2
ret = []
Colors = self.color_scheme_table.active_colors
ColorsNormal = Colors.Normal
tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
ColorsNormal)
frame, lineno = frame_lineno
return_value = ''
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
#return_value += '->'
return_value += reprlib.repr(rv) + '\n'
ret.append(return_value)
#s = filename + '(' + `lineno` + ')'
filename = self.canonic(frame.f_code.co_filename)
link = tpl_link % py3compat.cast_unicode(filename)
if frame.f_code.co_name:
func = frame.f_code.co_name
else:
func = "<lambda>"
call = ''
if func != '?':
if '__args__' in frame.f_locals:
args = reprlib.repr(frame.f_locals['__args__'])
else:
args = '()'
call = tpl_call % (func, args)
# The level info should be generated in the same format pdb uses, to
# avoid breaking the pdbtrack functionality of python-mode in *emacs.
if frame is self.curframe:
ret.append('> ')
else:
ret.append(' ')
ret.append(u'%s(%s)%s\n' % (link,lineno,call))
start = lineno - 1 - context//2
lines = linecache.getlines(filename)
start = min(start, len(lines) - context)
start = max(start, 0)
lines = lines[start : start + context]
for i,line in enumerate(lines):
show_arrow = (start + 1 + i == lineno)
linetpl = (frame is self.curframe or show_arrow) \
and tpl_line_em \
or tpl_line
ret.append(self.__format_line(linetpl, filename,
start + 1 + i, line,
arrow = show_arrow) )
return ''.join(ret)
def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
bp_mark = ""
bp_mark_color = ""
new_line, err = self.parser.format2(line, 'str')
if not err:
line = new_line
bp = None
if lineno in self.get_file_breaks(filename):
bps = self.get_breaks(filename, lineno)
bp = bps[-1]
if bp:
Colors = self.color_scheme_table.active_colors
bp_mark = str(bp.number)
bp_mark_color = Colors.breakpoint_enabled
if not bp.enabled:
bp_mark_color = Colors.breakpoint_disabled
numbers_width = 7
if arrow:
# This is the line with the error
pad = numbers_width - len(str(lineno)) - len(bp_mark)
num = '%s%s' % (make_arrow(pad), str(lineno))
else:
num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
return tpl_line % (bp_mark_color + bp_mark, num, line)
def print_list_lines(self, filename, first, last):
"""The printing (as opposed to the parsing part of a 'list'
command."""
try:
Colors = self.color_scheme_table.active_colors
ColorsNormal = Colors.Normal
tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
src = []
if filename == "<string>" and hasattr(self, "_exec_filename"):
filename = self._exec_filename
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno)
if not line:
break
if lineno == self.curframe.f_lineno:
line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
else:
line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
src.append(line)
self.lineno = lineno
print(''.join(src))
except KeyboardInterrupt:
pass
def do_list(self, arg):
"""Print lines of code from the current stack frame
"""
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print('*** Error in argument:', repr(arg))
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
self.print_list_lines(self.curframe.f_code.co_filename, first, last)
# vds: >>
lineno = first
filename = self.curframe.f_code.co_filename
self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
# vds: <<
do_l = do_list
def getsourcelines(self, obj):
lines, lineno = inspect.findsource(obj)
if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
# must be a module frame: do not try to cut a block out of it
return lines, 1
elif inspect.ismodule(obj):
return lines, 1
return inspect.getblock(lines[lineno:]), lineno+1
def do_longlist(self, arg):
"""Print lines of code from the current stack frame.
Shows more lines than 'list' does.
"""
self.lastcmd = 'longlist'
try:
lines, lineno = self.getsourcelines(self.curframe)
except OSError as err:
self.error(err)
return
last = lineno + len(lines)
self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
do_ll = do_longlist
def do_debug(self, arg):
"""debug code
Enter a recursive debugger that steps through the code
argument (which is an arbitrary expression or statement to be
executed in the current environment).
"""
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe_locals
p = self.__class__(completekey=self.completekey,
stdin=self.stdin, stdout=self.stdout)
p.use_rawinput = self.use_rawinput
p.prompt = "(%s) " % self.prompt.strip()
self.message("ENTERING RECURSIVE DEBUGGER")
sys.call_tracing(p.run, (arg, globals, locals))
self.message("LEAVING RECURSIVE DEBUGGER")
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_pdef(self, arg):
"""Print the call signature for any callable object.
The debugger interface to %pdef"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
def do_pdoc(self, arg):
"""Print the docstring for an object.
The debugger interface to %pdoc."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
def do_pfile(self, arg):
"""Print (or run through pager) the file where an object is defined.
The debugger interface to %pfile.
"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
def do_pinfo(self, arg):
"""Provide detailed information about an object.
The debugger interface to %pinfo, i.e., obj?."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
def do_pinfo2(self, arg):
"""Provide extra detailed information about an object.
The debugger interface to %pinfo2, i.e., obj??."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
def do_psource(self, arg):
"""Print (or run through pager) the source code for an object."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
def do_where(self, arg):
"""w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command.
Takes a number as an (optional) argument: the number of context lines
to print."""
if arg:
context = int(arg)
self.print_stack_trace(context)
else:
self.print_stack_trace()
do_w = do_where
def set_trace(frame=None):
"""
Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
Pdb().set_trace(frame or sys._getframe().f_back)
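# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the module-level
# set_trace() mirrors pdb.set_trace() but drops into the enhanced Pdb above.
# The call is left commented out so running this file stays non-interactive.
def _buggy_sum(n):
    total = 0
    for i in range(n):
        # set_trace()  # uncomment to inspect `total` on each iteration
        total += i
    return total

if __name__ == '__main__':
    print(_buggy_sum(5))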

File diff suppressed because it is too large

View File

@@ -0,0 +1,70 @@
# encoding: utf-8
"""
A context manager for handling sys.displayhook.
Authors:
* Robert Kern
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from traitlets.config.configurable import Configurable
from traitlets import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DisplayTrap(Configurable):
"""Object to manage sys.displayhook.
This came from yap_ipython.core.kernel.display_hook, but is simplified
(no callbacks or formatters) until more of the core is refactored.
"""
hook = Any()
def __init__(self, hook=None):
super(DisplayTrap, self).__init__(hook=hook, config=None)
self.old_hook = None
# We define this to track if a single BuiltinTrap is nested.
# Only turn off the trap when the outermost call to __exit__ is made.
self._nested_level = 0
def __enter__(self):
if self._nested_level == 0:
self.set()
self._nested_level += 1
return self
def __exit__(self, type, value, traceback):
if self._nested_level == 1:
self.unset()
self._nested_level -= 1
# Returning False will cause exceptions to propagate
return False
def set(self):
"""Set the hook."""
if sys.displayhook is not self.hook:
self.old_hook = sys.displayhook
sys.displayhook = self.hook
def unset(self):
"""Unset the hook."""
sys.displayhook = self.old_hook
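# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): swap in a custom
# displayhook only for the duration of the with-block.
if __name__ == '__main__':
    def _loud_hook(value):
        if value is not None:
            print('DISPLAY ->', repr(value))

    with DisplayTrap(hook=_loud_hook):
        assert sys.displayhook is _loud_hook
    assert sys.displayhook is not _loud_hook   # restored on exit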

View File

@@ -0,0 +1,320 @@
# -*- coding: utf-8 -*-
"""Displayhook for yap_ipython.
This defines a callable class that yap_ipython uses for `sys.displayhook`.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import builtins as builtin_mod
import sys
import io as _io
import tokenize
from traitlets.config.configurable import Configurable
from traitlets import Instance, Float
from warnings import warn
# TODO: Move the various attributes (cache_size, [others now moved]). Some
# of these are also attributes of InteractiveShell. They should be on ONE object
# only and the other objects should ask that one object for their values.
class DisplayHook(Configurable):
"""The custom yap_ipython displayhook to replace sys.displayhook.
This class does many things, but the basic idea is that it is a callable
that gets called anytime user code returns a value.
"""
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
exec_result = Instance('yap_ipython.core.interactiveshell.ExecutionResult',
allow_none=True)
cull_fraction = Float(0.2)
def __init__(self, shell=None, cache_size=1000, **kwargs):
super(DisplayHook, self).__init__(shell=shell, **kwargs)
cache_size_min = 3
if cache_size <= 0:
self.do_full_cache = 0
cache_size = 0
elif cache_size < cache_size_min:
self.do_full_cache = 0
cache_size = 0
warn('caching was disabled (min value for cache size is %s).' %
cache_size_min,stacklevel=3)
else:
self.do_full_cache = 1
self.cache_size = cache_size
# we need a reference to the user-level namespace
self.shell = shell
self._,self.__,self.___ = '','',''
# these are deliberately global:
to_user_ns = {'_':self._,'__':self.__,'___':self.___}
self.shell.user_ns.update(to_user_ns)
@property
def prompt_count(self):
return self.shell.execution_count
#-------------------------------------------------------------------------
# Methods used in __call__. Override these methods to modify the behavior
# of the displayhook.
#-------------------------------------------------------------------------
def check_for_underscore(self):
"""Check if the user has set the '_' variable by hand."""
# If something injected a '_' variable in __builtin__, delete
# ipython's automatic one so we don't clobber that. gettext() in
# particular uses _, so we need to stay away from it.
if '_' in builtin_mod.__dict__:
try:
user_value = self.shell.user_ns['_']
if user_value is not self._:
return
del self.shell.user_ns['_']
except KeyError:
pass
def quiet(self):
"""Should we silence the display hook because of ';'?"""
# do not print output if input ends in ';'
try:
cell = self.shell.history_manager.input_hist_parsed[-1]
except IndexError:
# some uses of ipshellembed may fail here
return False
sio = _io.StringIO(cell)
tokens = list(tokenize.generate_tokens(sio.readline))
for token in reversed(tokens):
if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
continue
if (token[0] == tokenize.OP) and (token[1] == ';'):
return True
else:
return False
def start_displayhook(self):
"""Start the displayhook, initializing resources."""
pass
def write_output_prompt(self):
"""Write the output prompt.
The default implementation simply writes the prompt to
``sys.stdout``.
"""
# Use write, not print which adds an extra space.
sys.stdout.write(self.shell.separate_out)
outprompt = 'Out[{}]: '.format(self.shell.execution_count)
if self.do_full_cache:
sys.stdout.write(outprompt)
def compute_format_data(self, result):
"""Compute format data of the object to be displayed.
The format data is a generalization of the :func:`repr` of an object.
In the default implementation the format data is a :class:`dict` of
key-value pairs where the keys are valid MIME types and the values
are JSON'able data structures containing the raw data for that MIME
type. It is up to frontends to pick a MIME type to use and
display that data in an appropriate manner.
This method only computes the format data for the object and should
NOT actually print or write that to a stream.
Parameters
----------
result : object
The Python object passed to the display hook, whose format will be
computed.
Returns
-------
(format_dict, md_dict) : dict
format_dict is a :class:`dict` whose keys are valid MIME types and values are
JSON'able raw data for that MIME type. It is recommended that all
return values include the "text/plain" MIME type representation
of the object.
md_dict is a :class:`dict` with the same MIME type keys
of metadata associated with each output.
"""
return self.shell.display_formatter.format(result)
# This can be set to True by the write_output_prompt method in a subclass
prompt_end_newline = False
def write_format_data(self, format_dict, md_dict=None):
"""Write the format data dict to the frontend.
This default version of this method simply writes the plain text
representation of the object to ``sys.stdout``. Subclasses should
override this method to send the entire `format_dict` to the
frontends.
Parameters
----------
format_dict : dict
The format dict for the object passed to `sys.displayhook`.
md_dict : dict (optional)
The metadata dict to be associated with the display data.
"""
if 'text/plain' not in format_dict:
# nothing to do
return
# We want to print because we want to always make sure we have a
# newline, even if all the prompt separators are ''. This is the
# standard yap_ipython behavior.
result_repr = format_dict['text/plain']
if '\n' in result_repr:
# So that multi-line strings line up with the left column of
# the screen, instead of having the output prompt mess up
# their first line.
# We use the prompt template instead of the expanded prompt
# because the expansion may add ANSI escapes that will interfere
# with our ability to determine whether or not we should add
# a newline.
if not self.prompt_end_newline:
# But avoid extraneous empty lines.
result_repr = '\n' + result_repr
print(result_repr)
def update_user_ns(self, result):
"""Update user_ns with various things like _, __, _1, etc."""
# Avoid recursive reference when displaying _oh/Out
if result is not self.shell.user_ns['_oh']:
if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
self.cull_cache()
# Don't overwrite '_' and friends if '_' is in __builtin__
# (otherwise we cause buggy behavior for things like gettext), and
# do not overwrite _, __ or ___ if one of these has been assigned
# by the user.
update_unders = True
for unders in ['_'*i for i in range(1,4)]:
if not unders in self.shell.user_ns:
continue
if getattr(self, unders) is not self.shell.user_ns.get(unders):
update_unders = False
self.___ = self.__
self.__ = self._
self._ = result
if ('_' not in builtin_mod.__dict__) and (update_unders):
self.shell.push({'_':self._,
'__':self.__,
'___':self.___}, interactive=False)
# hackish access to top-level namespace to create _1,_2... dynamically
to_main = {}
if self.do_full_cache:
new_result = '_%s' % self.prompt_count
to_main[new_result] = result
self.shell.push(to_main, interactive=False)
self.shell.user_ns['_oh'][self.prompt_count] = result
def fill_exec_result(self, result):
if self.exec_result is not None:
self.exec_result.result = result
def log_output(self, format_dict):
"""Log the output."""
if 'text/plain' not in format_dict:
# nothing to do
return
if self.shell.logger.log_output:
self.shell.logger.log_write(format_dict['text/plain'], 'output')
self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
format_dict['text/plain']
def finish_displayhook(self):
"""Finish up all displayhook activities."""
sys.stdout.write(self.shell.separate_out2)
sys.stdout.flush()
def __call__(self, result=None):
"""Printing with history cache management.
This is invoked every time the interpreter needs to print, and is
activated by setting the variable sys.displayhook to it.
"""
self.check_for_underscore()
if result is not None and not self.quiet():
self.start_displayhook()
self.write_output_prompt()
format_dict, md_dict = self.compute_format_data(result)
self.update_user_ns(result)
self.fill_exec_result(result)
if format_dict:
self.write_format_data(format_dict, md_dict)
self.log_output(format_dict)
self.finish_displayhook()
def cull_cache(self):
"""Output cache is full, cull the oldest entries"""
oh = self.shell.user_ns.get('_oh', {})
sz = len(oh)
cull_count = max(int(sz * self.cull_fraction), 2)
warn('Output cache limit (currently {sz} entries) hit.\n'
'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
for i, n in enumerate(sorted(oh)):
if i >= cull_count:
break
self.shell.user_ns.pop('_%i' % n, None)
oh.pop(n, None)
def flush(self):
if not self.do_full_cache:
raise ValueError("You shouldn't have reached the cache flush "
"if full caching is not enabled!")
# delete auto-generated vars from global namespace
for n in range(1,self.prompt_count + 1):
key = '_'+repr(n)
try:
del self.shell.user_ns[key]
except: pass
# In some embedded circumstances, the user_ns doesn't have the
# '_oh' key set up.
oh = self.shell.user_ns.get('_oh', None)
if oh is not None:
oh.clear()
# Release our own references to objects:
self._, self.__, self.___ = '', '', ''
if '_' not in builtin_mod.__dict__:
self.shell.user_ns.update({'_':None,'__':None, '___':None})
import gc
# TODO: Is this really needed?
# IronPython blocks here forever
if sys.platform != "cli":
gc.collect()
class CapturingDisplayHook(object):
def __init__(self, shell, outputs=None):
self.shell = shell
if outputs is None:
outputs = []
self.outputs = outputs
def __call__(self, result=None):
if result is None:
return
format_dict, md_dict = self.shell.display_formatter.format(result)
self.outputs.append((format_dict, md_dict))
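# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). Hypothetical usage
# of CapturingDisplayHook, assuming `shell` is a live InteractiveShell with
# its display_formatter set up; the captured (format_dict, md_dict) pairs
# are collected instead of being printed.
def _capture_example(shell):
    outs = []
    hook = CapturingDisplayHook(shell, outputs=outs)
    hook(42)      # as if the value 42 had been displayed
    return outs   # e.g. [({'text/plain': '42'}, {})]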

View File

@@ -0,0 +1,125 @@
"""An interface for publishing rich data to frontends.
There are two components of the display system:
* Display formatters, which take a Python object and compute the
representation of the object in various formats (text, HTML, SVG, etc.).
* The display publisher that is used to send the representation data to the
various frontends.
This module defines the logic for display publishing. The display publisher uses
the ``display_data`` message type that is defined in the yap_ipython messaging
spec.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from traitlets.config.configurable import Configurable
from traitlets import List
# This used to be defined here - it is imported for backwards compatibility
from .display import publish_display_data
#-----------------------------------------------------------------------------
# Main payload class
#-----------------------------------------------------------------------------
class DisplayPublisher(Configurable):
"""A traited class that publishes display data to frontends.
Instances of this class are created by the main yap_ipython object and should
be accessed there.
"""
def _validate_data(self, data, metadata=None):
"""Validate the display data.
Parameters
----------
data : dict
The format data dictionary.
metadata : dict
Any metadata for the data.
"""
if not isinstance(data, dict):
raise TypeError('data must be a dict, got: %r' % data)
if metadata is not None:
if not isinstance(metadata, dict):
raise TypeError('metadata must be a dict, got: %r' % data)
# use * to indicate that transient and update are keyword-only
def publish(self, data, metadata=None, source=None, *, transient=None, update=False, **kwargs):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. Metadata specific to each mime-type can be specified
in the metadata dict with the same mime-type keys as
the data itself.
source : str, deprecated
Unused.
transient: dict, keyword-only
A dictionary for transient data.
Data in this dictionary should not be persisted as part of saving this output.
Examples include 'display_id'.
update: bool, keyword-only, default: False
If True, only update existing outputs with the same display_id,
rather than creating a new output.
"""
# The default is to simply write the plain text data using sys.stdout.
if 'text/plain' in data:
print(data['text/plain'])
def clear_output(self, wait=False):
"""Clear the output of the cell receiving output."""
print('\033[2K\r', end='')
sys.stdout.flush()
print('\033[2K\r', end='')
sys.stderr.flush()
class CapturingDisplayPublisher(DisplayPublisher):
"""A DisplayPublisher that stores"""
outputs = List()
def publish(self, data, metadata=None, source=None, *, transient=None, update=False):
self.outputs.append({'data':data, 'metadata':metadata,
'transient':transient, 'update':update})
def clear_output(self, wait=False):
super(CapturingDisplayPublisher, self).clear_output(wait)
# empty the list, *do not* reassign a new list
self.outputs.clear()
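# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the capturing
# publisher records what would have been sent to the frontends, which is
# convenient for testing display code without a kernel.
if __name__ == '__main__':
    pub = CapturingDisplayPublisher()
    pub.publish({'text/plain': 'hello', 'text/html': '<b>hello</b>'})
    print(pub.outputs[0]['data']['text/plain'])
    pub.clear_output()
    print(len(pub.outputs))   # 0 again; the same list object is reused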

View File

@@ -0,0 +1,60 @@
# encoding: utf-8
"""
Global exception classes for yap_ipython.core.
Authors:
* Brian Granger
* Fernando Perez
* Min Ragan-Kelley
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Exception classes
#-----------------------------------------------------------------------------
class IPythonCoreError(Exception):
pass
class TryNext(IPythonCoreError):
"""Try next hook exception.
Raise this in your hook function to indicate that the next hook handler
should be used to handle the operation.
"""
class UsageError(IPythonCoreError):
"""Error in magic function arguments, etc.
Something that probably won't warrant a full traceback, but should
nevertheless interrupt a macro / batch file.
"""
class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
"""raw_input was requested in a context where it is not supported
For use in yap_ipython kernels, where only some frontends may support
stdin requests.
"""
class InputRejected(Exception):
"""Input rejected by ast transformer.
Raise this in your NodeTransformer to indicate that InteractiveShell should
not execute the supplied input.
"""

View File

@@ -0,0 +1,160 @@
"""Infrastructure for registering and firing callbacks on application events.
Unlike :mod:`yap_ipython.core.hooks`, which lets end users set single functions to
be called at specific times, or a collection of alternative methods to try,
callbacks are designed to be used by extension authors. A number of callbacks
can be registered for the same event without needing to be aware of one another.
The functions defined in this module are no-ops indicating the names of available
events and the arguments which will be passed to them.
.. note::
This API is experimental in yap_ipython 2.0, and may be revised in future versions.
"""
from backcall import callback_prototype
class EventManager(object):
"""Manage a collection of events and a sequence of callbacks for each.
This is attached to :class:`~yap_ipython.core.interactiveshell.InteractiveShell`
instances as an ``events`` attribute.
.. note::
This API is experimental in yap_ipython 2.0, and may be revised in future versions.
"""
def __init__(self, shell, available_events):
"""Initialise the :class:`CallbackManager`.
Parameters
----------
shell
The :class:`~yap_ipython.core.interactiveshell.InteractiveShell` instance
available_callbacks
An iterable of names for callback events.
"""
self.shell = shell
self.callbacks = {n:[] for n in available_events}
def register(self, event, function):
"""Register a new event callback.
Parameters
----------
event : str
The event for which to register this callback.
function : callable
A function to be called on the given event. It should take the same
parameters as the appropriate callback prototype.
Raises
------
TypeError
If ``function`` is not callable.
KeyError
If ``event`` is not one of the known events.
"""
if not callable(function):
raise TypeError('Need a callable, got %r' % function)
callback_proto = available_events.get(event)
self.callbacks[event].append(callback_proto.adapt(function))
def unregister(self, event, function):
"""Remove a callback from the given event."""
if function in self.callbacks[event]:
return self.callbacks[event].remove(function)
# Remove callback in case ``function`` was adapted by `backcall`.
for callback in self.callbacks[event]:
try:
if callback.__wrapped__ is function:
return self.callbacks[event].remove(callback)
except AttributeError:
pass
raise ValueError('Function {!r} is not registered as a {} callback'.format(function, event))
def trigger(self, event, *args, **kwargs):
"""Call callbacks for ``event``.
Any additional arguments are passed to all callbacks registered for this
event. Exceptions raised by callbacks are caught, and a message printed.
"""
for func in self.callbacks[event][:]:
try:
func(*args, **kwargs)
except Exception:
print("Error in callback {} (for {}):".format(func, event))
self.shell.showtraceback()
# event_name -> prototype mapping
available_events = {}
def _define_event(callback_function):
callback_proto = callback_prototype(callback_function)
available_events[callback_function.__name__] = callback_proto
return callback_proto
# ------------------------------------------------------------------------------
# Callback prototypes
#
# No-op functions which describe the names of available events and the
# signatures of callbacks for those events.
# ------------------------------------------------------------------------------
@_define_event
def pre_execute():
"""Fires before code is executed in response to user/frontend action.
This includes comm and widget messages and silent execution, as well as user
code cells.
"""
pass
@_define_event
def pre_run_cell(info):
"""Fires before user-entered code runs.
Parameters
----------
info : :class:`~yap_ipython.core.interactiveshell.ExecutionInfo`
An object containing information used for the code execution.
"""
pass
@_define_event
def post_execute():
"""Fires after code is executed in response to user/frontend action.
This includes comm and widget messages and silent execution, as well as user
code cells.
"""
pass
@_define_event
def post_run_cell(result):
"""Fires after user-entered code runs.
Parameters
----------
result : :class:`~yap_ipython.core.interactiveshell.ExecutionResult`
The object which will be returned as the execution result.
"""
pass
@_define_event
def shell_initialized(ip):
"""Fires after initialisation of :class:`~yap_ipython.core.interactiveshell.InteractiveShell`.
This is before extensions and startup scripts are loaded, so it can only be
set by subclassing.
Parameters
----------
ip : :class:`~yap_ipython.core.interactiveshell.InteractiveShell`
The newly initialised shell.
"""
pass
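# A minimal sketch of registering and firing a callback through EventManager,
# using the module's own `available_events` table. Passing shell=None is only
# for illustration; a real InteractiveShell instance is normally supplied.
if __name__ == "__main__":
    em = EventManager(shell=None, available_events=available_events)

    def announce(info):
        # `info` plays the role of the ExecutionInfo object described in pre_run_cell.
        print("about to run a cell:", info)

    em.register('pre_run_cell', announce)
    em.trigger('pre_run_cell', "<hypothetical ExecutionInfo>")
    em.unregister('pre_run_cell', announce)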

View File

@@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
"""
Color schemes for exception handling code in yap_ipython.
"""
import os
import warnings
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from yap_ipython.utils.coloransi import ColorSchemeTable, TermColors, ColorScheme
def exception_colors():
"""Return a color table with fields for exception reporting.
The table is an instance of ColorSchemeTable with schemes added for
'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
in.
Examples:
>>> ec = exception_colors()
>>> ec.active_scheme_name
''
>>> print(ec.active_colors)
None
Now we activate a color scheme:
>>> ec.set_active_scheme('NoColor')
>>> ec.active_scheme_name
'NoColor'
>>> sorted(ec.active_colors.keys())
['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
'val', 'valEm']
"""
ex_colors = ColorSchemeTable()
# Populate it with color schemes
C = TermColors # shorthand and local lookup
ex_colors.add_scheme(ColorScheme(
'NoColor',
# The color to be used for the top line
topline = C.NoColor,
# The colors to be used in the traceback
filename = C.NoColor,
lineno = C.NoColor,
name = C.NoColor,
vName = C.NoColor,
val = C.NoColor,
em = C.NoColor,
# Emphasized colors for the last frame of the traceback
normalEm = C.NoColor,
filenameEm = C.NoColor,
linenoEm = C.NoColor,
nameEm = C.NoColor,
valEm = C.NoColor,
# Colors for printing the exception
excName = C.NoColor,
line = C.NoColor,
caret = C.NoColor,
Normal = C.NoColor
))
# make some schemes as instances so we can copy them for modification easily
ex_colors.add_scheme(ColorScheme(
'Linux',
# The color to be used for the top line
topline = C.LightRed,
# The colors to be used in the traceback
filename = C.Green,
lineno = C.Green,
name = C.Purple,
vName = C.Cyan,
val = C.Green,
em = C.LightCyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.LightCyan,
filenameEm = C.LightGreen,
linenoEm = C.LightGreen,
nameEm = C.LightPurple,
valEm = C.LightBlue,
# Colors for printing the exception
excName = C.LightRed,
line = C.Yellow,
caret = C.White,
Normal = C.Normal
))
# For light backgrounds, swap dark/light colors
ex_colors.add_scheme(ColorScheme(
'LightBG',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
))
ex_colors.add_scheme(ColorScheme(
'Neutral',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
))
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows.
if os.name == "nt":
ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
return ex_colors
class Deprec(object):
def __init__(self, wrapped_obj):
        self.wrapped = wrapped_obj
def __getattr__(self, name):
val = getattr(self.wrapped, name)
warnings.warn("Using ExceptionColors global is deprecated and will be removed in yap_ipython 6.0",
DeprecationWarning, stacklevel=2)
        # The attribute is fetched before warning because calling getattr after
        # the warning breaks ipydoctest in a weird way on 3.5.
return val
# For backwards compatibility, keep around a single global object. Note that
# this should NOT be used, the factory function should be used instead, since
# these objects are stateful and it's very easy to get strange bugs if any code
# modifies the module-level object's state.
ExceptionColors = Deprec(exception_colors())
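# A short sketch of the recommended factory-function usage (rather than the
# deprecated ExceptionColors global); the chosen scheme name is illustrative.
if __name__ == "__main__":
    ec = exception_colors()
    ec.set_active_scheme('Linux')
    # active_colors maps field names such as 'excName' or 'lineno' to terminal
    # escape strings that traceback formatters interpolate into their output.
    print(sorted(ec.active_colors.keys()))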

View File

@@ -0,0 +1,155 @@
# encoding: utf-8
"""A class for managing yap_ipython extensions."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import os.path
import sys
from importlib import import_module
from traitlets.config.configurable import Configurable
from yap_ipython.utils.path import ensure_dir_exists, compress_user
from yap_ipython.utils.decorators import undoc
from traitlets import Instance
try:
from importlib import reload
except ImportError :
## deprecated since 3.4
from imp import reload
#-----------------------------------------------------------------------------
# Main class
#-----------------------------------------------------------------------------
class ExtensionManager(Configurable):
"""A class to manage yap_ipython extensions.
An yap_ipython extension is an importable Python module that has
a function with the signature::
def load_ipython_extension(ipython):
# Do things with ipython
This function is called after your extension is imported and the
currently active :class:`InteractiveShell` instance is passed as
the only argument. You can do anything you want with yap_ipython at
that point, including defining new magic and aliases, adding new
components, etc.
You can also optionally define an :func:`unload_ipython_extension(ipython)`
function, which will be called if the user unloads or reloads the extension.
The extension manager will only call :func:`load_ipython_extension` again
if the extension is reloaded.
You can put your extension modules anywhere you want, as long as
they can be imported by Python's standard import mechanism. However,
to make it easy to write extensions, you can also put your extensions
in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
is added to ``sys.path`` automatically.
"""
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(ExtensionManager, self).__init__(shell=shell, **kwargs)
self.shell.observe(
self._on_ipython_dir_changed, names=('ipython_dir',)
)
self.loaded = set()
@property
def ipython_extension_dir(self):
return os.path.join(self.shell.ipython_dir, u'extensions')
def _on_ipython_dir_changed(self, change):
ensure_dir_exists(self.ipython_extension_dir)
def load_extension(self, module_str):
"""Load an yap_ipython extension by its module name.
Returns the string "already loaded" if the extension is already loaded,
"no load function" if the module doesn't have a load_ipython_extension
function, or None if it succeeded.
"""
if module_str in self.loaded:
return "already loaded"
from yap_ipython.utils.syspathcontext import prepended_to_syspath
with self.shell.builtin_trap:
if module_str not in sys.modules:
with prepended_to_syspath(self.ipython_extension_dir):
mod = import_module(module_str)
if mod.__file__.startswith(self.ipython_extension_dir):
print(("Loading extensions from {dir} is deprecated. "
"We recommend managing extensions like any "
"other Python packages, in site-packages.").format(
dir=compress_user(self.ipython_extension_dir)))
mod = sys.modules[module_str]
if self._call_load_ipython_extension(mod):
self.loaded.add(module_str)
else:
return "no load function"
def unload_extension(self, module_str):
"""Unload an yap_ipython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
Returns the string "no unload function" if the extension doesn't define
a function to unload itself, "not loaded" if the extension isn't loaded,
otherwise None.
"""
if module_str not in self.loaded:
return "not loaded"
if module_str in sys.modules:
mod = sys.modules[module_str]
if self._call_unload_ipython_extension(mod):
self.loaded.discard(module_str)
else:
return "no unload function"
def reload_extension(self, module_str):
"""Reload an yap_ipython extension by calling reload.
If the module has not been loaded before,
:meth:`InteractiveShell.load_extension` is called. Otherwise
:func:`reload` is called and then the :func:`load_ipython_extension`
        function of the module, if it exists, is called.
"""
from yap_ipython.utils.syspathcontext import prepended_to_syspath
if (module_str in self.loaded) and (module_str in sys.modules):
self.unload_extension(module_str)
mod = sys.modules[module_str]
with prepended_to_syspath(self.ipython_extension_dir):
reload(mod)
if self._call_load_ipython_extension(mod):
self.loaded.add(module_str)
else:
self.load_extension(module_str)
def _call_load_ipython_extension(self, mod):
if hasattr(mod, 'load_ipython_extension'):
mod.load_ipython_extension(self.shell)
return True
def _call_unload_ipython_extension(self, mod):
if hasattr(mod, 'unload_ipython_extension'):
mod.unload_ipython_extension(self.shell)
return True
@undoc
def install_extension(self, url, filename=None):
"""
Deprecated.
"""
# Ensure the extension directory exists
raise DeprecationWarning(
            '`install_extension` and the `install_ext` magic have been deprecated since yap_ipython 4.0. '
'Use pip or other package managers to manage ipython extensions.')
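# A hypothetical extension module, following the load/unload contract described
# in the ExtensionManager docstring above. The module name `my_extension` and
# the registered magic are invented; register_magic_function is assumed to be
# available on the shell, as in upstream IPython.
#
#     # my_extension.py
#     def load_ipython_extension(ipython):
#         def hello(line):
#             print("hello from my_extension:", line)
#         ipython.register_magic_function(hello, magic_kind='line', magic_name='hello')
#
#     def unload_ipython_extension(ipython):
#         pass  # nothing persistent to clean up in this sketch
#
# The module would then be loaded with `%load_ext my_extension` or
# `shell.extension_manager.load_extension('my_extension')`.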

View File

@@ -3,11 +3,11 @@
Inheritance diagram:
.. inheritance-diagram:: IPython.core.formatters
.. inheritance-diagram:: yap_ipython.core.formatters
:parts: 3
"""
# Copyright (c) IPython Development Team.
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import abc
@@ -17,18 +17,18 @@ import traceback
import warnings
from io import StringIO
from IPython.lib import pretty
from IPython.utils.dir2 import get_real_method
from IPython.utils.sentinel import Sentinel
from decorator import decorator
from traitlets.config.configurable import Configurable
from yap_ipython.core.getipython import get_ipython
from yap_ipython.utils.sentinel import Sentinel
from yap_ipython.utils.dir2 import get_real_method
from yap_ipython.lib import pretty
from traitlets import (
Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
ForwardDeclaredInstance,
default, observe,
)
from traitlets.config.configurable import Configurable
from packages.python.yap_kernel.core.yap_kernel.getipython import get_ipython
class DisplayFormatter(Configurable):
@@ -51,12 +51,17 @@ class DisplayFormatter(Configurable):
formatter.enabled = True
else:
formatter.enabled = False
ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
@default('ipython_display_formatter')
def _default_formatter(self):
return IPythonDisplayFormatter(parent=self)
mimebundle_formatter = ForwardDeclaredInstance('FormatterABC')
@default('mimebundle_formatter')
def _default_mime_formatter(self):
return MimeBundleFormatter(parent=self)
# A dict of formatter whose keys are format types (MIME types) and whose
# values are subclasses of BaseFormatter.
formatters = Dict()
@@ -86,7 +91,7 @@ class DisplayFormatter(Configurable):
By default all format types will be computed.
The following MIME types are currently implemented:
The following MIME types are usually implemented:
* text/plain
* text/html
@@ -103,14 +108,15 @@ class DisplayFormatter(Configurable):
----------
obj : object
The Python object whose format data will be computed.
include : list or tuple, optional
include : list, tuple or set; optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
exclude : list, tuple or set; optional
A list of format type string (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
Mimetypes present in exclude will take precedence over the ones in include
Returns
-------
@@ -124,6 +130,15 @@ class DisplayFormatter(Configurable):
metadata_dict is a dictionary of metadata about each mime-type output.
Its keys will be a strict subset of the keys in format_dict.
Notes
-----
        If an object implements `_repr_mimebundle_` as well as various
`_repr_*_`, the data returned by `_repr_mimebundle_` will take
precedence and the corresponding `_repr_*_` for this mimetype will
not be called.
"""
format_dict = {}
md_dict = {}
@@ -131,8 +146,30 @@ class DisplayFormatter(Configurable):
if self.ipython_display_formatter(obj):
# object handled itself, don't proceed
return {}, {}
format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
if format_dict or md_dict:
if include:
format_dict = {k:v for k,v in format_dict.items() if k in include}
md_dict = {k:v for k,v in md_dict.items() if k in include}
if exclude:
format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
for format_type, formatter in self.formatters.items():
if format_type in format_dict:
# already got it from mimebundle, maybe don't render again.
# exception: manually registered per-mime renderer
# check priority:
# 1. user-registered per-mime formatter
# 2. mime-bundle (user-registered or repr method)
# 3. default per-mime formatter (e.g. repr method)
try:
formatter.lookup(obj)
except KeyError:
# no special formatter, use mime-bundle-provided value
continue
if include and format_type not in include:
continue
if exclude and format_type in exclude:
@@ -153,7 +190,6 @@ class DisplayFormatter(Configurable):
format_dict[format_type] = data
if md is not None:
md_dict[format_type] = md
return format_dict, md_dict
@property
@@ -188,7 +224,7 @@ def catch_format_error(method, self, *args, **kwargs):
r = method(self, *args, **kwargs)
except NotImplementedError:
# don't warn on NotImplementedErrors
return None
return self._check_return(None, args[0])
except Exception:
exc_info = sys.exc_info()
ip = get_ipython()
@@ -196,7 +232,7 @@ def catch_format_error(method, self, *args, **kwargs):
ip.showtraceback(exc_info)
else:
traceback.print_exception(*exc_info)
return None
return self._check_return(None, args[0])
return self._check_return(r, args[0])
@@ -531,9 +567,9 @@ class BaseFormatter(Configurable):
class PlainTextFormatter(BaseFormatter):
"""The default pretty-printer.
This uses :mod:`IPython.lib.pretty` to compute the format data of
This uses :mod:`yap_ipython.lib.pretty` to compute the format data of
the object. If the object cannot be pretty printed, :func:`repr` is used.
See the documentation of :mod:`IPython.lib.pretty` for details on
See the documentation of :mod:`yap_ipython.lib.pretty` for details on
how to write pretty printers. Here is a simple example::
def dtype_pprinter(obj, p, cycle):
@@ -633,7 +669,7 @@ class PlainTextFormatter(BaseFormatter):
numpy.set_printoptions(precision=8)
self.float_format = fmt
# Use the default pretty printers from IPython.lib.pretty.
# Use the default pretty printers from yap_ipython.lib.pretty.
@default('singleton_printers')
def _singleton_printers_default(self):
return pretty._singleton_pprinters.copy()
@@ -797,7 +833,7 @@ class JSONFormatter(BaseFormatter):
# unpack data, metadata tuple for type checking on first element
r, md = r
# handle deprecated JSON-as-string form from IPython < 3
# handle deprecated JSON-as-string form from yap_ipython < 3
if isinstance(r, str):
warnings.warn("JSON expects JSONable list/dict containers, not JSON strings",
FormatterWarning)
@@ -843,7 +879,7 @@ class PDFFormatter(BaseFormatter):
_return_type = (bytes, str)
class IPythonDisplayFormatter(BaseFormatter):
"""A Formatter for objects that know how to display themselves.
"""An escape-hatch Formatter for objects that know how to display themselves.
To define the callables that compute the representation of your
objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
@@ -853,10 +889,16 @@ class IPythonDisplayFormatter(BaseFormatter):
This display formatter has highest priority.
If it fires, no other display formatter will be called.
Prior to yap_ipython 6.1, `_ipython_display_` was the only way to display custom mime-types
without registering a new Formatter.
yap_ipython 6.1 introduces `_repr_mimebundle_` for displaying custom mime-types,
so `_ipython_display_` should only be used for objects that require unusual
display patterns, such as multiple display calls.
"""
print_method = ObjectName('_ipython_display_')
_return_type = (type(None), bool)
@catch_format_error
def __call__(self, obj):
@@ -877,6 +919,60 @@ class IPythonDisplayFormatter(BaseFormatter):
return True
class MimeBundleFormatter(BaseFormatter):
"""A Formatter for arbitrary mime-types.
Unlike other `_repr_<mimetype>_` methods,
`_repr_mimebundle_` should return mime-bundle data,
either the mime-keyed `data` dictionary or the tuple `(data, metadata)`.
Any mime-type is valid.
To define the callables that compute the mime-bundle representation of your
objects, define a :meth:`_repr_mimebundle_` method or use the :meth:`for_type`
or :meth:`for_type_by_name` methods to register functions that handle
this.
.. versionadded:: 6.1
"""
print_method = ObjectName('_repr_mimebundle_')
_return_type = dict
def _check_return(self, r, obj):
r = super(MimeBundleFormatter, self)._check_return(r, obj)
# always return (data, metadata):
if r is None:
return {}, {}
if not isinstance(r, tuple):
return r, {}
return r
@catch_format_error
def __call__(self, obj, include=None, exclude=None):
"""Compute the format for an object.
Identical to parent's method but we pass extra parameters to the method.
Unlike other _repr_*_ `_repr_mimebundle_` should allow extra kwargs, in
particular `include` and `exclude`.
"""
if self.enabled:
# lookup registered printer
try:
printer = self.lookup(obj)
except KeyError:
pass
else:
return printer(obj)
# Finally look for special method names
method = get_real_method(obj, self.print_method)
if method is not None:
return method(include=include, exclude=exclude)
return None
else:
return None
FormatterABC.register(BaseFormatter)
FormatterABC.register(PlainTextFormatter)
FormatterABC.register(HTMLFormatter)
@@ -889,6 +985,7 @@ FormatterABC.register(LatexFormatter)
FormatterABC.register(JSONFormatter)
FormatterABC.register(JavascriptFormatter)
FormatterABC.register(IPythonDisplayFormatter)
FormatterABC.register(MimeBundleFormatter)
def format_display_data(obj, include=None, exclude=None):
@@ -896,19 +993,6 @@ def format_display_data(obj, include=None, exclude=None):
By default all format types will be computed.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* application/pdf
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
obj : object
@@ -938,4 +1022,3 @@ def format_display_data(obj, include=None, exclude=None):
include,
exclude
)
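# A brief sketch of the `_repr_mimebundle_` protocol described above: the method
# may return either a mime-keyed dict or a (data, metadata) tuple, and it should
# accept `include`/`exclude` keyword arguments. The class and payloads are
# invented for illustration.
if __name__ == "__main__":
    class Greeting(object):
        def _repr_mimebundle_(self, include=None, exclude=None):
            data = {'text/plain': 'hello', 'text/html': '<b>hello</b>'}
            metadata = {'text/html': {'isolated': False}}
            return data, metadata

    fmt = DisplayFormatter()
    data, md = fmt.format(Greeting(), include={'text/html'})
    print(data)  # expected: {'text/html': '<b>hello</b>'}
    print(md)    # expected: {'text/html': {'isolated': False}}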

View File

@@ -1,5 +1,9 @@
# encoding: utf-8
"""Simple function to call to get the current InteractiveShell instance
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
# Copyright (C) 2013 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
@@ -12,9 +16,9 @@
def get_ipython():
"""Get the global InteractiveShell instance.
Returns None if no InteractiveShell instance is registered.
"""
from yap_ipython.core.interactiveshell import YAPInteractive
if YAPInteractive.initialized():
return YAPInteractive.instance()
from yap_ipython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
return InteractiveShell.instance()
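# A one-line usage sketch: get_ipython() returns the running shell when called
# from user code or an extension, and None when no shell has been initialised.
if __name__ == "__main__":
    ip = get_ipython()
    print("shell registered" if ip is not None else "no shell registered")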

View File

@@ -0,0 +1,906 @@
""" History related magics and functionality """
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
import datetime
import os
import re
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
import threading
from traitlets.config.configurable import LoggingConfigurable
from decorator import decorator
from yap_ipython.utils.decorators import undoc
from yap_ipython.utils.path import locate_profile
from traitlets import (
Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
default, observe,
)
from warnings import warn
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@undoc
class DummyDB(object):
"""Dummy DB that will act as a black hole for history.
Only used in the absence of sqlite"""
def execute(*args, **kwargs):
return []
def commit(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
pass
def __exit__(self, *args, **kwargs):
pass
@decorator
def needs_sqlite(f, self, *a, **kw):
"""Decorator: return an empty list in the absence of sqlite."""
if sqlite3 is None or not self.enabled:
return []
else:
return f(self, *a, **kw)
if sqlite3 is not None:
DatabaseError = sqlite3.DatabaseError
OperationalError = sqlite3.OperationalError
else:
@undoc
class DatabaseError(Exception):
"Dummy exception when sqlite could not be imported. Should never occur."
@undoc
class OperationalError(Exception):
"Dummy exception when sqlite could not be imported. Should never occur."
# use 16kB as threshold for whether a corrupt history db should be saved
# that should be at least 100 entries or so
_SAVE_DB_SIZE = 16384
@decorator
def catch_corrupt_db(f, self, *a, **kw):
"""A decorator which wraps HistoryAccessor method calls to catch errors from
a corrupt SQLite database, move the old database out of the way, and create
a new one.
We avoid clobbering larger databases because this may be triggered due to filesystem issues,
not just a corrupt file.
"""
try:
return f(self, *a, **kw)
except (DatabaseError, OperationalError) as e:
self._corrupt_db_counter += 1
self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
if self.hist_file != ':memory:':
if self._corrupt_db_counter > self._corrupt_db_limit:
self.hist_file = ':memory:'
self.log.error("Failed to load history too many times, history will not be saved.")
elif os.path.isfile(self.hist_file):
# move the file out of the way
base, ext = os.path.splitext(self.hist_file)
size = os.stat(self.hist_file).st_size
if size >= _SAVE_DB_SIZE:
# if there's significant content, avoid clobbering
now = datetime.datetime.now().isoformat().replace(':', '.')
newpath = base + '-corrupt-' + now + ext
# don't clobber previous corrupt backups
for i in range(100):
if not os.path.isfile(newpath):
break
else:
newpath = base + '-corrupt-' + now + (u'-%i' % i) + ext
else:
# not much content, possibly empty; don't worry about clobbering
# maybe we should just delete it?
newpath = base + '-corrupt' + ext
os.rename(self.hist_file, newpath)
self.log.error("History file was moved to %s and a new file created.", newpath)
self.init_db()
return []
else:
# Failed with :memory:, something serious is wrong
raise
class HistoryAccessorBase(LoggingConfigurable):
"""An abstract class for History Accessors """
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
raise NotImplementedError
def search(self, pattern="*", raw=True, search_raw=True,
output=False, n=None, unique=False):
raise NotImplementedError
def get_range(self, session, start=1, stop=None, raw=True,output=False):
raise NotImplementedError
def get_range_by_str(self, rangestr, raw=True, output=False):
raise NotImplementedError
class HistoryAccessor(HistoryAccessorBase):
"""Access the history database without adding to it.
This is intended for use by standalone history tools. yap_ipython shells use
HistoryManager, below, which is a subclass of this."""
# counter for init_db retries, so we don't keep trying over and over
_corrupt_db_counter = 0
# after two failures, fallback on :memory:
_corrupt_db_limit = 2
# String holding the path to the history file
hist_file = Unicode(
help="""Path to file to use for SQLite history database.
By default, yap_ipython will put the history database in the yap_ipython
profile directory. If you would rather share one history among
profiles, you can set this value in each, so that they are consistent.
Due to an issue with fcntl, SQLite is known to misbehave on some NFS
mounts. If you see yap_ipython hanging, try setting this to something on a
local disk, e.g::
ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
        you can also use the specific value `:memory:` (including the colons
        at both ends but not the back ticks) to avoid creating a history file.
""").tag(config=True)
enabled = Bool(True,
help="""enable the SQLite history
set enabled=False to disable the SQLite history,
in which case there will be no stored history, no SQLite connection,
and no background saving thread. This may be necessary in some
threaded environments where yap_ipython is embedded.
"""
).tag(config=True)
connection_options = Dict(
help="""Options for configuring the SQLite connection
These options are passed as keyword args to sqlite3.connect
        when establishing database connections.
"""
).tag(config=True)
# The SQLite database
db = Any()
@observe('db')
def _db_changed(self, change):
"""validate the db, since it can be an Instance of two different types"""
new = change['new']
connection_types = (DummyDB,)
if sqlite3 is not None:
connection_types = (DummyDB, sqlite3.Connection)
if not isinstance(new, connection_types):
msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
(self.__class__.__name__, new)
raise TraitError(msg)
def __init__(self, profile='default', hist_file=u'', **traits):
"""Create a new history accessor.
Parameters
----------
profile : str
The name of the profile from which to open history.
hist_file : str
Path to an SQLite history database stored by yap_ipython. If specified,
hist_file overrides profile.
config : :class:`~traitlets.config.loader.Config`
Config object. hist_file can also be set through this.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryAccessor, self).__init__(**traits)
# defer setting hist_file from kwarg until after init,
# otherwise the default kwarg value would clobber any value
# set by config
if hist_file:
self.hist_file = hist_file
if self.hist_file == u'':
# No one has set the hist_file, yet.
self.hist_file = self._get_hist_file_name(profile)
if sqlite3 is None and self.enabled:
warn("yap_ipython History requires SQLite, your history will not be saved")
self.enabled = False
self.init_db()
def _get_hist_file_name(self, profile='default'):
"""Find the history file for the given profile name.
This is overridden by the HistoryManager subclass, to use the shell's
active profile.
Parameters
----------
profile : str
The name of a profile which has a history file.
"""
return os.path.join(locate_profile(profile), 'history.sqlite')
@catch_corrupt_db
def init_db(self):
"""Connect to the database, and create tables if necessary."""
if not self.enabled:
self.db = DummyDB()
return
# use detect_types so that timestamps return datetime objects
kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
kwargs.update(self.connection_options)
self.db = sqlite3.connect(self.hist_file, **kwargs)
self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
self.db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
# Output history is optional, but ensure the table's there so it can be
# enabled later.
self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
self.db.commit()
# success! reset corrupt db count
self._corrupt_db_counter = 0
def writeout_cache(self):
"""Overridden by HistoryManager to dump the cache before certain
database lookups."""
pass
## -------------------------------
## Methods for retrieving history:
## -------------------------------
def _run_sql(self, sql, params, raw=True, output=False):
"""Prepares and runs an SQL query for the history database.
Parameters
----------
sql : str
Any filtering expressions to go after SELECT ... FROM ...
params : tuple
Parameters passed to the SQL query (to replace "?")
raw, output : bool
See :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
toget = 'source_raw' if raw else 'source'
sqlfrom = "history"
if output:
sqlfrom = "history LEFT JOIN output_history USING (session, line)"
toget = "history.%s, output_history.output" % toget
cur = self.db.execute("SELECT session, line, %s FROM %s " %\
(toget, sqlfrom) + sql, params)
if output: # Regroup into 3-tuples, and parse JSON
return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
return cur
@needs_sqlite
@catch_corrupt_db
def get_session_info(self, session):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if yap_ipython crashed.
num_cmds : int
Number of commands run, or None if yap_ipython crashed.
remark : unicode
A manually set description.
"""
query = "SELECT * from sessions where session == ?"
return self.db.execute(query, (session,)).fetchone()
@catch_corrupt_db
def get_last_session_id(self):
"""Get the last session ID currently in the database.
Within yap_ipython, this should be the same as the value stored in
:attr:`HistoryManager.session_number`.
"""
for record in self.get_tail(n=1, include_latest=True):
return record[0]
@catch_corrupt_db
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
"""Get the last n lines from the history database.
Parameters
----------
n : int
The number of lines to get
raw, output : bool
See :meth:`get_range`
include_latest : bool
If False (default), n+1 lines are fetched, and the latest one
            is discarded. This is intended for use when the function is
            called by a user command, so that the command itself is not returned.
Returns
-------
Tuples as :meth:`get_range`
"""
self.writeout_cache()
if not include_latest:
n += 1
cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
(n,), raw=raw, output=output)
if not include_latest:
return reversed(list(cur)[1:])
return reversed(list(cur))
@catch_corrupt_db
def search(self, pattern="*", raw=True, search_raw=True,
output=False, n=None, unique=False):
"""Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
n : None or int
If an integer is given, it defines the limit of
returned entries.
unique : bool
            If True, return only unique entries.
Returns
-------
Tuples as :meth:`get_range`
"""
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
sqlform = "WHERE %s GLOB ?" % tosearch
params = (pattern,)
if unique:
sqlform += ' GROUP BY {0}'.format(tosearch)
if n is not None:
sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
params += (n,)
elif unique:
sqlform += " ORDER BY session, line"
cur = self._run_sql(sqlform, params, raw=raw, output=output)
if n is not None:
return reversed(list(cur))
return cur
@catch_corrupt_db
def get_range(self, session, start=1, stop=None, raw=True,output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if stop:
lineclause = "line >= ? AND line < ?"
params = (session, start, stop)
else:
lineclause = "line>=?"
params = (session, start)
return self._run_sql("WHERE session==? AND %s" % lineclause,
params, raw=raw, output=output)
def get_range_by_str(self, rangestr, raw=True, output=False):
"""Get lines of history from a string of ranges, as used by magic
commands %hist, %save, %macro, etc.
Parameters
----------
rangestr : str
A string specifying ranges, e.g. "5 ~2/1-4". See
:func:`magic_history` for full details.
raw, output : bool
As :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
for sess, s, e in extract_hist_ranges(rangestr):
for line in self.get_range(sess, s, e, raw=raw, output=output):
yield line
class HistoryManager(HistoryAccessor):
"""A class to organize all history-related functionality in one place.
"""
# Public interface
# An instance of the yap_ipython shell we are attached to
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# Lists to hold processed and raw history. These start with a blank entry
# so that we can index them starting from 1
input_hist_parsed = List([""])
input_hist_raw = List([""])
# A list of directories visited during session
dir_hist = List()
@default('dir_hist')
def _dir_hist_default(self):
try:
return [os.getcwd()]
except OSError:
return []
# A dict of output history, keyed with ints from the shell's
# execution count.
output_hist = Dict()
# The text/plain repr of outputs.
output_hist_reprs = Dict()
# The number of the current session in the history database
session_number = Integer()
db_log_output = Bool(False,
help="Should the history database include output? (default: no)"
).tag(config=True)
db_cache_size = Integer(0,
help="Write to database every x commands (higher values save disk access & power).\n"
"Values of 1 or less effectively disable caching."
).tag(config=True)
# The input and output caches
db_input_cache = List()
db_output_cache = List()
# History saving in separate thread
save_thread = Instance('yap_ipython.core.history.HistorySavingThread',
allow_none=True)
save_flag = Instance(threading.Event, allow_none=True)
# Private interface
# Variables used to store the three last inputs from the user. On each new
# history update, we populate the user's namespace with these, shifted as
# necessary.
_i00 = Unicode(u'')
_i = Unicode(u'')
_ii = Unicode(u'')
_iii = Unicode(u'')
# A regex matching all forms of the exit command, so that we don't store
# them in the history (it's annoying to rewind the first entry and land on
# an exit call).
_exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
def __init__(self, shell=None, config=None, **traits):
"""Create a new history manager associated with a shell instance.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryManager, self).__init__(shell=shell, config=config,
**traits)
self.save_flag = threading.Event()
self.db_input_cache_lock = threading.Lock()
self.db_output_cache_lock = threading.Lock()
try:
self.new_session()
except OperationalError:
self.log.error("Failed to create history session in %s. History will not be saved.",
self.hist_file, exc_info=True)
self.hist_file = ':memory:'
if self.enabled and self.hist_file != ':memory:':
self.save_thread = HistorySavingThread(self)
self.save_thread.start()
def _get_hist_file_name(self, profile=None):
"""Get default history file name based on the Shell's profile.
The profile parameter is ignored, but must exist for compatibility with
the parent class."""
profile_dir = self.shell.profile_dir.location
return os.path.join(profile_dir, 'history.sqlite')
@needs_sqlite
def new_session(self, conn=None):
"""Get a new session number."""
if conn is None:
conn = self.db
with conn:
cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
NULL, "") """, (datetime.datetime.now(),))
self.session_number = cur.lastrowid
def end_session(self):
"""Close the database session, filling in the end time and line count."""
self.writeout_cache()
with self.db:
self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
session==?""", (datetime.datetime.now(),
len(self.input_hist_parsed)-1, self.session_number))
self.session_number = 0
def name_session(self, name):
"""Give the current session a name in the history database."""
with self.db:
self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
(name, self.session_number))
def reset(self, new_session=True):
"""Clear the session history, releasing all object references, and
optionally open a new session."""
self.output_hist.clear()
# The directory history can't be completely empty
self.dir_hist[:] = [os.getcwd()]
if new_session:
if self.session_number:
self.end_session()
self.input_hist_parsed[:] = [""]
self.input_hist_raw[:] = [""]
self.new_session()
# ------------------------------
# Methods for retrieving history
# ------------------------------
def get_session_info(self, session=0):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is the previous session.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if yap_ipython crashed.
num_cmds : int
Number of commands run, or None if yap_ipython crashed.
remark : unicode
A manually set description.
"""
if session <= 0:
session += self.session_number
return super(HistoryManager, self).get_session_info(session=session)
def _get_range_session(self, start=1, stop=None, raw=True, output=False):
"""Get input and output history from the current session. Called by
get_range, and takes similar parameters."""
input_hist = self.input_hist_raw if raw else self.input_hist_parsed
n = len(input_hist)
if start < 0:
start += n
if not stop or (stop > n):
stop = n
elif stop < 0:
stop += n
for i in range(start, stop):
if output:
line = (input_hist[i], self.output_hist_reprs.get(i))
else:
line = input_hist[i]
yield (0, i, line)
def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is previous session.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if session <= 0:
session += self.session_number
if session==self.session_number: # Current session
return self._get_range_session(start, stop, raw, output)
return super(HistoryManager, self).get_range(session, start, stop, raw,
output)
## ----------------------------
## Methods for storing history:
## ----------------------------
def store_inputs(self, line_num, source, source_raw=None):
"""Store source and raw input in history and create input cache
variables ``_i*``.
Parameters
----------
line_num : int
The prompt number of this input.
source : str
Python input.
source_raw : str, optional
If given, this is the raw input without any yap_ipython transformations
applied to it. If not given, ``source`` is used.
"""
if source_raw is None:
source_raw = source
source = source.rstrip('\n')
source_raw = source_raw.rstrip('\n')
# do not store exit/quit commands
if self._exit_re.match(source_raw.strip()):
return
self.input_hist_parsed.append(source)
self.input_hist_raw.append(source_raw)
with self.db_input_cache_lock:
self.db_input_cache.append((line_num, source, source_raw))
# Trigger to flush cache and write to DB.
if len(self.db_input_cache) >= self.db_cache_size:
self.save_flag.set()
# update the auto _i variables
self._iii = self._ii
self._ii = self._i
self._i = self._i00
self._i00 = source_raw
# hackish access to user namespace to create _i1,_i2... dynamically
new_i = '_i%s' % line_num
to_main = {'_i': self._i,
'_ii': self._ii,
'_iii': self._iii,
new_i : self._i00 }
if self.shell is not None:
self.shell.push(to_main, interactive=False)
def store_output(self, line_num):
"""If database output logging is enabled, this saves all the
outputs from the indicated prompt number to the database. It's
called by run_cell after code has been executed.
Parameters
----------
line_num : int
The line number from which to save outputs
"""
if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
return
output = self.output_hist_reprs[line_num]
with self.db_output_cache_lock:
self.db_output_cache.append((line_num, output))
if self.db_cache_size <= 1:
self.save_flag.set()
def _writeout_input_cache(self, conn):
with conn:
for line in self.db_input_cache:
conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
(self.session_number,)+line)
def _writeout_output_cache(self, conn):
with conn:
for line in self.db_output_cache:
conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
(self.session_number,)+line)
@needs_sqlite
def writeout_cache(self, conn=None):
"""Write any entries in the cache to the database."""
if conn is None:
conn = self.db
with self.db_input_cache_lock:
try:
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
self.new_session(conn)
print("ERROR! Session/line number was not unique in",
"database. History logging moved to new session",
self.session_number)
try:
# Try writing to the new session. If this fails, don't
# recurse
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
pass
finally:
self.db_input_cache = []
with self.db_output_cache_lock:
try:
self._writeout_output_cache(conn)
except sqlite3.IntegrityError:
print("!! Session/line number for output was not unique",
"in database. Output will not be stored.")
finally:
self.db_output_cache = []
class HistorySavingThread(threading.Thread):
"""This thread takes care of writing history to the database, so that
the UI isn't held up while that happens.
It waits for the HistoryManager's save_flag to be set, then writes out
the history cache. The main thread is responsible for setting the flag when
the cache size reaches a defined threshold."""
daemon = True
stop_now = False
enabled = True
def __init__(self, history_manager):
super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
self.history_manager = history_manager
self.enabled = history_manager.enabled
atexit.register(self.stop)
@needs_sqlite
def run(self):
# We need a separate db connection per thread:
try:
self.db = sqlite3.connect(self.history_manager.hist_file,
**self.history_manager.connection_options
)
while True:
self.history_manager.save_flag.wait()
if self.stop_now:
self.db.close()
return
self.history_manager.save_flag.clear()
self.history_manager.writeout_cache(self.db)
except Exception as e:
print(("The history saving thread hit an unexpected error (%s)."
"History will not be written to the database.") % repr(e))
def stop(self):
"""This can be called from the main thread to safely stop this thread.
Note that it does not attempt to write out remaining history before
exiting. That should be done by calling the HistoryManager's
end_session method."""
self.stop_now = True
self.history_manager.save_flag.set()
self.join()
# To match, e.g. ~5/8-~2/3
range_re = re.compile(r"""
((?P<startsess>~?\d+)/)?
(?P<start>\d+)?
((?P<sep>[\-:])
((?P<endsess>~?\d+)/)?
(?P<end>\d+))?
$""", re.VERBOSE)
def extract_hist_ranges(ranges_str):
"""Turn a string of history ranges into 3-tuples of (session, start, stop).
Examples
--------
>>> list(extract_hist_ranges("~8/5-~7/4 2"))
[(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
"""
for range_str in ranges_str.split():
rmatch = range_re.match(range_str)
if not rmatch:
continue
start = rmatch.group("start")
if start:
start = int(start)
end = rmatch.group("end")
# If no end specified, get (a, a + 1)
end = int(end) if end else start + 1
else: # start not specified
if not rmatch.group('startsess'): # no startsess
continue
start = 1
end = None # provide the entire session hist
if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
end += 1
startsess = rmatch.group("startsess") or "0"
endsess = rmatch.group("endsess") or startsess
startsess = int(startsess.replace("~","-"))
endsess = int(endsess.replace("~","-"))
        assert endsess >= startsess, "end session must not come before start session"
if endsess == startsess:
yield (startsess, start, end)
continue
# Multiple sessions in one range:
yield (startsess, start, None)
for sess in range(startsess+1, endsess):
yield (sess, 1, None)
yield (endsess, 1, end)
def _format_lineno(session, line):
"""Helper function to format line numbers properly."""
if session == 0:
return str(line)
return "%s#%s" % (session, line)

View File

@@ -0,0 +1,161 @@
# encoding: utf-8
"""
An application for managing yap_ipython history.
To be invoked as the `ipython history` subcommand.
"""
import os
import sqlite3
from traitlets.config.application import Application
from yap_ipython.core.application import BaseYAPApplication
from traitlets import Bool, Int, Dict
from yap_ipython.utils.io import ask_yes_no
trim_hist_help = """Trim the yap_ipython history database to the last 1000 entries.
This actually copies the last 1000 entries to a new database, and then replaces
the old file with the new. Use the `--keep=` argument to specify a number
other than 1000.
"""
clear_hist_help = """Clear the yap_ipython history database, deleting all entries.
Because this is a destructive operation, yap_ipython will prompt the user if they
really want to do this. Passing a `-f` flag will force clearing without a
prompt.
This is a handy alias for `ipython history trim --keep=0`.
"""
class HistoryTrim(BaseYAPApplication):
description = trim_hist_help
backup = Bool(False,
help="Keep the old history file as history.sqlite.<N>"
).tag(config=True)
keep = Int(1000,
help="Number of recent lines to keep in the database."
).tag(config=True)
flags = Dict(dict(
backup = ({'HistoryTrim' : {'backup' : True}},
backup.help
)
))
aliases=Dict(dict(
keep = 'HistoryTrim.keep'
))
def start(self):
profile_dir = self.profile_dir.location
hist_file = os.path.join(profile_dir, 'history.sqlite')
con = sqlite3.connect(hist_file)
# Grab the recent history from the current database.
inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
if len(inputs) <= self.keep:
print("There are already at most %d entries in the history database." % self.keep)
print("Not doing anything. Use --keep= argument to keep fewer entries")
return
print("Trimming history to the most recent %d entries." % self.keep)
inputs.pop() # Remove the extra element we got to check the length.
inputs.reverse()
if inputs:
first_session = inputs[0][0]
outputs = list(con.execute('SELECT session, line, output FROM '
'output_history WHERE session >= ?', (first_session,)))
sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
'sessions WHERE session >= ?', (first_session,)))
con.close()
# Create the new history database.
new_hist_file = os.path.join(profile_dir, 'history.sqlite.new')
i = 0
while os.path.exists(new_hist_file):
# Make sure we don't interfere with an existing file.
i += 1
new_hist_file = os.path.join(profile_dir, 'history.sqlite.new'+str(i))
new_db = sqlite3.connect(new_hist_file)
new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
new_db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
new_db.commit()
if inputs:
with new_db:
# Add the recent history into the new database.
new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
new_db.executemany('insert into history values (?,?,?,?)', inputs)
new_db.executemany('insert into output_history values (?,?,?)', outputs)
new_db.close()
if self.backup:
i = 1
backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
while os.path.exists(backup_hist_file):
i += 1
backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
os.rename(hist_file, backup_hist_file)
print("Backed up longer history file to", backup_hist_file)
else:
os.remove(hist_file)
os.rename(new_hist_file, hist_file)
class HistoryClear(HistoryTrim):
description = clear_hist_help
keep = Int(0,
help="Number of recent lines to keep in the database.")
force = Bool(False,
help="Don't prompt user for confirmation"
).tag(config=True)
flags = Dict(dict(
force = ({'HistoryClear' : {'force' : True}},
force.help),
f = ({'HistoryTrim' : {'force' : True}},
force.help
)
))
aliases = Dict()
def start(self):
if self.force or ask_yes_no("Really delete all ipython history? ",
default="no", interrupt="no"):
HistoryTrim.start(self)
class HistoryApp(Application):
name = u'ipython-history'
description = "Manage the yap_ipython history database."
subcommands = Dict(dict(
trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
))
def start(self):
if self.subapp is None:
print("No subcommand specified. Must specify one of: %s" % \
(self.subcommands.keys()))
print()
self.print_description()
self.print_subcommands()
self.exit(1)
else:
return self.subapp.start()
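# Typical invocations of the subcommands defined above (a hypothetical shell
# session; --keep, --backup and -f are the aliases/flags declared in the classes):
#
#     ipython history trim --keep=500    # keep only the 500 most recent entries
#     ipython history trim --backup      # keep the old file as history.sqlite.<N>
#     ipython history clear -f           # wipe the history without prompting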

View File

@@ -0,0 +1,229 @@
"""Hooks for yap_ipython.
In Python, it is possible to overwrite any method of any object if you really
want to. But yap_ipython exposes a few 'hooks', methods which are *designed* to
be overwritten by users for customization purposes. This module defines the
default versions of all such hooks, which get used by yap_ipython if not
overridden by the user.
Hooks are simple functions, but they should be declared with ``self`` as their
first argument, because when activated they are registered into yap_ipython as
instance methods. The self argument will be the yap_ipython running instance
itself, so hooks have full access to the entire yap_ipython object.
If you wish to define a new hook and activate it, you can make an :doc:`extension
</config/extensions/index>` or a :ref:`startup script <startup_files>`. For
example, you could use a startup file like this::
import os
def calljed(self,filename, linenum):
"My editor hook calls the jed editor directly."
print "Calling my own editor, jed ..."
if os.system('jed +%d %s' % (linenum,filename)) != 0:
raise TryNext()
def load_ipython_extension(ip):
ip.set_hook('editor', calljed)
"""
#*****************************************************************************
# Copyright (C) 2005 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import os
import subprocess
import warnings
import sys
from yap_ipython.core.error import TryNext
# List here all the default hooks. For now it's just the editor functions
# but over time we'll move here all the public API for user-accessible things.
__all__ = ['editor', 'synchronize_with_editor',
'shutdown_hook', 'late_startup_hook',
'show_in_pager','pre_prompt_hook',
'pre_run_code_hook', 'clipboard_get']
deprecated = {'pre_run_code_hook': "a callback for the 'pre_execute' or 'pre_run_cell' event",
'late_startup_hook': "a callback for the 'shell_initialized' event",
'shutdown_hook': "the atexit module",
}
def editor(self, filename, linenum=None, wait=True):
"""Open the default editor at the given filename and linenumber.
This is yap_ipython's default editor hook, you can use it as an example to
write your own modified one. To set your own editor function as the
new editor hook, call ip.set_hook('editor',yourfunc)."""
# yap_ipython configures a default editor at startup by reading $EDITOR from
# the environment, and falling back on vi (unix) or notepad (win32).
editor = self.editor
# marker for at which line to open the file (for existing objects)
if linenum is None or editor=='notepad':
linemark = ''
else:
linemark = '+%d' % int(linenum)
# Enclose in quotes if necessary and legal
if ' ' in editor and os.path.isfile(editor) and editor[0] != '"':
editor = '"%s"' % editor
# Call the actual editor
proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename),
shell=True)
if wait and proc.wait() != 0:
raise TryNext()
import tempfile
from yap_ipython.utils.decorators import undoc
@undoc
def fix_error_editor(self,filename,linenum,column,msg):
"""DEPRECATED
Open the editor at the given filename, linenumber, column and
show an error message. This is used for correcting syntax errors.
The current implementation only has special support for the VIM editor,
and falls back on the 'editor' hook if VIM is not used.
Call ip.set_hook('fix_error_editor',yourfunc) to use your own function,
"""
warnings.warn("""
`fix_error_editor` is deprecated as of yap_ipython 6.0 and will be removed
in future versions. It appears to be used only for automatically fixing syntax
errors, a feature that has been broken for a few years and has thus been removed. If you
happened to use this function and still need it, please make your voice heard on
the mailing list ipython-dev@python.org, or on the GitHub Issue tracker:
https://github.com/ipython/ipython/issues/9649 """, UserWarning)
def vim_quickfix_file():
t = tempfile.NamedTemporaryFile(mode='w')
t.write('%s:%d:%d:%s\n' % (filename,linenum,column,msg))
t.flush()
return t
if os.path.basename(self.editor) != 'vim':
self.hooks.editor(filename,linenum)
return
t = vim_quickfix_file()
try:
if os.system('vim --cmd "set errorformat=%f:%l:%c:%m" -q ' + t.name):
raise TryNext()
finally:
t.close()
def synchronize_with_editor(self, filename, linenum, column):
pass
class CommandChainDispatcher:
""" Dispatch calls to a chain of commands until some func can handle it
Usage: instantiate, execute "add" to add commands (with optional
priority), execute normally via f() calling mechanism.
"""
def __init__(self,commands=None):
if commands is None:
self.chain = []
else:
self.chain = commands
def __call__(self,*args, **kw):
""" Command chain is called just like normal func.
This will call all funcs in chain with the same args as were given to
this function, and return the result of first func that didn't raise
TryNext"""
last_exc = TryNext()
for prio,cmd in self.chain:
#print "prio",prio,"cmd",cmd #dbg
try:
return cmd(*args, **kw)
except TryNext as exc:
last_exc = exc
# if no function will accept it, raise TryNext up to the caller
raise last_exc
def __str__(self):
return str(self.chain)
def add(self, func, priority=0):
""" Add a func to the cmd chain with given priority """
self.chain.append((priority, func))
self.chain.sort(key=lambda x: x[0])
def __iter__(self):
""" Return all objects in chain.
Handy if the objects are not callable.
"""
return iter(self.chain)
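# Editorial note, illustrative only (not part of the committed file): a minimal
# sketch of how the dispatcher above is used.  Handlers either return a result
# or raise TryNext to hand control to the next entry in the chain.
#
#   def primary(x):
#       if x < 0:
#           raise TryNext()
#       return x * 2
#
#   def fallback(x):
#       return 0
#
#   dispatch = CommandChainDispatcher()
#   dispatch.add(primary, priority=0)
#   dispatch.add(fallback, priority=10)
#   dispatch(3)    # -> 6, handled by primary
#   dispatch(-1)   # -> 0, primary raises TryNext, fallback answers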
def shutdown_hook(self):
""" default shutdown hook
Typically, shutdown hooks should raise TryNext so all shutdown ops are done
"""
#print "default shutdown hook ok" # dbg
return
def late_startup_hook(self):
""" Executed after ipython has been constructed and configured
"""
#print "default startup hook ok" # dbg
def show_in_pager(self, data, start, screen_lines):
""" Run a string through pager """
# raising TryNext here will use the default paging functionality
raise TryNext
def pre_prompt_hook(self):
""" Run before displaying the next prompt
Use this e.g. to display output from asynchronous operations (in order
to not mess up text entry)
"""
return None
def pre_run_code_hook(self):
""" Executed before running the (prefiltered) code in yap_ipython """
return None
def clipboard_get(self):
""" Get text from the clipboard.
"""
from yap_ipython.lib.clipboard import (
osx_clipboard_get, tkinter_clipboard_get,
win32_clipboard_get
)
if sys.platform == 'win32':
chain = [win32_clipboard_get, tkinter_clipboard_get]
elif sys.platform == 'darwin':
chain = [osx_clipboard_get, tkinter_clipboard_get]
else:
chain = [tkinter_clipboard_get]
dispatcher = CommandChainDispatcher()
for func in chain:
dispatcher.add(func)
text = dispatcher()
return text

View File

@@ -0,0 +1,766 @@
"""Input handling and transformation machinery.
The first class in this module, :class:`InputSplitter`, is designed to tell when
input from a line-oriented frontend is complete and should be executed, and when
the user should be prompted for another line of code instead. The name 'input
splitter' is largely for historical reasons.
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended yap_ipython syntax (magics, system calls, etc).
The code to actually do these transformations is in :mod:`yap_ipython.core.inputtransformer`.
:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
and stores the results.
For more details, see the class docstrings below.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import codeop
import io
import re
import sys
import tokenize
import warnings
from yap_ipython.utils.py3compat import cast_unicode
from yap_ipython.core.inputtransformer import (leading_indent,
classic_prompt,
ipy_prompt,
cellmagic,
assemble_logical_lines,
help_end,
escaped_commands,
assign_from_magic,
assign_from_system,
assemble_python_lines,
)
# These are available in this module for backwards compatibility.
from yap_ipython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# compiled regexps for autoindent management
dedent_re = re.compile('|'.join([
r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
r'^\s+break\s*$', # break (optionally followed by trailing spaces)
r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
]))
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
comment_line_re = re.compile(r'^\s*#')
def num_ini_spaces(s):
"""Return the number of initial spaces in a string.
Note that tabs are counted as a single space. For now, we do *not* support
mixing of tabs and spaces in the user's input.
Parameters
----------
s : string
Returns
-------
n : int
"""
ini_spaces = ini_spaces_re.match(s)
if ini_spaces:
return ini_spaces.end()
else:
return 0
# Fake token types for partial_tokenize:
INCOMPLETE_STRING = tokenize.N_TOKENS
IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1
# The 2 classes below have the same API as TokenInfo, but don't try to look up
# a token type name that they won't find.
class IncompleteString:
type = exact_type = INCOMPLETE_STRING
def __init__(self, s, start, end, line):
self.s = s
self.start = start
self.end = end
self.line = line
class InMultilineStatement:
type = exact_type = IN_MULTILINE_STATEMENT
def __init__(self, pos, line):
self.s = ''
self.start = self.end = pos
self.line = line
def partial_tokens(s):
"""Iterate over tokens from a possibly-incomplete string of code.
This adds two special token types: INCOMPLETE_STRING and
IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and
represent the two main ways for code to be incomplete.
"""
readline = io.StringIO(s).readline
token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
try:
for token in tokenize.generate_tokens(readline):
yield token
except tokenize.TokenError as e:
# catch EOF error
lines = s.splitlines(keepends=True)
end = len(lines), len(lines[-1])
if 'multi-line string' in e.args[0]:
l, c = start = token.end
s = lines[l-1][c:] + ''.join(lines[l:])
yield IncompleteString(s, start, end, lines[-1])
elif 'multi-line statement' in e.args[0]:
yield InMultilineStatement(end, lines[-1])
else:
raise
def find_next_indent(code):
"""Find the number of spaces for the next line of indentation"""
tokens = list(partial_tokens(code))
if tokens[-1].type == tokenize.ENDMARKER:
tokens.pop()
if not tokens:
return 0
while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
tokens.pop()
if tokens[-1].type == INCOMPLETE_STRING:
# Inside a multiline string
return 0
# Find the indents used before
prev_indents = [0]
def _add_indent(n):
if n != prev_indents[-1]:
prev_indents.append(n)
tokiter = iter(tokens)
for tok in tokiter:
if tok.type in {tokenize.INDENT, tokenize.DEDENT}:
_add_indent(tok.end[1])
elif (tok.type == tokenize.NL):
try:
_add_indent(next(tokiter).start[1])
except StopIteration:
break
last_indent = prev_indents.pop()
# If we've just opened a multiline statement (e.g. 'a = ['), indent more
if tokens[-1].type == IN_MULTILINE_STATEMENT:
if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}:
return last_indent + 4
return last_indent
if tokens[-1].exact_type == tokenize.COLON:
# Line ends with colon - indent
return last_indent + 4
if last_indent:
# Examine the last line for dedent cues - statements like return or
# raise which normally end a block of code.
last_line_starts = 0
for i, tok in enumerate(tokens):
if tok.type == tokenize.NEWLINE:
last_line_starts = i + 1
last_line_tokens = tokens[last_line_starts:]
names = [t.string for t in last_line_tokens if t.type == tokenize.NAME]
if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}:
# Find the most recent indentation less than the current level
for indent in reversed(prev_indents):
if indent < last_indent:
return indent
return last_indent
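# Editorial note, illustrative only (not part of the committed file): expected
# results of the helper above for a few partial inputs, following the logic
# written here.
#
#   find_next_indent('if True:')      # -> 4  (line ends with a colon)
#   find_next_indent('a = [')         # -> 4  (an opening bracket is pending)
#   find_next_indent('    return x')  # -> 0  (dedent cue after indent 4)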
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace()
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
"""Determine if the input source ends in two blanks.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
# The logic here is tricky: I couldn't get a regexp to work and pass all
# the tests, so I took a different approach: split the source by lines,
# grab the last two and prepend '###\n' as a stand-in for whatever was in
# the body before the last two lines. Then, with that structure, it's
# possible to analyze with two regexps. Not the most elegant solution, but
# it works. If anyone tries to change this logic, make sure to validate
# the whole test suite first!
new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
return (bool(last_two_blanks_re.match(new_src)) or
bool(last_two_blanks_re2.match(new_src)) )
def remove_comments(src):
"""Remove all comments from input source.
Note: comments are NOT recognized inside of strings!
Parameters
----------
src : string
A single or multiline input string.
Returns
-------
String with all Python comments removed.
"""
return re.sub('#.*', '', src)
def get_input_encoding():
"""Return the default standard input encoding.
If sys.stdin has no encoding, 'ascii' is returned."""
# There are strange environments for which sys.stdin.encoding is None. We
# ensure that a valid encoding is returned.
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'ascii'
return encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
r"""An object that can accumulate lines of Python source before execution.
This object is designed to be fed python source line-by-line, using
:meth:`push`. It will return on each push whether the currently pushed
code could be executed already. In addition, it provides a method called
:meth:`push_accepts_more` that can be used to query whether more input
can be pushed into a single interactive block.
This is a simple example of how an interactive terminal-based client can use
this tool::
isp = InputSplitter()
while isp.push_accepts_more():
indent = ' '*isp.indent_spaces
prompt = '>>> ' + indent
line = indent + input(prompt)
isp.push(line)
print('Input source was:\n', isp.source_reset())
"""
# A cache for storing the current indentation
# The first value stores the most recently processed source input
# The second value is the number of spaces for the current indentation
# If self.source matches the first value, the second value is a valid
# current indentation. Otherwise, the cache is invalid and the indentation
# must be recalculated.
_indent_spaces_cache = None, None
# String, indicating the default input encoding. It is computed by default
# at initialization time via get_input_encoding(), but it can be reset by a
# client with specific knowledge of the encoding.
encoding = ''
# String where the current full source input is stored, properly encoded.
# Reading this attribute is the normal way of querying the currently pushed
# source code, that has been properly encoded.
source = ''
# Code object corresponding to the current source. It is automatically
# synced to the source, so it can be queried at any time to obtain the code
# object; it will be None if the source doesn't compile to valid Python.
code = None
# Private attributes
# List with lines of input accumulated so far
_buffer = None
# Command compiler
_compile = None
# Boolean indicating whether the current block is complete
_is_complete = None
# Boolean indicating whether the current block has an unrecoverable syntax error
_is_invalid = False
def __init__(self):
"""Create a new InputSplitter instance.
"""
self._buffer = []
self._compile = codeop.CommandCompiler()
self.encoding = get_input_encoding()
def reset(self):
"""Reset the input buffer and associated state."""
self._buffer[:] = []
self.source = ''
self.code = None
self._is_complete = False
self._is_invalid = False
def source_reset(self):
"""Return the input source and perform a full reset.
"""
out = self.source
self.reset()
return out
def check_complete(self, source):
"""Return whether a block of code is ready to execute, or should be continued
This is a non-stateful API, and will reset the state of this InputSplitter.
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent_spaces : int or None
The number of spaces by which to indent the next line of code. If
status is not 'incomplete', this is None.
"""
self.reset()
try:
self.push(source)
except SyntaxError:
# Transformers in IPythonInputSplitter can raise SyntaxError,
# which push() will not catch.
return 'invalid', None
else:
if self._is_invalid:
return 'invalid', None
elif self.push_accepts_more():
return 'incomplete', self.get_indent_spaces()
else:
return 'complete', None
finally:
self.reset()
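# Editorial note, illustrative only (not part of the committed class): what
# check_complete() above should report for a few inputs, given this logic.
#
#   isp = InputSplitter()
#   isp.check_complete('a = 1')        # -> ('complete', None)
#   isp.check_complete('def f(x):')    # -> ('incomplete', 4)
#   isp.check_complete('a = )')        # -> ('invalid', None)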
def push(self, lines):
"""Push one or more lines of input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (``_is_complete``), so it
can be queried at any time.
"""
self._store(lines)
source = self.source
# Before calling _compile(), reset the code object to None so that if an
# exception is raised in compilation, we don't mislead by having
# inconsistent code/source attributes.
self.code, self._is_complete = None, None
self._is_invalid = False
# Honor termination lines properly
if source.endswith('\\\n'):
return False
try:
with warnings.catch_warnings():
warnings.simplefilter('error', SyntaxWarning)
self.code = self._compile(source, symbol="exec")
# Invalid syntax can produce any of a number of different errors from
# inside the compiler, so we have to catch them all. Syntax errors
# immediately produce a 'ready' block, so the invalid Python can be
# sent to the kernel for evaluation with possible ipython
# special-syntax conversion.
except (SyntaxError, OverflowError, ValueError, TypeError,
MemoryError, SyntaxWarning):
self._is_complete = True
self._is_invalid = True
else:
# Compilation didn't produce any exceptions (though it may not have
# given a complete code object)
self._is_complete = self.code is not None
return self._is_complete
def push_accepts_more(self):
"""Return whether a block of interactive input can accept more input.
This method is meant to be used by line-oriented frontends, who need to
guess whether a block is complete or not based solely on prior and
current input lines. The InputSplitter considers it has a complete
interactive block and will not accept more input when either:
* A SyntaxError is raised
* The code is complete and consists of a single line or a single
non-compound statement
* The code is complete and has a blank line at the end
If the current input produces a syntax error, this method immediately
returns False but does *not* raise the syntax error exception, as
typically clients will want to send invalid syntax to an execution
backend which might convert the invalid syntax into valid Python via
one of the dynamic yap_ipython mechanisms.
"""
# With incomplete input, unconditionally accept more
# A syntax error also sets _is_complete to True - see push()
if not self._is_complete:
#print("Not complete") # debug
return True
# The user can make any (complete) input execute by leaving a blank line
last_line = self.source.splitlines()[-1]
if (not last_line) or last_line.isspace():
#print("Blank line") # debug
return False
# If there's just a single line or AST node, and we're flush left, as is
# the case after a simple statement such as 'a=1', we want to execute it
# straight away.
if self.get_indent_spaces() == 0:
if len(self.source.splitlines()) <= 1:
return False
try:
code_ast = ast.parse(u''.join(self._buffer))
except Exception:
#print("Can't parse AST") # debug
return False
else:
if len(code_ast.body) == 1 and \
not hasattr(code_ast.body[0], 'body'):
#print("Simple statement") # debug
return False
# General fallback - accept more code
return True
def get_indent_spaces(self):
sourcefor, n = self._indent_spaces_cache
if sourcefor == self.source:
return n
# self.source always has a trailing newline
n = find_next_indent(self.source[:-1])
self._indent_spaces_cache = (self.source, n)
return n
# Backwards compatibility. I think all code that used .indent_spaces was
# inside yap_ipython, but we can leave this here until yap_ipython 7 in case any
# other modules are using it. -TK, November 2017
indent_spaces = property(get_indent_spaces)
def _store(self, lines, buffer=None, store='source'):
"""Store one or more lines of input.
If input lines are not newline-terminated, a newline is automatically
appended."""
if buffer is None:
buffer = self._buffer
if lines.endswith('\n'):
buffer.append(lines)
else:
buffer.append(lines+'\n')
setattr(self, store, self._set_source(buffer))
def _set_source(self, buffer):
return u''.join(buffer)
class IPythonInputSplitter(InputSplitter):
"""An input splitter that recognizes all of yap_ipython's special syntax."""
# String with raw, untransformed input.
source_raw = ''
# Flag to track when a transformer has stored input that it hasn't given
# back yet.
transformer_accumulating = False
# Flag to track when assemble_python_lines has stored input that it hasn't
# given back yet.
within_python_line = False
# Private attributes
# List with lines of raw input accumulated so far.
_buffer_raw = None
def __init__(self, line_input_checker=True, physical_line_transforms=None,
logical_line_transforms=None, python_line_transforms=None):
super(IPythonInputSplitter, self).__init__()
self._buffer_raw = []
self._validate = True
if physical_line_transforms is not None:
self.physical_line_transforms = physical_line_transforms
else:
self.physical_line_transforms = [
leading_indent(),
classic_prompt(),
ipy_prompt(),
cellmagic(end_on_blank_line=line_input_checker),
]
self.assemble_logical_lines = assemble_logical_lines()
if logical_line_transforms is not None:
self.logical_line_transforms = logical_line_transforms
else:
self.logical_line_transforms = [
help_end(),
escaped_commands(),
assign_from_magic(),
assign_from_system(),
]
self.assemble_python_lines = assemble_python_lines()
if python_line_transforms is not None:
self.python_line_transforms = python_line_transforms
else:
# We don't use any of these at present
self.python_line_transforms = []
@property
def transforms(self):
"Quick access to all transformers."
return self.physical_line_transforms + \
[self.assemble_logical_lines] + self.logical_line_transforms + \
[self.assemble_python_lines] + self.python_line_transforms
@property
def transforms_in_use(self):
"""Transformers, excluding logical line transformers if we're in a
Python line."""
t = self.physical_line_transforms[:]
if not self.within_python_line:
t += [self.assemble_logical_lines] + self.logical_line_transforms
return t + [self.assemble_python_lines] + self.python_line_transforms
def reset(self):
"""Reset the input buffer and associated state."""
super(IPythonInputSplitter, self).reset()
self._buffer_raw[:] = []
self.source_raw = ''
self.transformer_accumulating = False
self.within_python_line = False
for t in self.transforms:
try:
t.reset()
except SyntaxError:
# Nothing that calls reset() expects to handle transformer
# errors
pass
def flush_transformers(self):
def _flush(transform, outs):
"""yield transformed lines
always strings, never None
transform: the current transform
outs: an iterable of previously transformed inputs.
Each may be multiline, which will be passed
one line at a time to transform.
"""
for out in outs:
for line in out.splitlines():
# push one line at a time
tmp = transform.push(line)
if tmp is not None:
yield tmp
# reset the transform
tmp = transform.reset()
if tmp is not None:
yield tmp
out = []
for t in self.transforms_in_use:
out = _flush(t, out)
out = list(out)
if out:
self._store('\n'.join(out))
def raw_reset(self):
"""Return raw input only and perform a full reset.
"""
out = self.source_raw
self.reset()
return out
def source_reset(self):
try:
self.flush_transformers()
return self.source
finally:
self.reset()
def push_accepts_more(self):
if self.transformer_accumulating:
return True
else:
return super(IPythonInputSplitter, self).push_accepts_more()
def transform_cell(self, cell):
"""Process and translate a cell of input.
"""
self.reset()
try:
self.push(cell)
self.flush_transformers()
return self.source
finally:
self.reset()
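# Editorial note, illustrative only (not part of the committed class):
# transform_cell() is the usual entry point for turning yap_ipython syntax into
# plain Python.  With the default transformers and the run_line_magic template
# used in this tree, one would expect roughly:
#
#   isp = IPythonInputSplitter()
#   isp.transform_cell('%time 1+1')
#   # -> "get_ipython().run_line_magic('time', '1+1')\n"
#   isp.transform_cell('!ls')
#   # -> "get_ipython().system('ls')\n"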
def push(self, lines):
"""Push one or more lines of yap_ipython input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not, after processing
all input lines for special yap_ipython syntax.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (_is_complete), so it
can be queried at any time.
"""
# We must ensure all input is pure unicode
lines = cast_unicode(lines, self.encoding)
# ''.splitlines() --> [], but we need to push the empty line to transformers
lines_list = lines.splitlines()
if not lines_list:
lines_list = ['']
# Store raw source before applying any transformations to it. Note
# that this must be done *after* the reset() call that would otherwise
# flush the buffer.
self._store(lines, self._buffer_raw, 'source_raw')
transformed_lines_list = []
for line in lines_list:
transformed = self._transform_line(line)
if transformed is not None:
transformed_lines_list.append(transformed)
if transformed_lines_list:
transformed_lines = '\n'.join(transformed_lines_list)
return super(IPythonInputSplitter, self).push(transformed_lines)
else:
# Got nothing back from transformers - they must be waiting for
# more input.
return False
def _transform_line(self, line):
"""Push a line of input code through the various transformers.
Returns any output from the transformers, or None if a transformer
is accumulating lines.
Sets self.transformer_accumulating as a side effect.
"""
def _accumulating(dbg):
#print(dbg)
self.transformer_accumulating = True
return None
for transformer in self.physical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
if not self.within_python_line:
line = self.assemble_logical_lines.push(line)
if line is None:
return _accumulating('acc logical line')
for transformer in self.logical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
line = self.assemble_python_lines.push(line)
if line is None:
self.within_python_line = True
return _accumulating('acc python line')
else:
self.within_python_line = False
for transformer in self.python_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
#print("transformers clear") #debug
self.transformer_accumulating = False
return line

View File

@@ -1,4 +1,4 @@
"""Input transformer classes to support IPython special syntax.
"""Input transformer classes to support yap_ipython special syntax.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
@@ -8,18 +8,18 @@ import functools
import re
from io import StringIO
from IPython.core.splitinput import LineInfo
from IPython.utils import tokenize2
from IPython.utils.tokenize2 import generate_tokens, untokenize, TokenError
from yap_ipython.core.splitinput import LineInfo
from yap_ipython.utils import tokenize2
from yap_ipython.utils.tokenize2 import generate_tokens, untokenize, TokenError
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# The escape sequences that define the syntax transformations IPython will
# The escape sequences that define the syntax transformations yap_ipython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# for all intents and purposes they constitute the 'yap_ipython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
@@ -198,11 +198,14 @@ def _make_help_call(target, esc, lspace, next_input=None):
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = arg.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
if next_input is None:
return '%sget_ipython().magic(%r)' % (lspace, arg)
return '%sget_ipython().run_line_magic(%r, %r)' % (lspace, t_magic_name, t_magic_arg_s)
else:
return '%sget_ipython().set_next_input(%r);get_ipython().magic(%r)' % \
(lspace, next_input, arg)
return '%sget_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
(lspace, next_input, t_magic_name, t_magic_arg_s)
# These define the transformations for the different escape characters.
def _tr_system(line_info):
@@ -225,11 +228,14 @@ def _tr_help(line_info):
def _tr_magic(line_info):
"Translate lines escaped with: %"
tpl = '%sget_ipython().magic(%r)'
tpl = '%sget_ipython().run_line_magic(%r, %r)'
if line_info.line.startswith(ESC_MAGIC2):
return line_info.line
cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
return tpl % (line_info.pre, cmd)
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)
def _tr_quote(line_info):
"Translate lines escaped with: ,"
@@ -450,13 +456,13 @@ def classic_prompt():
# FIXME: non-capturing version (?:...) usable?
prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
initial_re = re.compile(r'^>>>( |$)')
# Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
# Any %magic/!system is yap_ipython syntax, so we needn't look for >>> prompts
turnoff_re = re.compile(r'^[%!]')
return _strip_prompts(prompt_re, initial_re, turnoff_re)
@CoroutineInputTransformer.wrap
def ipy_prompt():
"""Strip IPython's In [1]:/...: prompts."""
"""Strip yap_ipython's In [1]:/...: prompts."""
# FIXME: non-capturing version (?:...) usable?
prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
# Disable prompt stripping inside cell magics
@@ -514,12 +520,15 @@ def assign_from_system(line):
return assign_system_template % m.group('lhs', 'cmd')
assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_magic_template = '%s = get_ipython().magic(%r)'
assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
@StatelessInputTransformer.wrap
def assign_from_magic(line):
"""Transform assignment from magic commands (e.g. a = %who_ls)"""
m = assign_magic_re.match(line)
if m is None:
return line
return assign_magic_template % m.group('lhs', 'cmd')
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
m_lhs, m_cmd = m.group('lhs', 'cmd')
t_magic_name, _, t_magic_arg_s = m_cmd.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return assign_magic_template % (m_lhs, t_magic_name, t_magic_arg_s)
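# Editorial note, illustrative only (not part of the diff): the template change
# above alters the code generated for escaped magics.  For a line such as
# `%matplotlib inline`, the old transformer emitted
#     get_ipython().magic('matplotlib inline')
# while the new one emits
#     get_ipython().run_line_magic('matplotlib', 'inline')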

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,218 @@
"""Logger class for yap_ipython's logging facilities.
"""
#*****************************************************************************
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
#****************************************************************************
# Modules and globals
# Python standard modules
import glob
import io
import os
import time
#****************************************************************************
# FIXME: This class isn't a mixin anymore, but it still needs attributes from
# ipython and does input cache management. Finish cleanup later...
class Logger(object):
"""A Logfile class with different policies for file creation"""
def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
logmode='over'):
# this is the full ipython instance, we need some attributes from it
# which won't exist until later. What a mess, clean up later...
self.home_dir = home_dir
self.logfname = logfname
self.loghead = loghead
self.logmode = logmode
self.logfile = None
# Whether to log raw or processed input
self.log_raw_input = False
# whether to also log output
self.log_output = False
# whether to put timestamps before each log entry
self.timestamp = False
# activity control flags
self.log_active = False
# logmode is a validated property
def _set_mode(self,mode):
if mode not in ['append','backup','global','over','rotate']:
raise ValueError('invalid log mode %s given' % mode)
self._logmode = mode
def _get_mode(self):
return self._logmode
logmode = property(_get_mode,_set_mode)
def logstart(self, logfname=None, loghead=None, logmode=None,
log_output=False, timestamp=False, log_raw_input=False):
"""Generate a new log-file with a default header.
Raises RuntimeError if the log has already been started"""
if self.logfile is not None:
raise RuntimeError('Log file is already active: %s' %
self.logfname)
# The parameters can override constructor defaults
if logfname is not None: self.logfname = logfname
if loghead is not None: self.loghead = loghead
if logmode is not None: self.logmode = logmode
# Parameters not part of the constructor
self.timestamp = timestamp
self.log_output = log_output
self.log_raw_input = log_raw_input
# init depending on the log mode requested
isfile = os.path.isfile
logmode = self.logmode
if logmode == 'append':
self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
elif logmode == 'backup':
if isfile(self.logfname):
backup_logname = self.logfname+'~'
# Manually remove any old backup, since os.rename may fail
# under Windows.
if isfile(backup_logname):
os.remove(backup_logname)
os.rename(self.logfname,backup_logname)
self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
elif logmode == 'global':
self.logfname = os.path.join(self.home_dir,self.logfname)
self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
elif logmode == 'over':
if isfile(self.logfname):
os.remove(self.logfname)
self.logfile = io.open(self.logfname,'w', encoding='utf-8')
elif logmode == 'rotate':
if isfile(self.logfname):
if isfile(self.logfname+'.001~'):
old = glob.glob(self.logfname+'.*~')
old.sort()
old.reverse()
for f in old:
root, ext = os.path.splitext(f)
num = int(ext[1:-1])+1
os.rename(f, root+'.'+repr(num).zfill(3)+'~')
os.rename(self.logfname, self.logfname+'.001~')
self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
if logmode != 'append':
self.logfile.write(self.loghead)
self.logfile.flush()
self.log_active = True
def switch_log(self,val):
"""Switch logging on/off. val should be ONLY a boolean."""
if val not in [False,True,0,1]:
raise ValueError('Call switch_log ONLY with a boolean argument, '
'not with: %s' % val)
label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
if self.logfile is None:
print("""
Logging hasn't been started yet (use logstart for that).
%logon/%logoff are for temporarily starting and stopping logging for a logfile
which already exists. But you must first start the logging process with
%logstart (optionally giving a logfile name).""")
else:
if self.log_active == val:
print('Logging is already',label[val])
else:
print('Switching logging',label[val])
self.log_active = not self.log_active
self.log_active_out = self.log_active
def logstate(self):
"""Print a status message about the logger."""
if self.logfile is None:
print('Logging has not been activated.')
else:
state = self.log_active and 'active' or 'temporarily suspended'
print('Filename :', self.logfname)
print('Mode :', self.logmode)
print('Output logging :', self.log_output)
print('Raw input log :', self.log_raw_input)
print('Timestamping :', self.timestamp)
print('State :', state)
def log(self, line_mod, line_ori):
"""Write the sources to a log.
Inputs:
- line_mod: possibly modified input, such as the transformations made
by input prefilters or input handlers of various kinds. This should
always be valid Python.
- line_ori: unmodified input line from the user. This is not
necessarily valid Python.
"""
# Write the log line, but decide which one according to the
# log_raw_input flag, set when the log is started.
if self.log_raw_input:
self.log_write(line_ori)
else:
self.log_write(line_mod)
def log_write(self, data, kind='input'):
"""Write data to the log file, if active"""
#print 'data: %r' % data # dbg
if self.log_active and data:
write = self.logfile.write
if kind=='input':
if self.timestamp:
write(time.strftime('# %a, %d %b %Y %H:%M:%S\n', time.localtime()))
write(data)
elif kind=='output' and self.log_output:
odata = u'\n'.join([u'#[Out]# %s' % s
for s in data.splitlines()])
write(u'%s\n' % odata)
self.logfile.flush()
def logstop(self):
"""Fully stop logging and close log file.
In order to start logging again, a new logstart() call needs to be
made, possibly (though not necessarily) with a new filename, mode and
other options."""
if self.logfile is not None:
self.logfile.close()
self.logfile = None
else:
print("Logging hadn't been started.")
self.log_active = False
# For backwards compatibility, in case anyone was using this.
close_log = logstop
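# Editorial note, illustrative only (not part of the committed file): a minimal
# usage sketch for the class above, assuming a writable current directory and
# a made-up logfile name.
#
#   log = Logger(home_dir='.', logfname='session.log', logmode='rotate')
#   log.logstart(log_output=True, timestamp=True)
#   log.log("x = 1\n", "x = 1\n")        # processed input, raw input
#   log.log_write("1\n", kind='output')  # only written because log_output=True
#   log.logstop()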

View File

@@ -0,0 +1,53 @@
"""Support for interactive macros in yap_ipython"""
#*****************************************************************************
# Copyright (C) 2001-2005 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re
from yap_ipython.utils.encoding import DEFAULT_ENCODING
coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
class Macro(object):
"""Simple class to store the value of macros as strings.
Macro is just a callable that executes a string of yap_ipython
input when called.
"""
def __init__(self,code):
"""store the macro value, as a single string which can be executed"""
lines = []
enc = None
for line in code.splitlines():
coding_match = coding_declaration.match(line)
if coding_match:
enc = coding_match.group(1)
else:
lines.append(line)
code = "\n".join(lines)
if isinstance(code, bytes):
code = code.decode(enc or DEFAULT_ENCODING)
self.value = code + '\n'
def __str__(self):
return self.value
def __repr__(self):
return 'yap_ipython.macro.Macro(%s)' % repr(self.value)
def __getstate__(self):
""" needed for safe pickling via %store """
return {'value': self.value}
def __add__(self, other):
if isinstance(other, Macro):
return Macro(self.value + other.value)
elif isinstance(other, str):
return Macro(self.value + other)
raise TypeError
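# Editorial note, illustrative only (not part of the committed file):
#
#   m = Macro("x = 1\nprint(x)\n")
#   m.value                   # -> 'x = 1\nprint(x)\n'
#   (m + "x += 1\n").value    # -> 'x = 1\nprint(x)\nx += 1\n'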

View File

@@ -0,0 +1,684 @@
# encoding: utf-8
"""Magic functions for InteractiveShell.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
# Copyright (C) 2008 The yap_ipython Development Team
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import os
import re
import sys
from getopt import getopt, GetoptError
from traitlets.config.configurable import Configurable
from yap_ipython.core import oinspect
from yap_ipython.core.error import UsageError
from yap_ipython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
from decorator import decorator
from yap_ipython.utils.ipstruct import Struct
from yap_ipython.utils.process import arg_split
from yap_ipython.utils.text import dedent
from traitlets import Bool, Dict, Instance, observe
from logging import error
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# A dict we'll use for each class that has magics, used as temporary storage to
# pass information between the @line/cell_magic method decorators and the
# @magics_class class decorator, because the method decorators have no
# access to the class when they run. See for more details:
# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
magics = dict(line={}, cell={})
magic_kinds = ('line', 'cell')
magic_spec = ('line', 'cell', 'line_cell')
magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
#-----------------------------------------------------------------------------
# Utility classes and functions
#-----------------------------------------------------------------------------
class Bunch: pass
def on_off(tag):
"""Return an ON/OFF string for a 1/0 input. Simple utility function."""
return ['OFF','ON'][tag]
def compress_dhist(dh):
"""Compress a directory history into a new one with at most 20 entries.
Return a new list made from the first and last 10 elements of dhist after
removal of duplicates.
"""
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail
def needs_local_scope(func):
"""Decorator to mark magic functions which need to local scope to run."""
func.needs_local_scope = True
return func
#-----------------------------------------------------------------------------
# Class and method decorators for registering magics
#-----------------------------------------------------------------------------
def magics_class(cls):
"""Class decorator for all subclasses of the main Magics class.
Any class that subclasses Magics *must* also apply this decorator, to
ensure that all the methods that have been decorated as line/cell magics
get correctly registered in the class instance. This is necessary because
when method decorators run, the class does not exist yet, so they
temporarily store their information into a module global. Application of
this class decorator copies that global data to the class instance and
clears the global.
Obviously, this mechanism is not thread-safe, which means that the
*creation* of subclasses of Magic should only be done in a single-thread
context. Instantiation of the classes has no restrictions. Given that
these classes are typically created at yap_ipython startup time and before user
application code becomes active, in practice this should not pose any
problems.
"""
cls.registered = True
cls.magics = dict(line = magics['line'],
cell = magics['cell'])
magics['line'] = {}
magics['cell'] = {}
return cls
def record_magic(dct, magic_kind, magic_name, func):
"""Utility function to store a function as a magic of a specific kind.
Parameters
----------
dct : dict
A dictionary with 'line' and 'cell' subdicts.
magic_kind : str
Kind of magic to be stored.
magic_name : str
Key to store the magic as.
func : function
Callable object to store.
"""
if magic_kind == 'line_cell':
dct['line'][magic_name] = dct['cell'][magic_name] = func
else:
dct[magic_kind][magic_name] = func
def validate_type(magic_kind):
"""Ensure that the given magic_kind is valid.
Check that the given magic_kind is one of the accepted spec types (stored
in the global `magic_spec`), raise ValueError otherwise.
"""
if magic_kind not in magic_spec:
raise ValueError('magic_kind must be one of %s, %s given' %
                 (magic_spec, magic_kind))
# The docstrings for the decorator below will be fairly similar for the two
# types (method and function), so we generate them here once and reuse the
# templates below.
_docstring_template = \
"""Decorate the given {0} as {1} magic.
The decorator can be used with or without arguments, as follows.
i) without arguments: it will create a {1} magic named as the {0} being
decorated::
@deco
def foo(...)
will create a {1} magic named `foo`.
ii) with one string argument: which will be used as the actual name of the
resulting magic::
@deco('bar')
def foo(...)
will create a {1} magic named `bar`.
To register a class magic use ``InteractiveShell.register_magics(class or instance)``.
"""
# These two are decorator factories. While they are conceptually very similar,
# there are enough differences in the details that it's simpler to have them
# written as completely standalone functions rather than trying to share code
# and make a single one with convoluted logic.
def _method_magic_marker(magic_kind):
"""Decorator factory for methods in Magics subclasses.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
retval = decorator(call, func)
record_magic(magics, magic_kind, name, name)
elif isinstance(arg, str):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
record_magic(magics, magic_kind, name, func.__name__)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
return magic_deco
def _function_magic_marker(magic_kind):
"""Decorator factory for standalone functions.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
# Find get_ipython() in the caller's namespace
caller = sys._getframe(1)
for ns in ['f_locals', 'f_globals', 'f_builtins']:
get_ipython = getattr(caller, ns).get('get_ipython')
if get_ipython is not None:
break
else:
raise NameError('Decorator can only run in context where '
'`get_ipython` exists')
ip = get_ipython()
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
ip.register_magic_function(func, magic_kind, name)
retval = decorator(call, func)
elif isinstance(arg, str):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
ip.register_magic_function(func, magic_kind, name)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
ds = _docstring_template.format('function', magic_kind)
ds += dedent("""
Note: this decorator can only be used in a context where yap_ipython is already
active, so that the `get_ipython()` call succeeds. You can therefore use
it in your startup files loaded after yap_ipython initializes, but *not* in the
yap_ipython configuration file itself, which is executed before yap_ipython is
fully up and running. Any file located in the `startup` subdirectory of
your configuration profile will be OK in this sense.
""")
magic_deco.__doc__ = ds
return magic_deco
# Create the actual decorators for public use
# These three are used to decorate methods in class definitions
line_magic = _method_magic_marker('line')
cell_magic = _method_magic_marker('cell')
line_cell_magic = _method_magic_marker('line_cell')
# These three decorate standalone functions and perform the decoration
# immediately. They can only run where get_ipython() works
register_line_magic = _function_magic_marker('line')
register_cell_magic = _function_magic_marker('cell')
register_line_cell_magic = _function_magic_marker('line_cell')
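# Editorial note, illustrative only (not part of the committed file): how the
# decorators above combine in user code.  `MyMagics`, `greet` and `shout` are
# made-up names.
#
#   @magics_class
#   class MyMagics(Magics):
#       @line_magic
#       def greet(self, line):
#           return 'hello ' + line
#
#       @cell_magic
#       def shout(self, line, cell):
#           return cell.upper()
#
# Registering the class with an active shell (e.g. ip.register_magics(MyMagics))
# then exposes %greet and %%shout.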
#-----------------------------------------------------------------------------
# Core Magic classes
#-----------------------------------------------------------------------------
class MagicsManager(Configurable):
"""Object that handles all magic-related functionality for yap_ipython.
"""
# Non-configurable class attributes
# A two-level dict, first keyed by magic type, then by magic function, and
# holding the actual callable object as value. This is the dict used for
# magic function dispatch
magics = Dict()
# A registry of the original objects that we've been given holding magics.
registry = Dict()
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
auto_magic = Bool(True, help=
"Automatically call line magics without requiring explicit % prefix"
).tag(config=True)
@observe('auto_magic')
def _auto_magic_changed(self, change):
self.shell.automagic = change['new']
_auto_status = [
'Automagic is OFF, % prefix IS needed for line magics.',
'Automagic is ON, % prefix IS NOT needed for line magics.']
user_magics = Instance('yap_ipython.core.magics.UserMagics', allow_none=True)
def __init__(self, shell=None, config=None, user_magics=None, **traits):
super(MagicsManager, self).__init__(shell=shell, config=config,
user_magics=user_magics, **traits)
self.magics = dict(line={}, cell={})
# Let's add the user_magics to the registry for uniformity, so *all*
# registered magic containers can be found there.
self.registry[user_magics.__class__.__name__] = user_magics
def auto_status(self):
"""Return descriptive string with automagic status."""
return self._auto_status[self.auto_magic]
def lsmagic(self):
"""Return a dict of currently available magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a list of names.
"""
return self.magics
def lsmagic_docs(self, brief=False, missing=''):
"""Return dict of documentation of magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a dict keyed by magic
name whose value is the function docstring. If a docstring is
unavailable, the value of `missing` is used instead.
If brief is True, only the first line of each docstring will be returned.
"""
docs = {}
for m_type in self.magics:
m_docs = {}
for m_name, m_func in self.magics[m_type].items():
if m_func.__doc__:
if brief:
m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
else:
m_docs[m_name] = m_func.__doc__.rstrip()
else:
m_docs[m_name] = missing
docs[m_type] = m_docs
return docs
def register(self, *magic_objects):
"""Register one or more instances of Magics.
Take one or more classes or instances of classes that subclass the main
`core.Magic` class, and register them with yap_ipython to use the magic
functions they provide. The registration process will then ensure that
any methods that have been decorated to provide line and/or cell magics will
be recognized with the `%x`/`%%x` syntax as a line/cell magic
respectively.
If classes are given, they will be instantiated with the default
constructor. If your classes need a custom constructor, you should
instantiate them first and pass the instance.
The provided arguments can be an arbitrary mix of classes and instances.
Parameters
----------
magic_objects : one or more classes or instances
"""
# Start by validating them to ensure they have all had their magic
# methods registered at the instance level
for m in magic_objects:
if not m.registered:
raise ValueError("Class of magics %r was constructed without "
"the @register_magics class decorator")
if isinstance(m, type):
# If we're given an uninstantiated class
m = m(shell=self.shell)
# Now that we have an instance, we can register it and update the
# table of callables
self.registry[m.__class__.__name__] = m
for mtype in magic_kinds:
self.magics[mtype].update(m.magics[mtype])
def register_function(self, func, magic_kind='line', magic_name=None):
"""Expose a standalone function as magic function for yap_ipython.
This will create an yap_ipython magic (line, cell or both) from a
standalone function. The functions should have the following
signatures:
* For line magics: `def f(line)`
* For cell magics: `def f(line, cell)`
* For a function that does both: `def f(line, cell=None)`
In the latter case, the function will be called with `cell==None` when
invoked as `%f`, and with cell as a string when invoked as `%%f`.
Parameters
----------
func : callable
Function to be registered as a magic.
magic_kind : str
Kind of magic, one of 'line', 'cell' or 'line_cell'
magic_name : optional str
If given, the name the magic will have in the yap_ipython namespace. By
default, the name of the function itself is used.
"""
# Create the new method in the user_magics and register it in the
# global table
validate_type(magic_kind)
magic_name = func.__name__ if magic_name is None else magic_name
setattr(self.user_magics, magic_name, func)
record_magic(self.magics, magic_kind, magic_name, func)
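# Editorial note, illustrative only (not part of the committed class):
# registering a plain function as a line magic through the manager.  `double`
# is a made-up name; `magics_manager` stands for an instance of this class.
#
#   def double(line):
#       return int(line) * 2
#
#   magics_manager.register_function(double, magic_kind='line',
#                                    magic_name='double')
#   # afterwards `%double 21` dispatches to double('21') -> 42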
def register_alias(self, alias_name, magic_name, magic_kind='line', magic_params=None):
"""Register an alias to a magic function.
The alias is an instance of :class:`MagicAlias`, which holds the
name and kind of the magic it should call. Binding is done at
call time, so if the underlying magic function is changed the alias
will call the new function.
Parameters
----------
alias_name : str
The name of the magic to be registered.
magic_name : str
The name of an existing magic.
magic_kind : str
Kind of magic, one of 'line' or 'cell'
"""
# `validate_type` is too permissive, as it allows 'line_cell'
# which we do not handle.
if magic_kind not in magic_kinds:
raise ValueError('magic_kind must be one of %s, %s given' %
                 (magic_kinds, magic_kind))
alias = MagicAlias(self.shell, magic_name, magic_kind, magic_params)
setattr(self.user_magics, alias_name, alias)
record_magic(self.magics, magic_kind, alias_name, alias)
# Key base class that provides the central functionality for magics.
class Magics(Configurable):
"""Base class for implementing magic functions.
Shell functions which can be reached as %function_name. All magic
functions should accept a string, which they can parse for their own
needs. This can make some functions easier to type, eg `%cd ../`
vs. `%cd("../")`
Classes providing magic functions need to subclass this class, and they
MUST:
- Use the method decorators `@line_magic` and `@cell_magic` to decorate
individual methods as magic functions, AND
- Use the class decorator `@magics_class` to ensure that the magic
methods are properly registered at the instance level upon instance
initialization.
See :mod:`magic_functions` for examples of actual implementation classes.
"""
# Dict holding all command-line options for each magic.
options_table = None
# Dict for the mapping of magic names to methods, set by class decorator
magics = None
# Flag to check that the class decorator was properly applied
registered = False
# Instance of yap_ipython shell
shell = None
def __init__(self, shell=None, **kwargs):
if not(self.__class__.registered):
raise ValueError('Magics subclass without registration - '
'did you forget to apply @magics_class?')
if shell is not None:
if hasattr(shell, 'configurables'):
shell.configurables.append(self)
if hasattr(shell, 'config'):
kwargs.setdefault('parent', shell)
self.shell = shell
self.options_table = {}
# The method decorators are run when the instance doesn't exist yet, so
# they can only record the names of the methods they are supposed to
# grab. Only now, that the instance exists, can we create the proper
# mapping to bound methods. So we read the info off the original names
# table and replace each method name by the actual bound method.
# But we mustn't clobber the *class* mapping, in case of multiple instances.
class_magics = self.magics
self.magics = {}
for mtype in magic_kinds:
tab = self.magics[mtype] = {}
cls_tab = class_magics[mtype]
for magic_name, meth_name in cls_tab.items():
if isinstance(meth_name, str):
# it's a method name, grab it
tab[magic_name] = getattr(self, meth_name)
else:
# it's the real thing
tab[magic_name] = meth_name
# Configurable **needs** to be initiated at the end or the config
# magics get screwed up.
super(Magics, self).__init__(**kwargs)
def arg_err(self,func):
"""Print docstring if incorrect arguments were passed"""
print('Error in arguments:')
print(oinspect.getdoc(func))
def format_latex(self, strng):
"""Format a string for latex inclusion."""
# Characters that need to be escaped for latex:
escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
# Magic command names as headers:
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
# Magic commands
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
# The "\n" symbol
newline_re = re.compile(r'\\n')
# Now build the string for output:
#strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
strng = par_re.sub(r'\\\\',strng)
strng = escape_re.sub(r'\\\1',strng)
strng = newline_re.sub(r'\\textbackslash{}n',strng)
return strng
def parse_options(self, arg_str, opt_str, *long_opts, **kw):
"""Parse options passed to an argument string.
The interface is similar to that of :func:`getopt.getopt`, but it
returns a :class:`~yap_ipython.utils.struct.Struct` with the options as keys
and the stripped argument string still as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Parameters
----------
arg_str : str
The arguments to parse.
opt_str : str
The options specification.
mode : str, default 'string'
If given as 'list', the argument string is returned as a list (split
on whitespace) instead of a string.
list_all : bool, default False
Put all option values in lists. Normally only options
appearing more than once are put in a list.
posix : bool, default True
Whether to split the input line in POSIX mode or not, as per the
conventions outlined in the :mod:`shlex` module from the standard
library.
"""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError('incorrect mode given: %s' % mode)
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
strict = kw.get('strict', True)
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
# If the list of inputs only has 0 or 1 thing in it, there's no
# need to look for options
argv = arg_split(arg_str, posix, strict)
# Do regular option processing
try:
opts,args = getopt(argv, opt_str, long_opts)
except GetoptError as e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args
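# Illustrative sketch (not part of the original source): how a line magic
# would typically consume parse_options. The option string follows getopt
# conventions, so 'n:' declares a -n option that takes a value.
#
#     @line_magic
#     def repeat(self, parameter_s=''):
#         opts, args = self.parse_options(parameter_s, 'n:', mode='list')
#         n = int(opts.get('n', 1))
#         for _ in range(n):
#             print(' '.join(args))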
def default_option(self, fn, optstr):
"""Make an entry in the options_table for fn, with value optstr"""
if fn not in self.lsmagic():
error("%s is not a magic function" % fn)
self.options_table[fn] = optstr
class MagicAlias(object):
"""An alias to another magic function.
An alias is determined by its magic name and magic kind. Lookup
is done at call time, so if the underlying magic changes the alias
will call the new function.
Use the :meth:`MagicsManager.register_alias` method or the
`%alias_magic` magic function to create and register a new alias.
"""
def __init__(self, shell, magic_name, magic_kind, magic_params=None):
self.shell = shell
self.magic_name = magic_name
self.magic_params = magic_params
self.magic_kind = magic_kind
self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
self.__doc__ = "Alias for `%s`." % self.pretty_target
self._in_call = False
def __call__(self, *args, **kwargs):
"""Call the magic alias."""
fn = self.shell.find_magic(self.magic_name, self.magic_kind)
if fn is None:
raise UsageError("Magic `%s` not found." % self.pretty_target)
# Protect against infinite recursion.
if self._in_call:
raise UsageError("Infinite recursion detected; "
"magic aliases cannot call themselves.")
self._in_call = True
try:
if self.magic_params:
args_list = list(args)
args_list[0] = self.magic_params + " " + args[0]
args = tuple(args_list)
return fn(*args, **kwargs)
finally:
self._in_call = False

View File

@@ -0,0 +1,278 @@
''' A decorator-based method of constructing yap_ipython magics with `argparse`
option handling.
New magic functions can be defined like so::
from yap_ipython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
@magic_arguments()
@argument('-o', '--option', help='An optional argument.')
@argument('arg', type=int, help='An integer positional argument.')
def magic_cool(self, arg):
""" A really cool magic command.
"""
args = parse_argstring(magic_cool, arg)
...
The `@magic_arguments` decorator marks the function as having argparse arguments.
The `@argument` decorator adds an argument using the same syntax as argparse's
`add_argument()` method. More sophisticated uses may also require the
`@argument_group` or `@kwds` decorator to customize the formatting and the
parsing.
Help text for the magic is automatically generated from the docstring and the
arguments::
In[1]: %cool?
%cool [-o OPTION] arg
A really cool magic command.
positional arguments:
arg An integer positional argument.
optional arguments:
-o OPTION, --option OPTION
An optional argument.
Inheritance diagram:
.. inheritance-diagram:: yap_ipython.core.magic_arguments
:parts: 3
'''
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, yap_ipython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import argparse
import re
# Our own imports
from yap_ipython.core.error import UsageError
from yap_ipython.utils.decorators import undoc
from yap_ipython.utils.process import arg_split
from yap_ipython.utils.text import dedent
NAME_RE = re.compile(r"[a-zA-Z][a-zA-Z0-9_-]*$")
@undoc
class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
"""A HelpFormatter with a couple of changes to meet our needs.
"""
# Modified to dedent text.
def _fill_text(self, text, width, indent):
return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
# Modified to wrap argument placeholders in <> where necessary.
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
# IPYTHON MODIFICATION: If args_string is not a plain name, wrap
# it in <> so it's valid RST.
if not NAME_RE.match(args_string):
args_string = "<%s>" % args_string
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
# Override the default prefix ('usage') to our % magic escape,
# in a code block.
def add_usage(self, usage, actions, groups, prefix="::\n\n %"):
super(MagicHelpFormatter, self).add_usage(usage, actions, groups, prefix)
class MagicArgumentParser(argparse.ArgumentParser):
""" An ArgumentParser tweaked for use by yap_ipython magics.
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
parents=None,
formatter_class=MagicHelpFormatter,
prefix_chars='-',
argument_default=None,
conflict_handler='error',
add_help=False):
if parents is None:
parents = []
super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
description=description, epilog=epilog,
parents=parents, formatter_class=formatter_class,
prefix_chars=prefix_chars, argument_default=argument_default,
conflict_handler=conflict_handler, add_help=add_help)
def error(self, message):
""" Raise a catchable error instead of exiting.
"""
raise UsageError(message)
def parse_argstring(self, argstring):
""" Split a string into an argument list and parse that argument list.
"""
argv = arg_split(argstring)
return self.parse_args(argv)
def construct_parser(magic_func):
""" Construct an argument parser using the function decorations.
"""
kwds = getattr(magic_func, 'argcmd_kwds', {})
if 'description' not in kwds:
kwds['description'] = getattr(magic_func, '__doc__', None)
arg_name = real_name(magic_func)
parser = MagicArgumentParser(arg_name, **kwds)
# Reverse the list of decorators in order to apply them in the
# order in which they appear in the source.
group = None
for deco in magic_func.decorators[::-1]:
result = deco.add_to_parser(parser, group)
if result is not None:
group = result
# Replace the magic function's docstring with the full help text.
magic_func.__doc__ = parser.format_help()
return parser
def parse_argstring(magic_func, argstring):
""" Parse the string of arguments for the given magic function.
"""
return magic_func.parser.parse_argstring(argstring)
def real_name(magic_func):
""" Find the real name of the magic.
"""
magic_name = magic_func.__name__
if magic_name.startswith('magic_'):
magic_name = magic_name[len('magic_'):]
return getattr(magic_func, 'argcmd_name', magic_name)
class ArgDecorator(object):
""" Base class for decorators to add ArgumentParser information to a method.
"""
def __call__(self, func):
if not getattr(func, 'has_arguments', False):
func.has_arguments = True
func.decorators = []
func.decorators.append(self)
return func
def add_to_parser(self, parser, group):
""" Add this object's information to the parser, if necessary.
"""
pass
class magic_arguments(ArgDecorator):
""" Mark the magic as having argparse arguments and possibly adjust the
name.
"""
def __init__(self, name=None):
self.name = name
def __call__(self, func):
if not getattr(func, 'has_arguments', False):
func.has_arguments = True
func.decorators = []
if self.name is not None:
func.argcmd_name = self.name
# This should be the first decorator in the list of decorators, thus the
# last to execute. Build the parser.
func.parser = construct_parser(func)
return func
class ArgMethodWrapper(ArgDecorator):
"""
Base class to define a wrapper for ArgumentParser method.
Child class must define either `_method_name` or `add_to_parser`.
"""
_method_name = None
def __init__(self, *args, **kwds):
self.args = args
self.kwds = kwds
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
if group is not None:
parser = group
getattr(parser, self._method_name)(*self.args, **self.kwds)
return None
class argument(ArgMethodWrapper):
""" Store arguments and keywords to pass to add_argument().
Instances also serve to decorate command methods.
"""
_method_name = 'add_argument'
class defaults(ArgMethodWrapper):
""" Store arguments and keywords to pass to set_defaults().
Instances also serve to decorate command methods.
"""
_method_name = 'set_defaults'
class argument_group(ArgMethodWrapper):
""" Store arguments and keywords to pass to add_argument_group().
Instances also serve to decorate command methods.
"""
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
return parser.add_argument_group(*self.args, **self.kwds)
class kwds(ArgDecorator):
""" Provide other keywords to the sub-parser constructor.
"""
def __init__(self, **kwds):
self.kwds = kwds
def __call__(self, func):
func = super(kwds, self).__call__(func)
func.argcmd_kwds = self.kwds
return func
__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
'parse_argstring']
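# Illustrative sketch (assumption, not in the original file): a cell magic
# that combines @magic_arguments with the @argument_group and @kwds helpers
# defined above. The class and method names are made up for the example.
#
#     from yap_ipython.core.magic import Magics, magics_class, cell_magic
#
#     @magics_class
#     class DemoMagics(Magics):
#         @magic_arguments()
#         @kwds(epilog="Arguments are parsed with argparse.")
#         @argument_group('Output options')
#         @argument('-v', '--verbose', action='store_true',
#                   help='Echo the cell body.')
#         @cell_magic
#         def demo(self, line, cell):
#             args = parse_argstring(self.demo, line)
#             if args.verbose:
#                 print(cell)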

View File

@@ -1,13 +0,0 @@
from modulefinder import ModuleFinder
finder = ModuleFinder()
finder.run_script('__main__.py')
print('Loaded modules:')
for name, mod in finder.modules.items():
print('%s: ' % name, end='')
print(','.join(list(mod.globalnames.keys())[:3]))
print('-'*50)
print('Modules not imported:')
print('\n'.join(finder.badmodules.keys()))

File diff suppressed because it is too large

View File

@@ -0,0 +1,366 @@
# encoding: utf-8
"""
Paging capabilities for yap_ipython.core
Notes
-----
For now this uses yap_ipython hooks, so it can't be in yap_ipython.utils. If we can get
rid of that dependency, we could move it there.
-----
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
import sys
import tempfile
from io import UnsupportedOperation
from yap_ipython import get_ipython
from yap_ipython.core.display import display
from yap_ipython.core.error import TryNext
from yap_ipython.utils.data import chop
from yap_ipython.utils.process import system
from yap_ipython.utils.terminal import get_terminal_size
from yap_ipython.utils import py3compat
def display_page(strng, start=0, screen_lines=25):
"""Just display, no paging. screen_lines is ignored."""
if isinstance(strng, dict):
data = strng
else:
if start:
strng = u'\n'.join(strng.splitlines()[start:])
data = { 'text/plain': strng }
display(data, raw=True)
def as_hook(page_func):
"""Wrap a pager func to strip the `self` arg
so it can be called as a hook.
"""
return lambda self, *args, **kwargs: page_func(*args, **kwargs)
esc_re = re.compile(r"(\x1b[^m]+m)")
def page_dumb(strng, start=0, screen_lines=25):
"""Very dumb 'pager' in Python, for when nothing else works.
Only moves forward, same interface as page(), except for pager_cmd and
mode.
"""
if isinstance(strng, dict):
strng = strng.get('text/plain', '')
out_ln = strng.splitlines()[start:]
screens = chop(out_ln,screen_lines-1)
if len(screens) == 1:
print(os.linesep.join(screens[0]))
else:
last_escape = ""
for scr in screens[0:-1]:
hunk = os.linesep.join(scr)
print(last_escape + hunk)
if not page_more():
return
esc_list = esc_re.findall(hunk)
if len(esc_list) > 0:
last_escape = esc_list[-1]
print(last_escape + os.linesep.join(screens[-1]))
def _detect_screen_size(screen_lines_def):
"""Attempt to work out the number of lines on the screen.
This is called by page(). It can raise an error (e.g. when run in the
test suite), so it's separated out so it can easily be called in a try block.
"""
TERM = os.environ.get('TERM',None)
if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
# curses causes problems on many terminals other than xterm, and
# some termios calls lock up on Sun OS5.
return screen_lines_def
try:
import termios
import curses
except ImportError:
return screen_lines_def
# There is a bug in curses, where *sometimes* it fails to properly
# initialize, and then after the endwin() call is made, the
# terminal is left in an unusable state. Rather than trying to
# check every time for this (by requesting and comparing termios
# flags each time), we just save the initial terminal state and
# unconditionally reset it every time. It's cheaper than making
# the checks.
try:
term_flags = termios.tcgetattr(sys.stdout)
except termios.error as err:
# can fail on Linux 2.6, pager_page will catch the TypeError
raise TypeError('termios error: {0}'.format(err))
try:
scr = curses.initscr()
except AttributeError:
# Curses on Solaris may not be complete, so we can't use it there
return screen_lines_def
screen_lines_real,screen_cols = scr.getmaxyx()
curses.endwin()
# Restore terminal state in case endwin() didn't.
termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
# Now we have what we needed: the screen size in rows/columns
return screen_lines_real
#print '***Screen size:',screen_lines_real,'lines x',\
#screen_cols,'columns.' # dbg
def pager_page(strng, start=0, screen_lines=0, pager_cmd=None):
"""Display a string, piping through a pager after a certain length.
strng can be a mime-bundle dict, supplying multiple representations,
keyed by mime-type.
The screen_lines parameter specifies the number of *usable* lines of your
terminal screen (total lines minus lines you need to reserve to show other
information).
If you set screen_lines to a number <=0, page() will try to auto-determine
your screen size and will only use up to (screen_size+screen_lines) for
printing, paging after that. That is, if you want auto-detection but need
to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
auto-detection without any lines reserved simply use screen_lines = 0.
If a string won't fit in the allowed lines, it is sent through the
specified pager command. If none given, look for PAGER in the environment,
and ultimately default to less.
If no system pager works, the string is sent through a 'dumb pager'
written in python, very simplistic.
"""
# for compatibility with mime-bundle form:
if isinstance(strng, dict):
strng = strng['text/plain']
# Ugly kludge, but calling curses.initscr() flat out crashes in emacs
TERM = os.environ.get('TERM','dumb')
if TERM in ['dumb','emacs'] and os.name != 'nt':
print(strng)
return
# chop off the topmost part of the string we don't want to see
str_lines = strng.splitlines()[start:]
str_toprint = os.linesep.join(str_lines)
num_newlines = len(str_lines)
len_str = len(str_toprint)
# Dumb heuristics to guesstimate number of on-screen lines the string
# takes. Very basic, but good enough for docstrings in reasonable
# terminals. If someone later feels like refining it, it's not hard.
numlines = max(num_newlines,int(len_str/80)+1)
screen_lines_def = get_terminal_size()[1]
# auto-determine screen size
if screen_lines <= 0:
try:
screen_lines += _detect_screen_size(screen_lines_def)
except (TypeError, UnsupportedOperation):
print(str_toprint)
return
#print 'numlines',numlines,'screenlines',screen_lines # dbg
if numlines <= screen_lines :
#print '*** normal print' # dbg
print(str_toprint)
else:
# Try to open pager and default to internal one if that fails.
# All failure modes are tagged as 'retval=1', to match the return
# value of a failed system command. If any intermediate attempt
# sets retval to 1, at the end we resort to our own page_dumb() pager.
pager_cmd = get_pager_cmd(pager_cmd)
pager_cmd += ' ' + get_pager_start(pager_cmd,start)
if os.name == 'nt':
if pager_cmd.startswith('type'):
# The default WinXP 'type' command is failing on complex strings.
retval = 1
else:
fd, tmpname = tempfile.mkstemp('.txt')
try:
os.close(fd)
with open(tmpname, 'wt') as tmpfile:
tmpfile.write(strng)
cmd = "%s < %s" % (pager_cmd, tmpname)
# tmpfile needs to be closed for windows
if os.system(cmd):
retval = 1
else:
retval = None
finally:
os.remove(tmpname)
else:
try:
retval = None
# if I use popen4, things hang. No idea why.
#pager,shell_out = os.popen4(pager_cmd)
pager = os.popen(pager_cmd, 'w')
try:
pager_encoding = pager.encoding or sys.stdout.encoding
pager.write(strng)
finally:
retval = pager.close()
except IOError as msg: # broken pipe when user quits
if msg.args == (32, 'Broken pipe'):
retval = None
else:
retval = 1
except OSError:
# Other strange problems, sometimes seen in Win2k/cygwin
retval = 1
if retval is not None:
page_dumb(strng,screen_lines=screen_lines)
def page(data, start=0, screen_lines=0, pager_cmd=None):
"""Display content in a pager, piping through a pager after a certain length.
data can be a mime-bundle dict, supplying multiple representations,
keyed by mime-type, or text.
Pager is dispatched via the `show_in_pager` yap_ipython hook.
If no hook is registered, `pager_page` will be used.
"""
# Some routines may auto-compute start offsets incorrectly and pass a
# negative value. Offset to 0 for robustness.
start = max(0, start)
# first, try the hook
ip = get_ipython()
if ip:
try:
ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines)
return
except TryNext:
pass
# fallback on default pager
return pager_page(data, start, screen_lines, pager_cmd)
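# Illustrative sketch (not part of the original source): paging a plain
# string and a mime-bundle through page(). The hook dispatch above means
# the same call works in a terminal and in a frontend kernel.
#
#     page('a very long help text ...')
#     page({'text/plain': 'plain fallback',
#           'text/html': '<b>rich form</b>'})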
def page_file(fname, start=0, pager_cmd=None):
"""Page a file, using an optional pager command and starting line.
"""
pager_cmd = get_pager_cmd(pager_cmd)
pager_cmd += ' ' + get_pager_start(pager_cmd,start)
try:
if os.environ['TERM'] in ['emacs','dumb']:
raise EnvironmentError
system(pager_cmd + ' ' + fname)
except:
try:
if start > 0:
start -= 1
page(open(fname).read(),start)
except:
print('Unable to show file',repr(fname))
def get_pager_cmd(pager_cmd=None):
"""Return a pager command.
Makes some attempts at finding an OS-correct one.
"""
if os.name == 'posix':
default_pager_cmd = 'less -R' # -R for color control sequences
elif os.name in ['nt','dos']:
default_pager_cmd = 'type'
if pager_cmd is None:
try:
pager_cmd = os.environ['PAGER']
except:
pager_cmd = default_pager_cmd
if pager_cmd == 'less' and '-r' not in os.environ.get('LESS', '').lower():
pager_cmd += ' -R'
return pager_cmd
def get_pager_start(pager, start):
"""Return the string for paging files with an offset.
This is the '+N' argument which less and more (under Unix) accept.
"""
if pager in ['less','more']:
if start:
start_string = '+' + str(start)
else:
start_string = ''
else:
start_string = ''
return start_string
# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
import msvcrt
def page_more():
""" Smart pausing between pages
@return: True if need print more lines, False if quit
"""
sys.stdout.write('---Return to continue, q to quit--- ')
ans = msvcrt.getwch()
if ans in ("q", "Q"):
result = False
else:
result = True
sys.stdout.write("\b"*37 + " "*37 + "\b"*37)
return result
else:
def page_more():
ans = py3compat.input('---Return to continue, q to quit--- ')
if ans.lower().startswith('q'):
return False
else:
return True
def snip_print(str,width = 75,print_full = 0,header = ''):
"""Print a string snipping the midsection to fit in width.
print_full: mode control:
- 0: only snip long strings
- 1: send to page() directly.
- 2: snip long strings and ask for full length viewing with page()
Return 1 if snipping was necessary, 0 otherwise."""
if print_full == 1:
page(header+str)
return 0
print(header, end=' ')
if len(str) < width:
print(str)
snip = 0
else:
whalf = int((width -5)/2)
print(str[:whalf] + ' <...> ' + str[-whalf:])
snip = 1
if snip and print_full == 2:
if py3compat.input(header+' Snipped. View (y/n)? [N]').lower() == 'y':
page(str)
return snip

View File

@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
"""Payload system for yap_ipython.
Authors:
* Fernando Perez
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from traitlets.config.configurable import Configurable
from traitlets import List
#-----------------------------------------------------------------------------
# Main payload class
#-----------------------------------------------------------------------------
class PayloadManager(Configurable):
_payload = List([])
def write_payload(self, data, single=True):
"""Include or update the specified `data` payload in the PayloadManager.
If a previous payload with the same source exists and `single` is True,
it will be overwritten with the new one.
"""
if not isinstance(data, dict):
raise TypeError('Each payload write must be a dict, got: %r' % data)
if single and 'source' in data:
source = data['source']
for i, pl in enumerate(self._payload):
if 'source' in pl and pl['source'] == source:
self._payload[i] = data
return
self._payload.append(data)
def read_payload(self):
return self._payload
def clear_payload(self):
self._payload = []
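# Illustrative sketch (assumption, not in the original file): how a payload
# is written and later collected, with `shell` standing in for the running
# InteractiveShell instance.
#
#     shell.payload_manager.write_payload({'source': 'page',
#                                          'data': {'text/plain': 'hello'},
#                                          'start': 0})
#     pending = shell.payload_manager.read_payload()
#     shell.payload_manager.clear_payload()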

View File

@@ -0,0 +1,52 @@
# encoding: utf-8
"""A payload based version of page."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import warnings
from yap_ipython.core.getipython import get_ipython
def page(strng, start=0, screen_lines=0, pager_cmd=None):
"""Print a string, piping through a pager.
This version ignores the screen_lines and pager_cmd arguments and uses
yap_ipython's payload system instead.
Parameters
----------
strng : str or mime-dict
Text to page, or a mime-type keyed dict of already formatted data.
start : int
Starting line at which to place the display.
"""
# Some routines may auto-compute start offsets incorrectly and pass a
# negative value. Offset to 0 for robustness.
start = max(0, start)
shell = get_ipython()
if isinstance(strng, dict):
data = strng
else:
data = {'text/plain' : strng}
payload = dict(
source='page',
data=data,
start=start,
)
shell.payload_manager.write_payload(payload)
def install_payload_page():
"""DEPRECATED, use show_in_pager hook
Install this version of page as yap_ipython.core.page.page.
"""
warnings.warn("""install_payload_page is deprecated.
Use `ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))`
""")
from yap_ipython.core import page as corepage
corepage.page = page

View File

@@ -0,0 +1,709 @@
# encoding: utf-8
"""
Prefiltering components.
Prefilters transform user input before it is exec'd by Python. These
transforms are used to implement additional syntax such as !ls and %magic.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
from keyword import iskeyword
import re
from yap_ipython.core.autocall import IPyAutocall
from traitlets.config.configurable import Configurable
from yap_ipython.core.inputsplitter import (
ESC_MAGIC,
ESC_QUOTE,
ESC_QUOTE2,
ESC_PAREN,
)
from yap_ipython.core.macro import Macro
from yap_ipython.core.splitinput import LineInfo
from traitlets import (
List, Integer, Unicode, Bool, Instance, CRegExp
)
#-----------------------------------------------------------------------------
# Global utilities, errors and constants
#-----------------------------------------------------------------------------
class PrefilterError(Exception):
pass
# RegExp to identify potential function names
re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')
# RegExp to exclude strings with this start from autocalling. In
# particular, all binary operators should be excluded, so that if foo is
# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
# characters '!=()' don't need to be checked for, as the checkPythonChars
# routine explicitly does so, to catch direct calls and rebindings of
# existing names.
# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
# it affects the rest of the group in square brackets.
re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
r'|^is |^not |^in |^and |^or ')
# try to catch also methods for stuff in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
# Handler Check Utilities
def is_shadowed(identifier, ip):
"""Is the given identifier defined in one of the namespaces which shadow
the alias and magic namespaces? Note that an identifier is different
from an ifun, because it cannot contain a '.' character."""
# This is much safer than calling ofind, which can change state
return (identifier in ip.user_ns \
or identifier in ip.user_global_ns \
or identifier in ip.ns_table['builtin']\
or iskeyword(identifier))
#-----------------------------------------------------------------------------
# Main Prefilter manager
#-----------------------------------------------------------------------------
class PrefilterManager(Configurable):
"""Main prefilter component.
The yap_ipython prefilter is run on all user input before it is executed. The
prefilter consumes lines of input and produces transformed lines of
input.
The implementation consists of two phases:
1. Transformers
2. Checkers and handlers
Over time, we plan on deprecating the checkers and handlers and doing
everything in the transformers.
The transformers are instances of :class:`PrefilterTransformer` and have
a single method :meth:`transform` that takes a line and returns a
transformed line. The transformation can be accomplished using any
tool, but our current ones use regular expressions for speed.
After all the transformers have been run, the line is fed to the checkers,
which are instances of :class:`PrefilterChecker`. The line is passed to
the :meth:`check` method, which either returns `None` or a
:class:`PrefilterHandler` instance. If `None` is returned, the other
checkers are tried. If a :class:`PrefilterHandler` instance is returned,
the line is passed to the :meth:`handle` method of the returned
handler and no further checkers are tried.
Both transformers and checkers have a `priority` attribute that determines
the order in which they are called. Smaller priorities are tried first.
Both transformers and checkers also have an `enabled` attribute, which is
a boolean that determines if the instance is used.
Users or developers can change the priority or enabled attribute of
transformers or checkers, but they must call the :meth:`sort_checkers`
or :meth:`sort_transformers` method after changing the priority.
"""
multi_line_specials = Bool(True).tag(config=True)
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(PrefilterManager, self).__init__(shell=shell, **kwargs)
self.shell = shell
self.init_transformers()
self.init_handlers()
self.init_checkers()
#-------------------------------------------------------------------------
# API for managing transformers
#-------------------------------------------------------------------------
def init_transformers(self):
"""Create the default transformers."""
self._transformers = []
for transformer_cls in _default_transformers:
transformer_cls(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_transformers(self):
"""Sort the transformers by priority.
This must be called after the priority of a transformer is changed.
The :meth:`register_transformer` method calls this automatically.
"""
self._transformers.sort(key=lambda x: x.priority)
@property
def transformers(self):
"""Return a list of checkers, sorted by priority."""
return self._transformers
def register_transformer(self, transformer):
"""Register a transformer instance."""
if transformer not in self._transformers:
self._transformers.append(transformer)
self.sort_transformers()
def unregister_transformer(self, transformer):
"""Unregister a transformer instance."""
if transformer in self._transformers:
self._transformers.remove(transformer)
#-------------------------------------------------------------------------
# API for managing checkers
#-------------------------------------------------------------------------
def init_checkers(self):
"""Create the default checkers."""
self._checkers = []
for checker in _default_checkers:
checker(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_checkers(self):
"""Sort the checkers by priority.
This must be called after the priority of a checker is changed.
The :meth:`register_checker` method calls this automatically.
"""
self._checkers.sort(key=lambda x: x.priority)
@property
def checkers(self):
"""Return a list of checkers, sorted by priority."""
return self._checkers
def register_checker(self, checker):
"""Register a checker instance."""
if checker not in self._checkers:
self._checkers.append(checker)
self.sort_checkers()
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker)
#-------------------------------------------------------------------------
# API for managing handlers
#-------------------------------------------------------------------------
def init_handlers(self):
"""Create the default handlers."""
self._handlers = {}
self._esc_handlers = {}
for handler in _default_handlers:
handler(
shell=self.shell, prefilter_manager=self, parent=self
)
@property
def handlers(self):
"""Return a dict of all the handlers."""
return self._handlers
def register_handler(self, name, handler, esc_strings):
"""Register a handler instance by name with esc_strings."""
self._handlers[name] = handler
for esc_str in esc_strings:
self._esc_handlers[esc_str] = handler
def unregister_handler(self, name, handler, esc_strings):
"""Unregister a handler instance by name with esc_strings."""
try:
del self._handlers[name]
except KeyError:
pass
for esc_str in esc_strings:
h = self._esc_handlers.get(esc_str)
if h is handler:
del self._esc_handlers[esc_str]
def get_handler_by_name(self, name):
"""Get a handler by its name."""
return self._handlers.get(name)
def get_handler_by_esc(self, esc_str):
"""Get a handler by its escape string."""
return self._esc_handlers.get(esc_str)
#-------------------------------------------------------------------------
# Main prefiltering API
#-------------------------------------------------------------------------
def prefilter_line_info(self, line_info):
"""Prefilter a line that has been converted to a LineInfo object.
This implements the checker/handler part of the prefilter pipe.
"""
# print "prefilter_line_info: ", line_info
handler = self.find_handler(line_info)
return handler.handle(line_info)
def find_handler(self, line_info):
"""Find a handler for the line_info by trying checkers."""
for checker in self.checkers:
if checker.enabled:
handler = checker.check(line_info)
if handler:
return handler
return self.get_handler_by_name('normal')
def transform_line(self, line, continue_prompt):
"""Calls the enabled transformers in order of increasing priority."""
for transformer in self.transformers:
if transformer.enabled:
line = transformer.transform(line, continue_prompt)
return line
def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print "prefilter_line: ", line, continue_prompt
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return ''
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt)
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
return normal_handler.handle(line_info)
# special handlers are only allowed for single line statements
if continue_prompt and not self.multi_line_specials:
return normal_handler.handle(line_info)
prefiltered = self.prefilter_line_info(line_info)
# print "prefiltered line: %r" % prefiltered
return prefiltered
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out
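# Illustrative sketch (assumption, not in the original file): with automagic
# enabled (the default), the checker/handler phase rewrites a bare magic
# name into an explicit run_line_magic call. `ip` is the active shell.
#
#     ip.prefilter_manager.prefilter_line('time 1+1')
#     # -> "get_ipython().run_line_magic('time', '1+1')"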
#-----------------------------------------------------------------------------
# Prefilter transformers
#-----------------------------------------------------------------------------
class PrefilterTransformer(Configurable):
"""Transform a line of user input."""
priority = Integer(100).tag(config=True)
# Transformers don't currently use shell or prefilter_manager, but as we
# move away from checkers and handlers, they will need them.
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('yap_ipython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterTransformer, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_transformer(self)
def transform(self, line, continue_prompt):
"""Transform a line, returning the new one."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
#-----------------------------------------------------------------------------
# Prefilter checkers
#-----------------------------------------------------------------------------
class PrefilterChecker(Configurable):
"""Inspect an input line and return a handler for that line."""
priority = Integer(100).tag(config=True)
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('yap_ipython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterChecker, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_checker(self)
def check(self, line_info):
"""Inspect line_info and return a handler instance or None."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
class EmacsChecker(PrefilterChecker):
priority = Integer(100).tag(config=True)
enabled = Bool(False).tag(config=True)
def check(self, line_info):
"Emacs ipython-mode tags certain input lines."
if line_info.line.endswith('# PYTHON-MODE'):
return self.prefilter_manager.get_handler_by_name('emacs')
else:
return None
class MacroChecker(PrefilterChecker):
priority = Integer(250).tag(config=True)
def check(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
if isinstance(obj, Macro):
return self.prefilter_manager.get_handler_by_name('macro')
else:
return None
class IPyAutocallChecker(PrefilterChecker):
priority = Integer(300).tag(config=True)
def check(self, line_info):
"Instances of IPyAutocall in user_ns get autocalled immediately"
obj = self.shell.user_ns.get(line_info.ifun, None)
if isinstance(obj, IPyAutocall):
obj.set_ip(self.shell)
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
class AssignmentChecker(PrefilterChecker):
priority = Integer(600).tag(config=True)
def check(self, line_info):
"""Check to see if user is assigning to a var for the first time, in
which case we want to avoid any sort of automagic / autocall games.
This allows users to rebind alias or magic names to true python
variables (the magic/alias systems always take a back seat to true
python code). E.g. ls='hi', or ls,that=1,2"""
if line_info.the_rest:
if line_info.the_rest[0] in '=,':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutoMagicChecker(PrefilterChecker):
priority = Integer(700).tag(config=True)
def check(self, line_info):
"""If the ifun is magic, and automagic is on, run it. Note: normal,
non-auto magic would already have been triggered via '%' in
check_esc_chars. This just checks for automagic. Also, before
triggering the magic handler, make sure that there is nothing in the
user namespace which could shadow it."""
if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
return None
# We have a likely magic method. Make sure we should actually call it.
if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
return None
head = line_info.ifun.split('.',1)[0]
if is_shadowed(head, self.shell):
return None
return self.prefilter_manager.get_handler_by_name('magic')
class PythonOpsChecker(PrefilterChecker):
priority = Integer(900).tag(config=True)
def check(self, line_info):
"""If the 'rest' of the line begins with a function call or pretty much
any python operator, we should simply execute the line (regardless of
whether or not there's a possible autocall expansion). This avoids
spurious (and very confusing) getattr() accesses."""
if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutocallChecker(PrefilterChecker):
priority = Integer(1000).tag(config=True)
function_name_regexp = CRegExp(re_fun_name,
help="RegExp to identify potential function names."
).tag(config=True)
exclude_regexp = CRegExp(re_exclude_auto,
help="RegExp to exclude strings with this start from autocalling."
).tag(config=True)
def check(self, line_info):
"Check if the initial word/function is callable and autocall is on."
if not self.shell.autocall:
return None
oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
if not oinfo['found']:
return None
ignored_funs = ['b', 'f', 'r', 'u', 'br', 'rb', 'fr', 'rf']
ifun = line_info.ifun
line = line_info.line
if ifun.lower() in ignored_funs and (line.startswith(ifun + "'") or line.startswith(ifun + '"')):
return None
if callable(oinfo['obj']) \
and (not self.exclude_regexp.match(line_info.the_rest)) \
and self.function_name_regexp.match(line_info.ifun):
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
#-----------------------------------------------------------------------------
# Prefilter handlers
#-----------------------------------------------------------------------------
class PrefilterHandler(Configurable):
handler_name = Unicode('normal')
esc_strings = List([])
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('yap_ipython.core.prefilter.PrefilterManager', allow_none=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterHandler, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_handler(
self.handler_name,
self,
self.esc_strings
)
def handle(self, line_info):
# print "normal: ", line_info
"""Handle normal input lines. Use as a template for handlers."""
# With autoindent on, we need some way to exit the input loop, and I
# don't want to force the user to have to backspace all the way to
# clear the line. The rule will be in this case, that either two
# lines of pure whitespace in a row, or a line of pure whitespace but
# of a size different to the indent level, will exit the input loop.
line = line_info.line
continue_prompt = line_info.continue_prompt
if (continue_prompt and
self.shell.autoindent and
line.isspace() and
0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
line = ''
return line
def __str__(self):
return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
class MacroHandler(PrefilterHandler):
handler_name = Unicode("macro")
def handle(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
pre_space = line_info.pre_whitespace
line_sep = "\n" + pre_space
return pre_space + line_sep.join(obj.value.splitlines())
class MagicHandler(PrefilterHandler):
handler_name = Unicode('magic')
esc_strings = List([ESC_MAGIC])
def handle(self, line_info):
"""Execute magic functions."""
ifun = line_info.ifun
the_rest = line_info.the_rest
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_arg_s = ifun + " " + the_rest
t_magic_name, _, t_magic_arg_s = t_arg_s.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
cmd = '%sget_ipython().run_line_magic(%r, %r)' % (line_info.pre_whitespace, t_magic_name, t_magic_arg_s)
return cmd
class AutoHandler(PrefilterHandler):
handler_name = Unicode('auto')
esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
def handle(self, line_info):
"""Handle lines which can be auto-executed, quoting if requested."""
line = line_info.line
ifun = line_info.ifun
the_rest = line_info.the_rest
esc = line_info.esc
continue_prompt = line_info.continue_prompt
obj = line_info.ofind(self.shell)['obj']
# This should only be active for single-line input!
if continue_prompt:
return line
force_auto = isinstance(obj, IPyAutocall)
# User objects sometimes raise exceptions on attribute access other
# than AttributeError (we've seen it in the past), so it's safest to be
# ultra-conservative here and catch all.
try:
auto_rewrite = obj.rewrite
except Exception:
auto_rewrite = True
if esc == ESC_QUOTE:
# Auto-quote splitting on whitespace
newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
elif esc == ESC_QUOTE2:
# Auto-quote whole string
newcmd = '%s("%s")' % (ifun,the_rest)
elif esc == ESC_PAREN:
newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
else:
# Auto-paren.
if force_auto:
# Don't rewrite if it is already a call.
do_rewrite = not the_rest.startswith('(')
else:
if not the_rest:
# We only apply it to argument-less calls if the autocall
# parameter is set to 2.
do_rewrite = (self.shell.autocall >= 2)
elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
# Don't autocall in this case: item access for an object
# which is BOTH callable and implements __getitem__.
do_rewrite = False
else:
do_rewrite = True
# Figure out the rewritten command
if do_rewrite:
if the_rest.endswith(';'):
newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
else:
newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
else:
normal_handler = self.prefilter_manager.get_handler_by_name('normal')
return normal_handler.handle(line_info)
# Display the rewritten call
if auto_rewrite:
self.shell.auto_rewrite_input(newcmd)
return newcmd
class EmacsHandler(PrefilterHandler):
handler_name = Unicode('emacs')
esc_strings = List([])
def handle(self, line_info):
"""Handle input lines marked by python-mode."""
# Currently, nothing is done. Later more functionality can be added
# here if needed.
# The input cache shouldn't be updated
return line_info.line
#-----------------------------------------------------------------------------
# Defaults
#-----------------------------------------------------------------------------
_default_transformers = [
]
_default_checkers = [
EmacsChecker,
MacroChecker,
IPyAutocallChecker,
AssignmentChecker,
AutoMagicChecker,
PythonOpsChecker,
AutocallChecker
]
_default_handlers = [
PrefilterHandler,
MacroHandler,
MagicHandler,
AutoHandler,
EmacsHandler
]
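# Illustrative sketch (assumption, not in the original file): adding a custom
# checker to a running shell's pipeline. Instantiating a PrefilterChecker
# subclass with the manager registers it automatically (see
# PrefilterChecker.__init__ above); `ip` stands for get_ipython().
#
#     class ShoutingChecker(PrefilterChecker):
#         priority = Integer(50).tag(config=True)
#         def check(self, line_info):
#             if line_info.line.isupper():
#                 return self.prefilter_manager.get_handler_by_name('normal')
#             return None
#
#     ShoutingChecker(shell=ip, prefilter_manager=ip.prefilter_manager,
#                     parent=ip.prefilter_manager)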

View File

@@ -0,0 +1,312 @@
# encoding: utf-8
"""
An application for managing yap_ipython profiles.
To be invoked as the `ipython profile` subcommand.
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from traitlets.config.application import Application
from yap_ipython.core.application import (
BaseYAPApplication, base_flags
)
from yap_ipython.core.profiledir import ProfileDir
from yap_ipython.utils.importstring import import_item
from yap_ipython.paths import get_ipython_dir, get_ipython_package_dir
from traitlets import Unicode, Bool, Dict, observe
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
create_help = """Create an yap_ipython profile by name
Create an ipython profile directory by its name or
profile directory path. Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. Once created, you
can edit the configuration files in the profile
directory to configure yap_ipython. Most users will create a
profile directory by name,
`ipython profile create myprofile`, which will put the directory
in `<ipython_dir>/profile_myprofile`.
"""
list_help = """List available yap_ipython profiles
List all available profiles, by profile location, that can
be found in the current working directory or in the ipython
directory. Profile directories are named using the convention
'profile_<profile>'.
"""
profile_help = """Manage yap_ipython profiles
Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. You can create profiles
with `ipython profile create <name>`, or see the profiles you
already have with `ipython profile list`
To get started configuring yap_ipython, simply do:
$> ipython profile create
and yap_ipython will create the default profile in <ipython_dir>/profile_default,
where you can edit ipython_config.py to start configuring yap_ipython.
"""
_list_examples = "ipython profile list # list all profiles"
_create_examples = """
ipython profile create foo # create profile foo w/ default config files
ipython profile create foo --reset # restage default config files over current
ipython profile create foo --parallel # also stage parallel config files
"""
_main_examples = """
ipython profile create -h # show the help string for the create subcommand
ipython profile list -h # show the help string for the list subcommand
ipython locate profile foo # print the path to the directory for profile 'foo'
"""
#-----------------------------------------------------------------------------
# Profile Application Class (for `ipython profile` subcommand)
#-----------------------------------------------------------------------------
def list_profiles_in(path):
"""list profiles in a given root directory"""
files = os.listdir(path)
profiles = []
for f in files:
try:
full_path = os.path.join(path, f)
except UnicodeError:
continue
if os.path.isdir(full_path) and f.startswith('profile_'):
profiles.append(f.split('_',1)[-1])
return profiles
def list_bundled_profiles():
"""list profiles that are bundled with yap_ipython."""
path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
files = os.listdir(path)
profiles = []
for profile in files:
full_path = os.path.join(path, profile)
if os.path.isdir(full_path) and profile != "__pycache__":
profiles.append(profile)
return profiles
class ProfileLocate(BaseYAPApplication):
description = """print the path to an yap_ipython profile dir"""
def parse_command_line(self, argv=None):
super(ProfileLocate, self).parse_command_line(argv)
if self.extra_args:
self.profile = self.extra_args[0]
def start(self):
print(self.profile_dir.location)
class ProfileList(Application):
name = u'ipython-profile'
description = list_help
examples = _list_examples
aliases = Dict({
'ipython-dir' : 'ProfileList.ipython_dir',
'log-level' : 'Application.log_level',
})
flags = Dict(dict(
debug = ({'Application' : {'log_level' : 0}},
"Set Application.log_level to 0, maximizing log output."
)
))
ipython_dir = Unicode(get_ipython_dir(),
help="""
The name of the yap_ipython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
is usually $HOME/.ipython. This option can also be specified through
the environment variable IPYTHONDIR.
"""
).tag(config=True)
def _print_profiles(self, profiles):
"""print list of profiles, indented."""
for profile in profiles:
print(' %s' % profile)
def list_profile_dirs(self):
profiles = list_bundled_profiles()
if profiles:
print()
print("Available profiles in yap_ipython:")
self._print_profiles(profiles)
print()
print(" The first request for a bundled profile will copy it")
print(" into your yap_ipython directory (%s)," % self.ipython_dir)
print(" where you can customize it.")
profiles = list_profiles_in(self.ipython_dir)
if profiles:
print()
print("Available profiles in %s:" % self.ipython_dir)
self._print_profiles(profiles)
profiles = list_profiles_in(os.getcwd())
if profiles:
print()
print("Available profiles in current directory (%s):" % os.getcwd())
self._print_profiles(profiles)
print()
print("To use any of the above profiles, start yap_ipython with:")
print(" ipython --profile=<name>")
print()
def start(self):
self.list_profile_dirs()
create_flags = {}
create_flags.update(base_flags)
# don't include '--init' flag, which implies running profile create in other apps
create_flags.pop('init')
create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
"reset config files in this profile to the defaults.")
create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
"Include the config files for parallel "
"computing apps (ipengine, ipcontroller, etc.)")
class ProfileCreate(BaseYAPApplication):
name = u'ipython-profile'
description = create_help
examples = _create_examples
auto_create = Bool(True)
def _log_format_default(self):
return "[%(name)s] %(message)s"
def _copy_config_files_default(self):
return True
parallel = Bool(False,
help="whether to include parallel computing config files"
).tag(config=True)
@observe('parallel')
def _parallel_changed(self, change):
parallel_files = [ 'ipcontroller_config.py',
'ipengine_config.py',
'ipcluster_config.py'
]
if change['new']:
for cf in parallel_files:
self.config_files.append(cf)
else:
for cf in parallel_files:
if cf in self.config_files:
self.config_files.remove(cf)
def parse_command_line(self, argv):
super(ProfileCreate, self).parse_command_line(argv)
# accept positional arg as profile name
if self.extra_args:
self.profile = self.extra_args[0]
flags = Dict(create_flags)
classes = [ProfileDir]
def _import_app(self, app_path):
"""import an app class"""
app = None
name = app_path.rsplit('.', 1)[-1]
try:
app = import_item(app_path)
except ImportError:
self.log.info("Couldn't import %s, config file will be excluded", name)
except Exception:
self.log.warning('Unexpected error importing %s', name, exc_info=True)
return app
def init_config_files(self):
super(ProfileCreate, self).init_config_files()
# use local imports, since these classes may import from here
from yap_ipython.terminal.ipapp import TerminalIPythonApp
apps = [TerminalIPythonApp]
for app_path in (
'yap_kernel.kernelapp.YAPKernelApp',
):
app = self._import_app(app_path)
if app is not None:
apps.append(app)
if self.parallel:
from ipyparallel.apps.ipcontrollerapp import IPControllerApp
from ipyparallel.apps.ipengineapp import IPEngineApp
from ipyparallel.apps.ipclusterapp import IPClusterStart
apps.extend([
IPControllerApp,
IPEngineApp,
IPClusterStart,
])
for App in apps:
app = App()
app.config.update(self.config)
app.log = self.log
app.overwrite = self.overwrite
app.copy_config_files=True
app.ipython_dir=self.ipython_dir
app.profile_dir=self.profile_dir
app.init_config_files()
def stage_default_config_file(self):
pass
class ProfileApp(Application):
name = u'ipython profile'
description = profile_help
examples = _main_examples
subcommands = Dict(dict(
create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
list = (ProfileList, ProfileList.description.splitlines()[0]),
locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
))
def start(self):
if self.subapp is None:
print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
print()
self.print_description()
self.print_subcommands()
self.exit(1)
else:
return self.subapp.start()

View File

@@ -0,0 +1,223 @@
# encoding: utf-8
"""An object for managing yap_ipython profile directories."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import errno
from traitlets.config.configurable import LoggingConfigurable
from yap_ipython.paths import get_ipython_package_dir
from yap_ipython.utils.path import expand_path, ensure_dir_exists
from traitlets import Unicode, Bool, observe
#-----------------------------------------------------------------------------
# Module errors
#-----------------------------------------------------------------------------
class ProfileDirError(Exception):
pass
#-----------------------------------------------------------------------------
# Class for managing profile directories
#-----------------------------------------------------------------------------
class ProfileDir(LoggingConfigurable):
"""An object to manage the profile directory and its resources.
The profile directory is used by all yap_ipython applications, to manage
configuration, logging and security.
This object knows how to find, create and manage these directories. This
should be used by any code that wants to handle profiles.
"""
security_dir_name = Unicode('security')
log_dir_name = Unicode('log')
startup_dir_name = Unicode('startup')
pid_dir_name = Unicode('pid')
static_dir_name = Unicode('static')
security_dir = Unicode(u'')
log_dir = Unicode(u'')
startup_dir = Unicode(u'')
pid_dir = Unicode(u'')
static_dir = Unicode(u'')
location = Unicode(u'',
help="""Set the profile location directly. This overrides the logic used by the
`profile` option.""",
).tag(config=True)
_location_isset = Bool(False) # flag for detecting multiply set location
@observe('location')
def _location_changed(self, change):
if self._location_isset:
raise RuntimeError("Cannot set profile location more than once.")
self._location_isset = True
new = change['new']
ensure_dir_exists(new)
# ensure config files exist:
self.security_dir = os.path.join(new, self.security_dir_name)
self.log_dir = os.path.join(new, self.log_dir_name)
self.startup_dir = os.path.join(new, self.startup_dir_name)
self.pid_dir = os.path.join(new, self.pid_dir_name)
self.static_dir = os.path.join(new, self.static_dir_name)
self.check_dirs()
def _mkdir(self, path, mode=None):
"""ensure a directory exists at a given path
This is a version of os.mkdir, with the following differences:
- returns True if it created the directory, False otherwise
- ignores EEXIST, protecting against race conditions where
the dir may have been created in between the check and
the creation
- sets permissions if requested and the dir already exists
"""
if os.path.exists(path):
if mode and os.stat(path).st_mode != mode:
try:
os.chmod(path, mode)
except OSError:
self.log.warning(
"Could not set permissions on %s",
path
)
return False
try:
if mode:
os.mkdir(path, mode)
else:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
return False
else:
raise
return True
@observe('log_dir')
def check_log_dir(self, change=None):
self._mkdir(self.log_dir)
@observe('startup_dir')
def check_startup_dir(self, change=None):
self._mkdir(self.startup_dir)
readme = os.path.join(self.startup_dir, 'README')
src = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'README_STARTUP')
if not os.path.exists(src):
self.log.warning("Could not copy README_STARTUP to startup dir. Source file %s does not exist.", src)
if os.path.exists(src) and not os.path.exists(readme):
shutil.copy(src, readme)
@observe('security_dir')
def check_security_dir(self, change=None):
self._mkdir(self.security_dir, 0o40700)
@observe('pid_dir')
def check_pid_dir(self, change=None):
self._mkdir(self.pid_dir, 0o40700)
def check_dirs(self):
self.check_security_dir()
self.check_log_dir()
self.check_pid_dir()
self.check_startup_dir()
def copy_config_file(self, config_file, path=None, overwrite=False):
"""Copy a default config file into the active profile directory.
Default configuration files are kept in :mod:`yap_ipython.core.profile`.
This function moves these from that location to the working profile
directory.
"""
dst = os.path.join(self.location, config_file)
if os.path.isfile(dst) and not overwrite:
return False
if path is None:
path = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'default')
src = os.path.join(path, config_file)
shutil.copy(src, dst)
return True
@classmethod
def create_profile_dir(cls, profile_dir, config=None):
"""Create a new profile directory given a full path.
Parameters
----------
profile_dir : str
The full path to the profile directory. If it does exist, it will
be used. If not, it will be created.
"""
return cls(location=profile_dir, config=config)
@classmethod
def create_profile_dir_by_name(cls, path, name=u'default', config=None):
"""Create a profile dir by profile name and path.
Parameters
----------
path : unicode
The path (directory) to put the profile directory in.
name : unicode
The name of the profile. The name of the profile directory will
be "profile_<profile>".
"""
if not os.path.isdir(path):
raise ProfileDirError('Directory not found: %s' % path)
profile_dir = os.path.join(path, u'profile_' + name)
return cls(location=profile_dir, config=config)
@classmethod
def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
"""Find an existing profile dir by profile name, return its ProfileDir.
This searches through a sequence of paths for a profile dir. If it
is not found, a :class:`ProfileDirError` exception will be raised.
The search path algorithm is:
1. ``os.getcwd()``
2. ``ipython_dir``
Parameters
----------
ipython_dir : unicode or str
The yap_ipython directory to use.
name : unicode or str
The name of the profile. The name of the profile directory
will be "profile_<profile>".
"""
dirname = u'profile_' + name
paths = [os.getcwd(), ipython_dir]
for p in paths:
profile_dir = os.path.join(p, dirname)
if os.path.isdir(profile_dir):
return cls(location=profile_dir, config=config)
else:
raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
@classmethod
def find_profile_dir(cls, profile_dir, config=None):
"""Find/create a profile dir and return its ProfileDir.
This will create the profile directory if it doesn't exist.
Parameters
----------
profile_dir : unicode or str
The path of the profile directory.
"""
profile_dir = expand_path(profile_dir)
if not os.path.isdir(profile_dir):
raise ProfileDirError('Profile directory not found: %s' % profile_dir)
return cls(location=profile_dir, config=config)
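# Illustrative usage sketch (not part of the original file): create a profile
# directory under a throwaway IPython dir and locate it again by name. The
# profile name 'demo' and the use of tempfile are assumptions for this example.
if __name__ == '__main__':
    import tempfile
    _ipython_dir = tempfile.mkdtemp()
    _created = ProfileDir.create_profile_dir_by_name(_ipython_dir, name=u'demo')
    _found = ProfileDir.find_profile_dir_by_name(_ipython_dir, name=u'demo')
    print(_found.location)  # .../profile_demo, with log/, startup/, pid/ and security/ created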

View File

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
"""Being removed
"""
class LazyEvaluate(object):
"""This is used for formatting strings with values that need to be updated
at that time, such as the current time or working directory."""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, **kwargs):
self.kwargs.update(kwargs)
return self.func(*self.args, **self.kwargs)
def __str__(self):
return str(self())
def __format__(self, format_spec):
return format(self(), format_spec)
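# Illustrative usage sketch (not part of the original file): the wrapped call is
# deferred until the value is actually formatted, so it is always current.
if __name__ == '__main__':
    import time
    _now = LazyEvaluate(time.strftime, "%H:%M:%S")
    print("started at {}".format(_now))  # time.strftime runs at format time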

View File

@@ -0,0 +1,412 @@
# -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from yap_ipython.core.display import _pngxy
from yap_ipython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'qt': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'agg': 'agg',
'svg': 'svg',
'pdf': 'pdf',
'ps': 'ps',
'inline': 'module://yap_kernel.pylab.backend_inline',
'ipympl': 'module://ipympl.backend_nbagg',
'widget': 'module://ipympl.backend_nbagg',
}
# We also need a reverse backend2gui mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://yap_kernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
but the function continues processing further figures.
Parameters
----------
figs : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
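# Illustrative sketch (assumes matplotlib is installed; the Agg backend is
# forced so it also runs headless): retrieve open figures back by number.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    _f1 = plt.figure(1)
    _f2 = plt.figure(2)
    print(len(getfigs()))          # 2 -> all open figures
    print(getfigs(2)[0] is _f2)    # True -> lookup by figure number
    print(getfigs(99))             # warns and returns [] for an unknown number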
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy-to-remember convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = {
"format":fmt,
"facecolor":fig.get_facecolor(),
"edgecolor":fig.get_edgecolor(),
"dpi":dpi,
"bbox_inches":bbox_inches,
}
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = {"width": w//2, "height":h//2}
return pngdata, metadata
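# Illustrative sketch (matplotlib/Agg as above): render a figure to raw PNG
# bytes, and to a pixel-doubled "retina" PNG whose metadata carries the halved
# width/height used for display.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    _fig = plt.figure()
    _fig.add_subplot(111).plot([0, 1, 2], [0, 1, 4])
    _png = print_figure(_fig, fmt='png')
    print(type(_png), len(_png))       # bytes of the rendered PNG
    _data, _meta = retina_figure(_fig)
    print(_meta)                       # {'width': ..., 'height': ...}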
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of yap_ipython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pyplot as plt
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
# re-draw everything that is stale
try:
da = plt.draw_all
except AttributeError:
pass
else:
da()
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main yap_ipython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, str):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://yap_kernel.pylab.backend_inline','agg').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
if gui == 'agg':
gui = None
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for yap_ipython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
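# Illustrative sketch (assumes matplotlib is importable): map a requested GUI
# name to a matplotlib backend, or read the configured default when no gui is
# given. The printed values are the expected results, not assertions.
if __name__ == '__main__':
    print(find_gui_and_backend('qt'))      # expected ('qt', 'Qt5Agg')
    print(find_gui_and_backend('inline'))  # inline backend module path
    print(find_gui_and_backend())          # derived from matplotlib's rc default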
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pyplot as plt
plt.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from yap_ipython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# yap_ipython symbols to add
user_ns['figsize'] = figsize
from yap_ipython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
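# Illustrative sketch (assumes numpy and matplotlib are installed and a usable
# backend such as Agg can be selected): populate a plain dict the same way the
# pylab machinery populates the user namespace.
if __name__ == '__main__':
    _ns = {}
    import_pylab(_ns, import_all=False)
    print(sorted(k for k in ('np', 'plt', 'figsize', 'display', 'getfigs') if k in _ns))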
def configure_inline_support(shell, backend):
"""Configure an yap_ipython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from yap_kernel.pylab.backend_inline import InlineBackend
except ImportError:
return
import matplotlib
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from yap_kernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
# Save rcParams that will be overwritten
shell._saved_rcParams = {}
for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
# load inline_rc
matplotlib.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from yap_kernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
# only enable the formats once -> don't change the enabled formats (which the user may
# have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/yap_kernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name

View File

@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
"""Release data for the yap_ipython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2008, yap_ipython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
@@ -16,15 +16,15 @@
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# IPython version information. An empty _version_extra corresponds to a full
# yap_ipython version information. An empty _version_extra corresponds to a full
# release. 'dev' as a _version_extra string means this is a development
# version
_version_major = 0
_version_minor = 1
_version_major = 6
_version_minor = 2
_version_patch = 0
_version_extra = '.dev'
# _version_extra = 'rc2'
_version_extra = '' # Uncomment this for full releases
#_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor, _version_patch]
@@ -40,11 +40,11 @@ version_info = (_version_major, _version_minor, _version_patch, _version_extra)
kernel_protocol_version_info = (5, 0)
kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
description = "IPython: Productive Interactive Computing"
description = "yap_ipython: Productive Interactive Computing"
long_description = \
"""
IPython provides a rich toolkit to help you make the most out of using Python
yap_ipython provides a rich toolkit to help you make the most out of using Python
interactively. Its main components are:
* A powerful interactive Python shell
@@ -64,7 +64,7 @@ The enhanced interactive Python shells have the following main features:
variables and keywords, filenames and function keywords.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
performing many tasks related either to yap_ipython or the operating system.
* A rich configuration system with easy switching between different setups
(simpler than changing $PYTHONSTARTUP environment variables every time).
@@ -79,7 +79,7 @@ The enhanced interactive Python shells have the following main features:
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available from IPython's `GitHub
The latest development version is always available from yap_ipython's `GitHub
site <http://github.com/ipython>`_.
"""
@@ -96,7 +96,7 @@ authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
}
author = 'The IPython Development Team'
author = 'The yap_ipython Development Team'
author_email = 'ipython-dev@python.org'
@@ -108,11 +108,12 @@ platforms = ['Linux','Mac OSX','Windows']
keywords = ['Interactive','Interpreter','Shell', 'Embedding']
classifiers = [
'Framework :: IPython',
'Framework :: yap_ipython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Shells'
]

View File

@@ -1,10 +1,10 @@
# encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
A mixin for :class:`~yap_ipython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
@@ -15,20 +15,14 @@ import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core.application import SYSTEM_CONFIG_DIRS
from IPython.core import pylabtools
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from yap_ipython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
from yap_ipython.core import pylabtools
from yap_ipython.utils.contexts import preserve_keys
from yap_ipython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
ENV_CONFIG_DIRS = []
_env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
if _env_config_dir not in SYSTEM_CONFIG_DIRS:
# only add ENV_CONFIG if sys.prefix is not already included
ENV_CONFIG_DIRS.append(_env_config_dir)
from yap_ipython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
@@ -42,15 +36,15 @@ backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'YAPInteractive.autoindent',
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'YAPInteractive.automagic',
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
yap_ipython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'YAPInteractive.pdb',
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
@@ -58,8 +52,8 @@ addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'YAPInteractive.color_info',
"""IPython can display information about objects via a set of functions,
addflag('color-info', 'InteractiveShell.color_info',
"""yap_ipython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
@@ -67,43 +61,43 @@ addflag('color-info', 'YAPInteractive.color_info',
"Disable using colors for info related things."
)
nosep_config = Config()
nosep_config.YAPInteractive.separate_in = ''
nosep_config.YAPInteractive.separate_out = ''
nosep_config.YAPInteractive.separate_out2 = ''
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'YAPInteractiveApp' : {'pylab' : 'auto'}},
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'YAPInteractiveApp' : {'matplotlib' : 'auto'}},
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='YAPInteractive.autocall',
colors='YAPInteractive.colors',
logfile='YAPInteractive.logfile',
logappend='YAPInteractive.logappend',
c='YAPInteractiveApp.code_to_run',
m='YAPInteractiveApp.module_to_run',
ext='YAPInteractiveApp.extra_extension',
gui='YAPInteractiveApp.gui',
pylab='YAPInteractiveApp.pylab',
matplotlib='YAPInteractiveApp.matplotlib',
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'YAPInteractive.cache_size'
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class YAPInteractiveApp(Configurable):
"""A Mixin for applications that start YAPInteractive instances.
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
@@ -118,14 +112,14 @@ class YAPInteractiveApp(Configurable):
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
help="A list of dotted module names of yap_ipython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
help="dotted module name of an yap_ipython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
help="Reraise exceptions encountered loading yap_ipython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
@@ -137,17 +131,17 @@ class YAPInteractiveApp(Configurable):
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
help="""List of files to run at yap_ipython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
variable at yap_ipython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
help="""lines of code to run at yap_ipython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
@@ -168,13 +162,13 @@ class YAPInteractiveApp(Configurable):
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
help="""If true, yap_ipython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
shell = Instance('yap_ipython.core.interactiveshell.YAPInteractiveABC',
shell = Instance('yap_ipython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
@@ -234,19 +228,19 @@ class YAPInteractiveApp(Configurable):
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
"""Load all yap_ipython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
self.log.debug("Loading yap_ipython extensions...")
extensions = self.default_extensions + self.extensions
if self.extra_extension:
extensions.append(self.extra_extension)
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.log.info("Loading yap_ipython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
if self.reraise_ipython_extension_failures:

View File

@@ -0,0 +1,137 @@
# encoding: utf-8
"""
Simple utility for splitting user input. This is used by both inputsplitter and
prefilter.
Authors:
* Brian Granger
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
import sys
from yap_ipython.utils import py3compat
from yap_ipython.utils.encoding import get_stream_enc
#-----------------------------------------------------------------------------
# Main function
#-----------------------------------------------------------------------------
# RegExp for splitting line contents into pre-char//first word-method//rest.
# For clarity, each group is on one line.
# WARNING: update the regexp if the escapes in interactiveshell are changed, as
# they are hardwired in.
# Although it's not solely driven by the regex, note that:
# ,;/% only trigger if they are the first character on the line
# ! and !! trigger if they are first char(s) *or* follow an indent
# ? triggers as first or last char.
line_split = re.compile("""
^(\s*) # any leading space
([,;/%]|!!?|\?\??)? # escape character or characters
\s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading %
# to correctly treat things like '?%magic'
(.*?$|$) # rest of line
""", re.VERBOSE)
def split_user_input(line, pattern=None):
"""Split user input into initial whitespace, escape character, function part
and the rest.
"""
# We need to ensure that the rest of this routine deals only with unicode
encoding = get_stream_enc(sys.stdin, 'utf-8')
line = py3compat.cast_unicode(line, encoding)
if pattern is None:
pattern = line_split
match = pattern.match(line)
if not match:
# print "match failed for line '%s'" % line
try:
ifun, the_rest = line.split(None,1)
except ValueError:
# print "split failed for line '%s'" % line
ifun, the_rest = line, u''
pre = re.match('^(\s*)(.*)',line).groups()[0]
esc = ""
else:
pre, esc, ifun, the_rest = match.groups()
#print 'line:<%s>' % line # dbg
#print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg
return pre, esc or '', ifun.strip(), the_rest.lstrip()
class LineInfo(object):
"""A single line of input and associated info.
Includes the following as properties:
line
The original, raw line
continue_prompt
Is this line a continuation in a sequence of multiline input?
pre
Any leading whitespace.
esc
The escape character(s) in pre or the empty string if there isn't one.
Note that '!!' and '??' are possible values for esc. Otherwise it will
always be a single character.
ifun
The 'function part', which is basically the maximal initial sequence
of valid python identifiers and the '.' character. This is what is
checked for alias and magic transformations, used for auto-calling,
etc. In contrast to Python identifiers, it may start with "%" and contain
"*".
the_rest
Everything else on the line.
"""
def __init__(self, line, continue_prompt=False):
self.line = line
self.continue_prompt = continue_prompt
self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line)
self.pre_char = self.pre.strip()
if self.pre_char:
self.pre_whitespace = '' # No whitespace allowed before esc chars
else:
self.pre_whitespace = self.pre
def ofind(self, ip):
"""Do a full, attribute-walking lookup of the ifun in the various
namespaces for the given yap_ipython InteractiveShell instance.
Return a dict with keys: {found, obj, ospace, ismagic}
Note: can cause state changes because of calling getattr, but should
only be run if autocall is on and if the line hasn't matched any
other, less dangerous handlers.
Does cache the results of the call, so can be called multiple times
without worrying about *further* damaging state.
"""
return ip._ofind(self.ifun)
def __str__(self):
return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)

File diff suppressed because it is too large

View File

@@ -0,0 +1,348 @@
# -*- coding: utf-8 -*-
"""Usage information for the main yap_ipython applications.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
from yap_ipython.core import release
cl_usage = """\
===========
yap_ipython
===========
Tools for Interactive Computing in Python
=========================================
A Python shell with automatic history (input and output), dynamic object
introspection, easier configuration, command completion, access to the
system shell and more. yap_ipython can also be embedded in running programs.
Usage
ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
If invoked with no options, it executes the file and exits, passing the
remaining arguments to the script, just as if you had specified the same
command with python. You may need to specify `--` before args to be passed
to the script, to prevent yap_ipython from attempting to parse them. If you
specify the option `-i` before the filename, it will enter an interactive
yap_ipython session after running the script, rather than exiting. Files ending
in .py will be treated as normal Python, but files ending in .ipy can
contain special yap_ipython syntax (magic commands, shell expansions, etc.).
Almost all configuration in yap_ipython is available via the command-line. Do
`ipython --help-all` to see all available options. For persistent
configuration, look into your `ipython_config.py` configuration file for
details.
This file is typically installed in the `IPYTHONDIR` directory, and there
is a separate configuration directory for each profile. The default profile
directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
C:\\Users\\YourUserName in most instances.
To initialize a profile with the default configuration file, do::
$> ipython profile create
and start editing `IPYTHONDIR/profile_default/ipython_config.py`
In yap_ipython's documentation, we will refer to this directory as
`IPYTHONDIR`; you can change its default location by creating an
environment variable with this name and setting it to the desired path.
For more information, see the manual available in HTML and PDF in your
installation, or online at http://ipython.org/documentation.html.
"""
interactive_usage = """
yap_ipython -- An enhanced Interactive Python
=========================================
yap_ipython offers a fully compatible replacement for the standard Python
interpreter, with convenient shell features, special commands, command
history mechanism and output results caching.
At your system command line, type 'ipython -h' to see the command line
options available. This document only describes interactive features.
GETTING HELP
------------
Within yap_ipython you have various way to access help:
? -> Introduction and overview of yap_ipython's features (this screen).
object? -> Details about 'object'.
object?? -> More detailed, verbose information about 'object'.
%quickref -> Quick reference of all yap_ipython specific syntax and magics.
help -> Access Python's own help system.
If you are in terminal yap_ipython you can quit this screen by pressing `q`.
MAIN FEATURES
-------------
* Access to the standard Python help with object docstrings and the Python
manuals. Simply type 'help' (no quotes) to invoke it.
* Magic commands: type %magic for information on the magic subsystem.
* System command aliases, via the %alias command or the configuration file(s).
* Dynamic object information:
Typing ?word or word? prints detailed information about an object. Certain
long strings (code, etc.) get snipped in the center for brevity.
Typing ??word or word?? gives access to the full information without
snipping long strings. Strings that are longer than the screen are printed
through the less pager.
The ?/?? system gives access to the full source code for any object (if
available), shows function prototypes and other useful information.
If you just want to see an object's docstring, type '%pdoc object' (without
quotes, and without % if you have automagic on).
* Tab completion in the local namespace:
At any time, hitting tab will complete any available python commands or
variable names, and show you a list of the possible completions if there's
no unambiguous one. It will also complete filenames in the current directory.
* Search previous command history in multiple ways:
- Start typing, and then use arrow keys up/down or (Ctrl-p/Ctrl-n) to search
through the history items that match what you've typed so far.
- Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
your history for lines that match what you've typed so far, completing as
much as it can.
- %hist: search history by index.
* Persistent command history across sessions.
* Logging of input with the ability to save and restore a working session.
* System shell with !. Typing !ls will run 'ls' in the current directory.
* The reload command does a 'deep' reload of a module: changes made to the
module since you imported it will actually be available without having to exit.
* Verbose and colored exception traceback printouts. See the magic xmode and
xcolor functions for details (just type %magic).
* Input caching system:
yap_ipython offers numbered prompts (In/Out) with input and output caching. All
input is saved and can be retrieved as variables (besides the usual arrow
key recall).
The following GLOBAL variables always exist (so don't overwrite them!):
_i: stores previous input.
_ii: next previous.
_iii: next-next previous.
_ih : a list of all input _ih[n] is the input from line n.
Additionally, global variables named _i<n> are dynamically created (<n>
being the prompt counter), such that _i<n> == _ih[<n>]
For example, what you typed at prompt 14 is available as _i14 and _ih[14].
You can create macros which contain multiple input lines from this history,
for later re-execution, with the %macro function.
The history function %hist allows you to see any part of your input history
by printing a range of the _i variables. Note that inputs which contain
magic functions (%) appear in the history with a prepended comment. This is
because they aren't really valid Python code, so you can't exec them.
* Output caching system:
For output that is returned from actions, a system similar to the input
cache exists but using _ instead of _i. Only actions that produce a result
(NOT assignments, for example) are cached. If you are familiar with
Mathematica, yap_ipython's _ variables behave exactly like Mathematica's %
variables.
The following GLOBAL variables always exist (so don't overwrite them!):
_ (one underscore): previous output.
__ (two underscores): next previous.
___ (three underscores): next-next previous.
Global variables named _<n> are dynamically created (<n> being the prompt
counter), such that the result of output <n> is always available as _<n>.
Finally, a global dictionary named _oh exists with entries for all lines
which generated output.
* Directory history:
Your history of visited directories is kept in the global list _dh, and the
magic %cd command can be used to go to any entry in that list.
* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
1. Auto-parentheses
Callable objects (i.e. functions, methods, etc) can be invoked like
this (notice the commas between the arguments)::
In [1]: callable_ob arg1, arg2, arg3
and the input will be translated to this::
callable_ob(arg1, arg2, arg3)
This feature is off by default (in rare cases it can produce
undesirable side-effects), but you can activate it at the command-line
by starting yap_ipython with `--autocall 1`, set it permanently in your
configuration file, or turn on at runtime with `%autocall 1`.
You can force auto-parentheses by using '/' as the first character
of a line. For example::
In [1]: /globals # becomes 'globals()'
Note that the '/' MUST be the first character on the line! This
won't work::
In [2]: print /globals # syntax error
In most cases the automatic algorithm should work, so you should
rarely need to explicitly invoke /. One notable exception is if you
are trying to call a function with a list of tuples as arguments (the
parentheses will confuse yap_ipython)::
In [1]: zip (1,2,3),(4,5,6) # won't work
but this will work::
In [2]: /zip (1,2,3),(4,5,6)
------> zip ((1,2,3),(4,5,6))
Out[2]= [(1, 4), (2, 5), (3, 6)]
yap_ipython tells you that it has altered your command line by
displaying the new command line preceded by -->. e.g.::
In [18]: callable list
-------> callable (list)
2. Auto-Quoting
You can force auto-quoting of a function's arguments by using ',' as
the first character of a line. For example::
In [1]: ,my_function /home/me # becomes my_function("/home/me")
If you use ';' instead, the whole argument is quoted as a single
string (while ',' splits on whitespace)::
In [2]: ,my_function a b c # becomes my_function("a","b","c")
In [3]: ;my_function a b c # becomes my_function("a b c")
Note that the ',' MUST be the first character on the line! This
won't work::
In [4]: x = ,my_function /home/me # syntax error
"""
interactive_usage_min = """\
An enhanced console for Python.
Some of its features are:
- Tab completion in the local namespace.
- Logging of input, see command-line options.
- System shell escape via ! , eg !ls.
- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
- Keeps track of locally defined variables via %who, %whos.
- Show object information with a ? eg ?x or x? (use ?? for more info).
"""
quick_reference = r"""
yap_ipython -- An enhanced Interactive Python - Quick Reference Card
================================================================
obj?, obj?? : Get help, or more help for object (also works as
?obj, ??obj).
?foo.*abc* : List names in 'foo' containing 'abc' in them.
%magic : Information about yap_ipython's 'magic' % functions.
Magic functions are prefixed by % or %%, and typically take their arguments
without parentheses, quotes or even commas for convenience. Line magics take a
single % and cell magics are prefixed with two %%.
Example magic function calls:
%alias d ls -F : 'd' is now an alias for 'ls -F'
alias d ls -F : Works if 'alias' not a python name
alist = %alias : Get list of aliases to 'alist'
cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
%cd?? : See help AND source for magic %cd
%timeit x=10 : time the 'x=10' statement with high precision.
%%timeit x=2**100
x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
counted. This is an example of a cell magic.
System commands:
!cp a.txt b/ : System command escape, calls os.system()
cp a.txt b/ : after %rehashx, most system commands work without !
cp ${f}.txt $bar : Variable expansion in magics and system commands
files = !ls /usr : Capture system command output
files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
History:
_i, _ii, _iii : Previous, next previous, next next previous input
_i4, _ih[2:5] : Input history line 4, lines 2-4
exec _i81 : Execute input history line #81 again
%rep 81 : Edit input history line #81
_, __, ___ : previous, next previous, next next previous output
_dh : Directory history
_oh : Output history
%hist : Command history of current session.
%hist -g foo : Search command history of (almost) all sessions for 'foo'.
%hist -g : Command history of (almost) all sessions.
%hist 1/2-8 : Command history containing lines 2-8 of session 1.
%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
line 5 of 6 sessions ago.
%edit 0/ : Open editor to execute code with history of current session.
Autocall:
f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
/f 1,2 : f(1,2) (forced autoparen)
,f 1 2 : f("1","2")
;f 1 2 : f("1 2")
Remember: TAB completion works in many contexts, not just file names
or python names.
The following magic functions are currently available:
"""
default_banner_parts = ["Python %s\n"%sys.version.split("\n")[0],
"Type 'copyright', 'credits' or 'license' for more information\n" ,
"yap_ipython {version} -- An enhanced Interactive Python. Type '?' for help.\n".format(version=release.version),
]
default_banner = ''.join(default_banner_parts)
# deprecated GUI banner
default_gui_banner = '\n'.join([
'DEPRECATED: yap_ipython.core.usage.default_gui_banner is deprecated and will be removed',
default_banner,
])

View File

@@ -1,8 +1,8 @@
"""Public API for display tools in IPython.
"""Public API for display tools in yap_ipython.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
# Copyright (C) 2012 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
@@ -12,5 +12,5 @@
# Imports
#-----------------------------------------------------------------------------
from IPython.core.display import *
from IPython.lib.display import *
from yap_ipython.core.display import *
from yap_ipython.lib.display import *

View File

@@ -0,0 +1,2 @@
# -*- coding: utf-8 -*-
"""This directory is meant for yap_ipython extensions."""

View File

@@ -0,0 +1,524 @@
"""yap_ipython extension to reload modules before executing user code.
``autoreload`` reloads modules automatically before entering the execution of
code typed at the yap_ipython prompt.
This makes for example the following workflow possible:
.. sourcecode:: ipython
In [1]: %load_ext autoreload
In [2]: %autoreload 2
In [3]: from foo import some_function
In [4]: some_function()
Out[4]: 42
In [5]: # open foo.py in an editor and change some_function to return 43
In [6]: some_function()
Out[6]: 43
The module was reloaded without reloading it explicitly, and the object
imported with ``from foo import ...`` was also updated.
Usage
=====
The following magic commands are provided:
``%autoreload``
Reload all modules (except those excluded by ``%aimport``)
automatically now.
``%autoreload 0``
Disable automatic reloading.
``%autoreload 1``
Reload all modules imported with ``%aimport`` every time before
executing the Python code typed.
``%autoreload 2``
Reload all modules (except those excluded by ``%aimport``) every
time before executing the Python code typed.
``%aimport``
List modules which are to be automatically imported or not to be imported.
``%aimport foo``
Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
``%aimport foo, bar``
Import modules 'foo', 'bar' and mark them to be autoreloaded for ``%autoreload 1``
``%aimport -foo``
Mark module 'foo' to not be autoreloaded.
Caveats
=======
Reloading Python modules in a reliable way is in general difficult,
and unexpected things may occur. ``%autoreload`` tries to work around
common pitfalls by replacing function code objects and parts of
classes previously in the module with new versions. This makes the
following things work:
- Functions and classes imported via 'from xxx import foo' are upgraded
to new versions when 'xxx' is reloaded.
- Methods and properties of classes are upgraded on reload, so that
calling 'c.foo()' on an object 'c' created before the reload causes
the new code for 'foo' to be executed.
Some of the known remaining caveats are:
- Replacing code objects does not always succeed: changing a @property
in a class to an ordinary method or a method to a member variable
can cause problems (but in old objects only).
- Functions that are removed (e.g. via monkey-patching) from a module
before it is reloaded are not upgraded.
- C extension modules cannot be reloaded, and so cannot be autoreloaded.
"""
skip_doctest = True
#-----------------------------------------------------------------------------
# Copyright (C) 2000 Thomas Heller
# Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
# Copyright (C) 2012 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#
# This yap_ipython module is written by Pauli Virtanen, based on the autoreload
# code by Thomas Heller.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import traceback
import types
import weakref
from importlib import import_module
from imp import reload
from yap_ipython.utils import openpy
#------------------------------------------------------------------------------
# Autoreload functionality
#------------------------------------------------------------------------------
class ModuleReloader(object):
enabled = False
"""Whether this reloader is enabled"""
check_all = True
"""Autoreload all modules, not just those listed in 'modules'"""
def __init__(self):
# Modules that failed to reload: {module: mtime-on-failed-reload, ...}
self.failed = {}
# Modules specially marked as autoreloadable.
self.modules = {}
# Modules specially marked as not autoreloadable.
self.skip_modules = {}
# (module-name, name) -> weakref, for replacing old code objects
self.old_objects = {}
# Module modification timestamps
self.modules_mtimes = {}
# Cache module modification times
self.check(check_all=True, do_reload=False)
def mark_module_skipped(self, module_name):
"""Skip reloading the named module in the future"""
try:
del self.modules[module_name]
except KeyError:
pass
self.skip_modules[module_name] = True
def mark_module_reloadable(self, module_name):
"""Reload the named module in the future (if it is imported)"""
try:
del self.skip_modules[module_name]
except KeyError:
pass
self.modules[module_name] = True
def aimport_module(self, module_name):
"""Import a module, and mark it reloadable
Returns
-------
top_module : module
The imported module if it is top-level, otherwise the top-level package containing it
top_name : module
Name of top_module
"""
self.mark_module_reloadable(module_name)
import_module(module_name)
top_name = module_name.split('.')[0]
top_module = sys.modules[top_name]
return top_module, top_name
def filename_and_mtime(self, module):
if not hasattr(module, '__file__') or module.__file__ is None:
return None, None
if getattr(module, '__name__', None) in [None, '__mp_main__', '__main__']:
# we cannot reload(__main__) or reload(__mp_main__)
return None, None
filename = module.__file__
path, ext = os.path.splitext(filename)
if ext.lower() == '.py':
py_filename = filename
else:
try:
py_filename = openpy.source_from_cache(filename)
except ValueError:
return None, None
try:
pymtime = os.stat(py_filename).st_mtime
except OSError:
return None, None
return py_filename, pymtime
def check(self, check_all=False, do_reload=True):
"""Check whether some modules need to be reloaded."""
if not self.enabled and not check_all:
return
if check_all or self.check_all:
modules = list(sys.modules.keys())
else:
modules = list(self.modules.keys())
for modname in modules:
m = sys.modules.get(modname, None)
if modname in self.skip_modules:
continue
py_filename, pymtime = self.filename_and_mtime(m)
if py_filename is None:
continue
try:
if pymtime <= self.modules_mtimes[modname]:
continue
except KeyError:
self.modules_mtimes[modname] = pymtime
continue
else:
if self.failed.get(py_filename, None) == pymtime:
continue
self.modules_mtimes[modname] = pymtime
# If we've reached this point, we should try to reload the module
if do_reload:
try:
superreload(m, reload, self.old_objects)
if py_filename in self.failed:
del self.failed[py_filename]
except:
print("[autoreload of %s failed: %s]" % (
modname, traceback.format_exc(10)), file=sys.stderr)
self.failed[py_filename] = pymtime
#------------------------------------------------------------------------------
# superreload
#------------------------------------------------------------------------------
func_attrs = ['__code__', '__defaults__', '__doc__',
'__closure__', '__globals__', '__dict__']
def update_function(old, new):
"""Upgrade the code object of a function"""
for name in func_attrs:
try:
setattr(old, name, getattr(new, name))
except (AttributeError, TypeError):
pass
def update_class(old, new):
"""Replace stuff in the __dict__ of a class, and upgrade
method code objects"""
for key in list(old.__dict__.keys()):
old_obj = getattr(old, key)
try:
new_obj = getattr(new, key)
if old_obj == new_obj:
continue
except AttributeError:
# obsolete attribute: remove it
try:
delattr(old, key)
except (AttributeError, TypeError):
pass
continue
if update_generic(old_obj, new_obj): continue
try:
setattr(old, key, getattr(new, key))
except (AttributeError, TypeError):
pass # skip non-writable attributes
def update_property(old, new):
"""Replace get/set/del functions of a property"""
update_generic(old.fdel, new.fdel)
update_generic(old.fget, new.fget)
update_generic(old.fset, new.fset)
def isinstance2(a, b, typ):
return isinstance(a, typ) and isinstance(b, typ)
UPDATE_RULES = [
(lambda a, b: isinstance2(a, b, type),
update_class),
(lambda a, b: isinstance2(a, b, types.FunctionType),
update_function),
(lambda a, b: isinstance2(a, b, property),
update_property),
]
UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.MethodType),
lambda a, b: update_function(a.__func__, b.__func__)),
])
def update_generic(a, b):
for type_check, update in UPDATE_RULES:
if type_check(a, b):
update(a, b)
return True
return False
class StrongRef(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
return self.obj
def superreload(module, reload=reload, old_objects={}):
"""Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading
"""
# collect old objects in the module
for name, obj in list(module.__dict__.items()):
if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
continue
key = (module.__name__, name)
try:
old_objects.setdefault(key, []).append(weakref.ref(obj))
except TypeError:
pass
# reload module
try:
# clear namespace first from old cruft
old_dict = module.__dict__.copy()
old_name = module.__name__
module.__dict__.clear()
module.__dict__['__name__'] = old_name
module.__dict__['__loader__'] = old_dict['__loader__']
except (TypeError, AttributeError, KeyError):
pass
try:
module = reload(module)
except:
# restore module dictionary on failed reload
module.__dict__.update(old_dict)
raise
# iterate over all objects and update functions & classes
for name, new_obj in list(module.__dict__.items()):
key = (module.__name__, name)
if key not in old_objects: continue
new_refs = []
for old_ref in old_objects[key]:
old_obj = old_ref()
if old_obj is None: continue
new_refs.append(old_ref)
update_generic(old_obj, new_obj)
if new_refs:
old_objects[key] = new_refs
else:
del old_objects[key]
return module
#------------------------------------------------------------------------------
# yap_ipython connectivity
#------------------------------------------------------------------------------
from yap_ipython.core.magic import Magics, magics_class, line_magic
@magics_class
class AutoreloadMagics(Magics):
def __init__(self, *a, **kw):
super(AutoreloadMagics, self).__init__(*a, **kw)
self._reloader = ModuleReloader()
self._reloader.check_all = False
self.loaded_modules = set(sys.modules)
@line_magic
def autoreload(self, parameter_s=''):
r"""%autoreload => Reload modules automatically
%autoreload
Reload all modules (except those excluded by %aimport) automatically
now.
%autoreload 0
Disable automatic reloading.
%autoreload 1
Reload all modules imported with %aimport every time before executing
the Python code typed.
%autoreload 2
Reload all modules (except those excluded by %aimport) every time
before executing the Python code typed.
Reloading Python modules in a reliable way is in general
difficult, and unexpected things may occur. %autoreload tries to
work around common pitfalls by replacing function code objects and
parts of classes previously in the module with new versions. This
makes the following things work:
- Functions and classes imported via 'from xxx import foo' are upgraded
to new versions when 'xxx' is reloaded.
- Methods and properties of classes are upgraded on reload, so that
calling 'c.foo()' on an object 'c' created before the reload causes
the new code for 'foo' to be executed.
Some of the known remaining caveats are:
- Replacing code objects does not always succeed: changing a @property
in a class to an ordinary method or a method to a member variable
can cause problems (but in old objects only).
- Functions that are removed (e.g. via monkey-patching) from a module
before it is reloaded are not upgraded.
- C extension modules cannot be reloaded, and so cannot be
autoreloaded.
"""
if parameter_s == '':
self._reloader.check(True)
elif parameter_s == '0':
self._reloader.enabled = False
elif parameter_s == '1':
self._reloader.check_all = False
self._reloader.enabled = True
elif parameter_s == '2':
self._reloader.check_all = True
self._reloader.enabled = True
@line_magic
def aimport(self, parameter_s='', stream=None):
"""%aimport => Import modules for automatic reloading.
%aimport
List modules to automatically import and not to import.
%aimport foo
Import module 'foo' and mark it to be autoreloaded for %autoreload 1
%aimport foo, bar
Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1
%aimport -foo
Mark module 'foo' to not be autoreloaded for %autoreload 1
"""
modname = parameter_s
if not modname:
to_reload = sorted(self._reloader.modules.keys())
to_skip = sorted(self._reloader.skip_modules.keys())
if stream is None:
stream = sys.stdout
if self._reloader.check_all:
stream.write("Modules to reload:\nall-except-skipped\n")
else:
stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload))
stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip))
elif modname.startswith('-'):
modname = modname[1:]
self._reloader.mark_module_skipped(modname)
else:
for _module in ([_.strip() for _ in modname.split(',')]):
top_module, top_name = self._reloader.aimport_module(_module)
# Inject module to user namespace
self.shell.push({top_name: top_module})
def pre_run_cell(self):
if self._reloader.enabled:
try:
self._reloader.check()
except:
pass
def post_execute_hook(self):
"""Cache the modification times of any modules imported in this execution
"""
newly_loaded_modules = set(sys.modules) - self.loaded_modules
for modname in newly_loaded_modules:
_, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
if pymtime is not None:
self._reloader.modules_mtimes[modname] = pymtime
self.loaded_modules.update(newly_loaded_modules)
def load_ipython_extension(ip):
"""Load the extension in yap_ipython."""
auto_reload = AutoreloadMagics(ip)
ip.register_magics(auto_reload)
ip.events.register('pre_run_cell', auto_reload.pre_run_cell)
ip.events.register('post_execute', auto_reload.post_execute_hook)
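# --- Hedged usage sketch (illustrative only, not executed on import) --------
# A typical interactive session with the magics defined above might look like
# the following; 'mypkg' is a made-up module name used purely for illustration:
#
#   In [1]: %load_ext autoreload
#   In [2]: %autoreload 2              # reload tracked modules before each cell
#   In [3]: from mypkg import helper
#   In [4]: helper()                   # later edits to mypkg are picked up
#
# %aimport can be combined with %autoreload 1 to restrict reloading to
# explicitly listed modules, as described in the docstrings above.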

View File

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
"""
**DEPRECATED**
The cython magic has been integrated into Cython itself,
which is now released in version 0.21.
See the `Cython` organisation on GitHub, repository `Cython`, file
`Cython/Build/IpythonMagic.py`.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, yap_ipython Development Team.
#-----------------------------------------------------------------------------
import warnings
## still load the magic in yap_ipython 3.x, remove completely in future versions.
def load_ipython_extension(ip):
"""Load the extension in yap_ipython."""
warnings.warn("""The Cython magic has been moved to the Cython package""")

View File

@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The yap_ipython Development Team
#-----------------------------------------------------------------------------
import warnings
def load_ipython_extension(ip):
"""Load the extension in yap_ipython."""
warnings.warn("The rmagic extension in yap_ipython has moved to "
"`rpy2.ipython`, please see `rpy2` documentation.")

View File

@@ -0,0 +1,226 @@
# -*- coding: utf-8 -*-
"""
%store magic for lightweight persistence.
Stores variables, aliases and macros in yap_ipython's database.
To automatically restore stored variables at startup, add this to your
:file:`ipython_config.py` file::
c.StoreMagics.autorestore = True
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import inspect, os, sys, textwrap
from yap_ipython.core.error import UsageError
from yap_ipython.core.magic import Magics, magics_class, line_magic
from traitlets import Bool
def restore_aliases(ip):
staliases = ip.db.get('stored_aliases', {})
for k,v in staliases.items():
#print "restore alias",k,v # dbg
#self.alias_table[k] = v
ip.alias_manager.define_alias(k,v)
def refresh_variables(ip):
db = ip.db
for key in db.keys('autorestore/*'):
# strip autorestore
justkey = os.path.basename(key)
try:
obj = db[key]
except KeyError:
print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
print("The error was:", sys.exc_info()[0])
else:
#print "restored",justkey,"=",obj #dbg
ip.user_ns[justkey] = obj
def restore_dhist(ip):
ip.user_ns['_dh'] = ip.db.get('dhist',[])
def restore_data(ip):
refresh_variables(ip)
restore_aliases(ip)
restore_dhist(ip)
@magics_class
class StoreMagics(Magics):
"""Lightweight persistence for python variables.
Provides the %store magic."""
autorestore = Bool(False, help=
"""If True, any %store-d variables will be automatically restored
when yap_ipython starts.
"""
).tag(config=True)
def __init__(self, shell):
super(StoreMagics, self).__init__(shell=shell)
self.shell.configurables.append(self)
if self.autorestore:
restore_data(self.shell)
@line_magic
def store(self, parameter_s=''):
"""Lightweight persistence for python variables.
Example::
In [1]: l = ['hello',10,'world']
In [2]: %store l
In [3]: exit
(yap_ipython session is closed and started again...)
ville@badger:~$ ipython
In [1]: l
NameError: name 'l' is not defined
In [2]: %store -r
In [3]: l
Out[3]: ['hello', 10, 'world']
Usage:
* ``%store`` - Show list of all variables and their current
values
* ``%store spam`` - Store the *current* value of the variable spam
to disk
* ``%store -d spam`` - Remove the variable and its value from storage
* ``%store -z`` - Remove all variables from storage
* ``%store -r`` - Refresh all variables from store (overwrite
current vals)
* ``%store -r spam bar`` - Refresh specified variables from store
(delete current val)
* ``%store foo >a.txt`` - Store value of foo to new file a.txt
* ``%store foo >>a.txt`` - Append value of foo to file a.txt
It should be noted that if you change the value of a variable, you
need to %store it again if you want to persist the new value.
Note also that the variables will need to be pickleable; most basic
python types can be safely %store'd.
Also aliases can be %store'd across sessions.
"""
opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
args = argsl.split(None,1)
ip = self.shell
db = ip.db
# delete
if 'd' in opts:
try:
todel = args[0]
except IndexError:
raise UsageError('You must provide the variable to forget')
else:
try:
del db['autorestore/' + todel]
except:
raise UsageError("Can't delete variable '%s'" % todel)
# reset
elif 'z' in opts:
for k in db.keys('autorestore/*'):
del db[k]
elif 'r' in opts:
if args:
for arg in args:
try:
obj = db['autorestore/' + arg]
except KeyError:
print("no stored variable %s" % arg)
else:
ip.user_ns[arg] = obj
else:
restore_data(ip)
# run without arguments -> list variables & values
elif not args:
vars = db.keys('autorestore/*')
vars.sort()
if vars:
size = max(map(len, vars))
else:
size = 0
print('Stored variables and their in-db values:')
fmt = '%-'+str(size)+'s -> %s'
get = db.get
for var in vars:
justkey = os.path.basename(var)
# print the first 50 characters of each value
print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
# default action - store the variable
else:
# %store foo >file.txt or >>file.txt
if len(args) > 1 and args[1].startswith('>'):
fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
if args[1].startswith('>>'):
fil = open(fnam, 'a')
else:
fil = open(fnam, 'w')
obj = ip.ev(args[0])
print("Writing '%s' (%s) to file '%s'." % (args[0],
obj.__class__.__name__, fnam))
if not isinstance (obj, str):
from pprint import pprint
pprint(obj, fil)
else:
fil.write(obj)
if not obj.endswith('\n'):
fil.write('\n')
fil.close()
return
# %store foo
try:
obj = ip.user_ns[args[0]]
except KeyError:
# it might be an alias
name = args[0]
try:
cmd = ip.alias_manager.retrieve_alias(name)
except ValueError:
raise UsageError("Unknown variable '%s'" % name)
staliases = db.get('stored_aliases',{})
staliases[name] = cmd
db['stored_aliases'] = staliases
print("Alias stored: %s (%s)" % (name, cmd))
return
else:
modname = getattr(inspect.getmodule(obj), '__name__', '')
if modname == '__main__':
print(textwrap.dedent("""\
Warning: %s is %s
Proper storage of interactively declared classes (or instances
of those classes) is not possible! Only instances
of classes in real modules on file system can be %%store'd.
""" % (args[0], obj) ))
return
#pickled = pickle.dumps(obj)
db[ 'autorestore/' + args[0] ] = obj
print("Stored '%s' (%s)" % (args[0], obj.__class__.__name__))
def load_ipython_extension(ip):
"""Load the extension in yap_ipython."""
ip.register_magics(StoreMagics)
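# --- Hedged usage sketch (illustrative only, not executed on import) --------
# A possible session using the %store magic defined above; the variable name
# is made up for illustration:
#
#   In [1]: %load_ext storemagic
#   In [2]: answer = 42
#   In [3]: %store answer        # persist 'answer' in yap_ipython's database
#   In [4]: %store               # list stored variables and their values
#   In [5]: %store -d answer     # forget it again
#
# With ``c.StoreMagics.autorestore = True`` in ipython_config.py, stored
# variables are restored automatically at startup.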

View File

@@ -0,0 +1,32 @@
"""
**DEPRECATED**
A print function that pretty prints sympy Basic objects.
:moduleauthor: Brian Granger
Usage
=====
Once the extension is loaded, Sympy Basic objects are automatically
pretty-printed.
As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
sympy.interactive.ipythonprinting; any modifications to account for changes to
SymPy should be submitted to SymPy rather than changed here. This module is
kept only for backwards compatibility with old SymPy versions.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The yap_ipython Development Team
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import warnings
def load_ipython_extension(ip):
warnings.warn("The sympyprinting extension has moved to `sympy`, "
"use `from sympy import init_printing; init_printing()`")

View File

@@ -0,0 +1,5 @@
"""
This package contains all third-party modules bundled with yap_ipython.
"""
__all__ = ["simplegeneric"]

View File

@@ -0,0 +1,13 @@
#!/usr/bin/python
"""
`yap_ipython.external.mathjax` is deprecated with yap_ipython 4.0+
mathjax is now installed by default with the notebook package
"""
import sys
if __name__ == '__main__' :
sys.exit("yap_ipython.external.mathjax is deprecated, Mathjax is now installed by default with the notebook package")

View File

@@ -0,0 +1,95 @@
""" Import Qt in a manner suitable for an yap_ipython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask QT_API env variable
if QT_API not set:
ask matplotlib what it's using. If Qt4Agg or Qt5Agg, then use the
version matplotlib is configured with
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try in this order:
PyQt default version, PySide, PyQt5
else:
use what QT_API says
"""
# NOTE: This is no longer an external, third-party module, and should be
# considered part of yap_ipython. For compatibility however, it is being kept in
# yap_ipython/external.
import os
import sys
from yap_ipython.utils.version import check_version
from yap_ipython.external.qt_loaders import (load_qt, loaded_api, QT_API_PYSIDE,
QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5,
QT_API_PYQTv1, QT_API_PYQT_DEFAULT)
_qt_apis = (QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQTv1,
QT_API_PYQT_DEFAULT)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
backend = mpl.rcParams.get('backend', None)
if backend == 'Qt4Agg':
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt4v2':
return [QT_API_PYQT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
elif backend == 'Qt5Agg':
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
qt_api = os.environ.get('QT_API', None)
if qt_api is None:
#no ETS variable. Ask mpl, then use default fallback path
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE,
QT_API_PYQT5, QT_API_PYSIDE2]
elif qt_api not in _qt_apis:
raise RuntimeError("Invalid Qt API %r, valid values are: %r" %
(qt_api, ', '.join(_qt_apis)))
else:
return [qt_api]
api_opts = get_options()
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
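if __name__ == '__main__':
    # Hedged sketch: when run directly, report which binding the priority
    # rules documented above ended up selecting. QT_API is one of the
    # QT_API_* constants imported from qt_loaders.
    print("Selected Qt API:", QT_API)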

View File

@@ -0,0 +1,373 @@
"""
This module contains factory functions that attempt
to return Qt submodules from the various python Qt bindings.
It also protects against double-importing Qt with different
bindings, which is unstable and likely to crash
This is used primarily by qt and qt_for_kernel, and shouldn't
be accessed directly from the outside
"""
import sys
import types
from functools import partial
from importlib import import_module
from yap_ipython.utils.version import check_version
# Available APIs.
QT_API_PYQT = 'pyqt' # Force version 2
QT_API_PYQT5 = 'pyqt5'
QT_API_PYQTv1 = 'pyqtv1' # Force version 1
QT_API_PYQT_DEFAULT = 'pyqtdefault' # use system default for version 1 vs. 2
QT_API_PYSIDE = 'pyside'
QT_API_PYSIDE2 = 'pyside2'
api_to_module = {QT_API_PYSIDE2: 'PySide2',
QT_API_PYSIDE: 'PySide',
QT_API_PYQT: 'PyQt4',
QT_API_PYQTv1: 'PyQt4',
QT_API_PYQT5: 'PyQt5',
QT_API_PYQT_DEFAULT: 'PyQt4',
}
class ImportDenier(object):
"""Import Hook that will guard against bad Qt imports
once yap_ipython commits to a specific binding
"""
def __init__(self):
self.__forbidden = set()
def forbid(self, module_name):
sys.modules.pop(module_name, None)
self.__forbidden.add(module_name)
def find_module(self, fullname, path=None):
if path:
return
if fullname in self.__forbidden:
return self
def load_module(self, fullname):
raise ImportError("""
Importing %s disabled by yap_ipython, which has
already imported an Incompatible QT Binding: %s
""" % (fullname, loaded_api()))
ID = ImportDenier()
sys.meta_path.insert(0, ID)
def commit_api(api):
"""Commit to a particular API, and trigger ImportErrors on subsequent
dangerous imports"""
if api == QT_API_PYSIDE2:
ID.forbid('PySide')
ID.forbid('PyQt4')
ID.forbid('PyQt5')
if api == QT_API_PYSIDE:
ID.forbid('PySide2')
ID.forbid('PyQt4')
ID.forbid('PyQt5')
elif api == QT_API_PYQT5:
ID.forbid('PySide2')
ID.forbid('PySide')
ID.forbid('PyQt4')
else: # There are three other possibilities, all representing PyQt4
ID.forbid('PyQt5')
ID.forbid('PySide2')
ID.forbid('PySide')
def loaded_api():
"""Return which API is loaded, if any
If this returns anything besides None,
importing any other Qt binding is unsafe.
Returns
-------
None, 'pyside2', 'pyside', 'pyqt', 'pyqt5', or 'pyqtv1'
"""
if 'PyQt4.QtCore' in sys.modules:
if qtapi_version() == 2:
return QT_API_PYQT
else:
return QT_API_PYQTv1
elif 'PySide.QtCore' in sys.modules:
return QT_API_PYSIDE
elif 'PySide2.QtCore' in sys.modules:
return QT_API_PYSIDE2
elif 'PyQt5.QtCore' in sys.modules:
return QT_API_PYQT5
return None
def has_binding(api):
"""Safely check for PyQt4/5, PySide or PySide2, without importing submodules
Supports Python <= 3.3
Parameters
----------
api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyside' | 'pyside2' | 'pyqtdefault']
Which module to check for
Returns
-------
True if the relevant module appears to be importable
"""
# we can't import an incomplete pyside and pyqt4
# this will cause a crash in sip (#1431)
# check for complete presence before importing
module_name = api_to_module[api]
import imp
try:
#importing top level PyQt4/PySide module is ok...
mod = import_module(module_name)
#...importing submodules is not
imp.find_module('QtCore', mod.__path__)
imp.find_module('QtGui', mod.__path__)
imp.find_module('QtSvg', mod.__path__)
if api in (QT_API_PYQT5, QT_API_PYSIDE2):
# QT5 requires QtWidgets too
imp.find_module('QtWidgets', mod.__path__)
#we can also safely check PySide version
if api == QT_API_PYSIDE:
return check_version(mod.__version__, '1.0.3')
else:
return True
except ImportError:
return False
def has_binding_new(api):
"""Safely check for PyQt4/5, PySide or PySide2, without importing submodules
Supports Python >= 3.4
Parameters
----------
api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyside' | 'pyside2' | 'pyqtdefault']
Which module to check for
Returns
-------
True if the relevant module appears to be importable
"""
module_name = api_to_module[api]
from importlib.util import find_spec
required = ['QtCore', 'QtGui', 'QtSvg']
if api in (QT_API_PYQT5, QT_API_PYSIDE2):
# QT5 requires QtWidgets too
required.append('QtWidgets')
for submod in required:
try:
spec = find_spec('%s.%s' % (module_name, submod))
except ImportError:
# Package (e.g. PyQt5) not found
return False
else:
if spec is None:
# Submodule (e.g. PyQt5.QtCore) not found
return False
if api == QT_API_PYSIDE:
# We can also safely check PySide version
import PySide
return check_version(PySide.__version__, '1.0.3')
return True
if sys.version_info >= (3, 4):
has_binding = has_binding_new
def qtapi_version():
"""Return which QString API has been set, if any
Returns
-------
The QString API version (1 or 2), or None if not set
"""
try:
import sip
except ImportError:
return
try:
return sip.getapi('QString')
except ValueError:
return
def can_import(api):
"""Safely query whether an API is importable, without importing it"""
if not has_binding(api):
return False
current = loaded_api()
if api == QT_API_PYQT_DEFAULT:
return current in [QT_API_PYQT, QT_API_PYQTv1, None]
else:
return current in [api, None]
def import_pyqt4(version=2):
"""
Import PyQt4
Parameters
----------
version : 1, 2, or None
Which QString/QVariant API to use. Set to None to use the system
default
ImportErrors raised within this function are non-recoverable
"""
# The new-style string API (version=2) automatically
# converts QStrings to Unicode Python strings. Also, automatically unpacks
# QVariants to their underlying objects.
import sip
if version is not None:
sip.setapi('QString', version)
sip.setapi('QVariant', version)
from PyQt4 import QtGui, QtCore, QtSvg
if not check_version(QtCore.PYQT_VERSION_STR, '4.7'):
raise ImportError("yap_ipython requires PyQt4 >= 4.7, found %s" %
QtCore.PYQT_VERSION_STR)
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
# query for the API version (in case version == None)
version = sip.getapi('QString')
api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
return QtCore, QtGui, QtSvg, api
def import_pyqt5():
"""
Import PyQt5
ImportErrors raised within this function are non-recoverable
"""
import sip
from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
# Join QtGui and QtWidgets for Qt4 compatibility.
QtGuiCompat = types.ModuleType('QtGuiCompat')
QtGuiCompat.__dict__.update(QtGui.__dict__)
QtGuiCompat.__dict__.update(QtWidgets.__dict__)
api = QT_API_PYQT5
return QtCore, QtGuiCompat, QtSvg, api
def import_pyside():
"""
Import PySide
ImportErrors raised within this function are non-recoverable
"""
from PySide import QtGui, QtCore, QtSvg
return QtCore, QtGui, QtSvg, QT_API_PYSIDE
def import_pyside2():
"""
Import PySide2
ImportErrors raised within this function are non-recoverable
"""
from PySide2 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
# Join QtGui and QtWidgets for Qt4 compatibility.
QtGuiCompat = types.ModuleType('QtGuiCompat')
QtGuiCompat.__dict__.update(QtGui.__dict__)
QtGuiCompat.__dict__.update(QtWidgets.__dict__)
QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE2
def load_qt(api_options):
"""
Attempt to import Qt, given a preference list
of permissible bindings
It is safe to call this function multiple times.
Parameters
----------
api_options: List of strings
The order of APIs to try. Valid items are 'pyside', 'pyside2',
'pyqt', 'pyqt5', 'pyqtv1' and 'pyqtdefault'
Returns
-------
A tuple of QtCore, QtGui, QtSvg, QT_API
The first three are the Qt modules. The last is the
string indicating which module was loaded.
Raises
------
ImportError, if it isn't possible to import any requested
bindings (either because they aren't installed, or because
an incompatible library has already been installed)
"""
loaders = {
QT_API_PYSIDE2: import_pyside2,
QT_API_PYSIDE: import_pyside,
QT_API_PYQT: import_pyqt4,
QT_API_PYQT5: import_pyqt5,
QT_API_PYQTv1: partial(import_pyqt4, version=1),
QT_API_PYQT_DEFAULT: partial(import_pyqt4, version=None)
}
for api in api_options:
if api not in loaders:
raise RuntimeError(
"Invalid Qt API %r, valid values are: %s" %
(api, ", ".join(["%r" % k for k in loaders.keys()])))
if not can_import(api):
continue
#cannot safely recover from an ImportError during this
result = loaders[api]()
api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
commit_api(api)
return result
else:
raise ImportError("""
Could not load requested Qt binding. Please ensure that
PyQt4 >= 4.7, PyQt5, PySide >= 1.0.3 or PySide2 is available,
and only one is imported per session.
Currently-imported Qt library: %r
PyQt4 available (requires QtCore, QtGui, QtSvg): %s
PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
PySide >= 1.0.3 installed: %s
PySide2 installed: %s
Tried to load: %r
""" % (loaded_api(),
has_binding(QT_API_PYQT),
has_binding(QT_API_PYQT5),
has_binding(QT_API_PYSIDE),
has_binding(QT_API_PYSIDE2),
api_options))
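if __name__ == '__main__':
    # Hedged example of load_qt(): try the same default preference order used
    # by qt_for_kernel and report which binding was loaded. This commits the
    # process to one binding and raises ImportError if none is available.
    QtCore, QtGui, QtSvg, api = load_qt(
        [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5, QT_API_PYSIDE2])
    print("Loaded Qt binding:", api)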

View File

@@ -0,0 +1,29 @@
"""
Shim to maintain backwards compatibility with old frontend imports.
We have moved all contents of the old `frontend` subpackage into top-level
subpackages (`html`, `qt` and `terminal`), and flattened the notebook into
just `yap_ipython.html`, formerly `yap_ipython.frontend.html.notebook`.
This will let code that was making `from yap_ipython.frontend...` calls continue
working, though a warning will be printed.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from yap_ipython.utils.shimmodule import ShimModule, ShimWarning
warn("The top-level `frontend` package has been deprecated since yap_ipython 1.0. "
"All its subpackages have been moved to the top `yap_ipython` level.", ShimWarning)
# Unconditionally insert the shim into sys.modules so that further import calls
# trigger the custom attribute access above
sys.modules['yap_ipython.frontend.html.notebook'] = ShimModule(
src='yap_ipython.frontend.html.notebook', mirror='yap_ipython.html')
sys.modules['yap_ipython.frontend'] = ShimModule(
src='yap_ipython.frontend', mirror='yap_ipython')

View File

@@ -0,0 +1,28 @@
"""
Shim to maintain backwards compatibility with old yap_ipython.html imports.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from yap_ipython.utils.shimmodule import ShimModule, ShimWarning
warn("The `yap_ipython.html` package has been deprecated since yap_ipython 4.0. "
"You should import from `notebook` instead. "
"`yap_ipython.html.widgets` has moved to `ipywidgets`.", ShimWarning)
_widgets = sys.modules['yap_ipython.html.widgets'] = ShimModule(
src='yap_ipython.html.widgets', mirror='ipywidgets')
_html = ShimModule(
src='yap_ipython.html', mirror='notebook')
# hook up widgets
_html.widgets = _widgets
sys.modules['yap_ipython.html'] = _html
if __name__ == '__main__':
from notebook import notebookapp as app
app.launch_new_instance()

View File

@@ -0,0 +1,35 @@
"""
Shim to maintain backwards compatibility with old yap_ipython.kernel imports.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from yap_ipython.utils.shimmodule import ShimModule, ShimWarning
warn("The `yap_ipython.kernel` package has been deprecated since yap_ipython 4.0."
"You should import from yap_kernel or jupyter_client instead.", ShimWarning)
# zmq subdir is gone
sys.modules['yap_ipython.kernel.zmq.session'] = ShimModule(
src='yap_ipython.kernel.zmq.session', mirror='jupyter_client.session')
sys.modules['yap_ipython.kernel.zmq'] = ShimModule(
src='yap_ipython.kernel.zmq', mirror='yap_kernel')
for pkg in ('comm', 'inprocess'):
src = 'yap_ipython.kernel.%s' % pkg
sys.modules[src] = ShimModule(src=src, mirror='yap_kernel.%s' % pkg)
for pkg in ('ioloop', 'blocking'):
src = 'yap_ipython.kernel.%s' % pkg
sys.modules[src] = ShimModule(src=src, mirror='jupyter_client.%s' % pkg)
# required for `from yap_ipython.kernel import PKG`
from yap_kernel import comm, inprocess
from jupyter_client import ioloop, blocking
# public API
from yap_kernel.connect import *
from jupyter_client import *

View File

@@ -0,0 +1,3 @@
if __name__ == '__main__':
from yap_kernel import kernelapp as app
app.launch_new_instance()

View File

@@ -0,0 +1 @@
from jupyter_client.adapter import *

View File

@@ -0,0 +1 @@
from jupyter_client.channels import *

View File

@@ -0,0 +1 @@
from jupyter_client.channelsabc import *

View File

@@ -0,0 +1 @@
from jupyter_client.client import *

View File

@@ -0,0 +1 @@
from jupyter_client.clientabc import *

View File

@@ -0,0 +1,2 @@
from yap_kernel.connect import *
from jupyter_client.connect import *

View File

@@ -0,0 +1 @@
from jupyter_client.kernelspec import *

View File

@@ -0,0 +1 @@
from jupyter_client.kernelspecapp import *

View File

@@ -0,0 +1 @@
from jupyter_client.launcher import *

View File

@@ -0,0 +1 @@
from jupyter_client.manager import *

View File

@@ -0,0 +1 @@
from jupyter_client.managerabc import *

View File

@@ -0,0 +1 @@
from jupyter_client.multikernelmanager import *

View File

@@ -0,0 +1 @@
from jupyter_client.restarter import *

View File

@@ -0,0 +1 @@
from jupyter_client.threaded import *

View File

@@ -0,0 +1,21 @@
# encoding: utf-8
"""
Extra capabilities for yap_ipython
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from yap_ipython.lib.security import passwd
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

View File

@@ -0,0 +1,489 @@
# -*- coding: utf-8 -*-
"""Manage background (threaded) jobs conveniently from an interactive shell.
This module provides a BackgroundJobManager class. This is the main class
meant for public usage, it implements an object which can create and manage
new background jobs.
It also provides the actual job classes managed by these BackgroundJobManager
objects, see their docstrings below.
This system was inspired by discussions with B. Granger and the
BackgroundCommand class described in the book Python Scripting for
Computational Science, by H. P. Langtangen:
http://folk.uio.no/hpl/scripting
(although ultimately no code from this text was used, as yap_ipython's system is a
separate implementation).
An example notebook is provided in our documentation illustrating interactive
use of the system.
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Code begins
import sys
import threading
from yap_ipython import get_ipython
from yap_ipython.core.ultratb import AutoFormattedTB
from logging import error, debug
class BackgroundJobManager(object):
"""Class to manage a pool of backgrounded threaded jobs.
Below, we assume that 'jobs' is a BackgroundJobManager instance.
Usage summary (see the method docstrings for details):
jobs.new(...) -> start a new job
jobs() or jobs.status() -> print status summary of all jobs
jobs[N] -> returns job number N.
foo = jobs[N].result -> assign to variable foo the result of job N
jobs[N].traceback() -> print the traceback of dead job N
jobs.remove(N) -> remove (finished) job N
jobs.flush() -> remove all finished jobs
As a convenience feature, BackgroundJobManager instances provide the
utility result and traceback methods which retrieve the corresponding
information from the jobs list:
jobs.result(N) <--> jobs[N].result
jobs.traceback(N) <--> jobs[N].traceback()
While this appears minor, it allows you to use tab completion
interactively on the job manager instance.
"""
def __init__(self):
# Lists for job management, accessed via a property to ensure they're
# up to date.
self._running = []
self._completed = []
self._dead = []
# A dict of all jobs, so users can easily access any of them
self.all = {}
# For reporting
self._comp_report = []
self._dead_report = []
# Store status codes locally for fast lookups
self._s_created = BackgroundJobBase.stat_created_c
self._s_running = BackgroundJobBase.stat_running_c
self._s_completed = BackgroundJobBase.stat_completed_c
self._s_dead = BackgroundJobBase.stat_dead_c
@property
def running(self):
self._update_status()
return self._running
@property
def dead(self):
self._update_status()
return self._dead
@property
def completed(self):
self._update_status()
return self._completed
def new(self, func_or_exp, *args, **kwargs):
"""Add a new background job and start it in a separate thread.
There are two types of jobs which can be created:
1. Jobs based on expressions which can be passed to an eval() call.
The expression must be given as a string. For example:
job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
The given expression is passed to eval(), along with the optional
global/local dicts provided. If no dicts are given, they are
extracted automatically from the caller's frame.
A Python statement is NOT a valid eval() expression. Basically, you
can only use as an eval() argument something which can go on the right
of an '=' sign and be assigned to a variable.
For example,"print 'hello'" is not valid, but '2+3' is.
2. Jobs given a function object, optionally passing additional
positional arguments:
job_manager.new(myfunc, x, y)
The function is called with the given arguments.
If you need to pass keyword arguments to your function, you must
supply them as a dict named kw:
job_manager.new(myfunc, x, y, kw=dict(z=1))
The reason for this asymmetry is that the new() method needs to
maintain access to its own keywords, and this prevents name collisions
between arguments to new() and arguments to your own functions.
In both cases, the result is stored in the job.result field of the
background job object.
You can set `daemon` attribute of the thread by giving the keyword
argument `daemon`.
Notes and caveats:
1. All threads running share the same standard output. Thus, if your
background jobs generate output, it will come out on top of whatever
you are currently writing. For this reason, background jobs are best
used with silent functions which simply return their output.
2. Threads also all work within the same global namespace, and this
system does not lock interactive variables. So if you send a job to the
background which operates on a mutable object for a long time, and
start modifying that same mutable object interactively (or in another
backgrounded job), all sorts of bizarre behaviour will occur.
3. If a background job is spending a lot of time inside a C extension
module which does not release the Python Global Interpreter Lock
(GIL), this will block the yap_ipython prompt. This is simply because the
Python interpreter can only switch between threads at Python
bytecodes. While the execution is inside C code, the interpreter must
simply wait unless the extension module releases the GIL.
4. There is no way, due to limitations in the Python threads library,
to kill a thread once it has started."""
if callable(func_or_exp):
kw = kwargs.get('kw',{})
job = BackgroundJobFunc(func_or_exp,*args,**kw)
elif isinstance(func_or_exp, str):
if not args:
frame = sys._getframe(1)
glob, loc = frame.f_globals, frame.f_locals
elif len(args)==1:
glob = loc = args[0]
elif len(args)==2:
glob,loc = args
else:
raise ValueError(
'Expression jobs take at most 2 args (globals,locals)')
job = BackgroundJobExpr(func_or_exp, glob, loc)
else:
raise TypeError('invalid args for new job')
if kwargs.get('daemon', False):
job.daemon = True
job.num = len(self.all)+1 if self.all else 0
self.running.append(job)
self.all[job.num] = job
debug('Starting job # %s in a separate thread.' % job.num)
job.start()
return job
def __getitem__(self, job_key):
num = job_key if isinstance(job_key, int) else job_key.num
return self.all[num]
def __call__(self):
"""An alias to self.status(),
This allows you to simply call a job manager instance much like the
Unix `jobs` shell command."""
return self.status()
def _update_status(self):
"""Update the status of the job lists.
This method moves finished jobs to one of two lists:
- self.completed: jobs which completed successfully
- self.dead: jobs which finished but died.
It also copies those jobs to corresponding _report lists. These lists
are used to report jobs completed/dead since the last update, and are
then cleared by the reporting function after each call."""
# Status codes
srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
# State lists, use the actual lists b/c the public names are properties
# that call this very function on access
running, completed, dead = self._running, self._completed, self._dead
# Now, update all state lists
for num, job in enumerate(running):
stat = job.stat_code
if stat == srun:
continue
elif stat == scomp:
completed.append(job)
self._comp_report.append(job)
running[num] = False
elif stat == sdead:
dead.append(job)
self._dead_report.append(job)
running[num] = False
# Remove dead/completed jobs from running list
running[:] = filter(None, running)
def _group_report(self,group,name):
"""Report summary for a given job group.
Return True if the group had any elements."""
if group:
print('%s jobs:' % name)
for job in group:
print('%s : %s' % (job.num,job))
print()
return True
def _group_flush(self,group,name):
"""Flush a given job group
Return True if the group had any elements."""
njobs = len(group)
if njobs:
plural = {1:''}.setdefault(njobs,'s')
print('Flushing %s %s job%s.' % (njobs,name,plural))
group[:] = []
return True
def _status_new(self):
"""Print the status of newly finished jobs.
Return True if any new jobs are reported.
This call resets its own state every time, so it only reports jobs
which have finished since the last time it was called."""
self._update_status()
new_comp = self._group_report(self._comp_report, 'Completed')
new_dead = self._group_report(self._dead_report,
'Dead, call jobs.traceback() for details')
self._comp_report[:] = []
self._dead_report[:] = []
return new_comp or new_dead
def status(self,verbose=0):
"""Print a status of all jobs currently being managed."""
self._update_status()
self._group_report(self.running,'Running')
self._group_report(self.completed,'Completed')
self._group_report(self.dead,'Dead')
# Also flush the report queues
self._comp_report[:] = []
self._dead_report[:] = []
def remove(self,num):
"""Remove a finished (completed or dead) job."""
try:
job = self.all[num]
except KeyError:
error('Job #%s not found' % num)
else:
stat_code = job.stat_code
if stat_code == self._s_running:
error('Job #%s is still running, it can not be removed.' % num)
return
elif stat_code == self._s_completed:
self.completed.remove(job)
elif stat_code == self._s_dead:
self.dead.remove(job)
def flush(self):
"""Flush all finished jobs (completed and dead) from lists.
Running jobs are never flushed.
It first calls _status_new(), to update info. If any jobs have
completed since the last _status_new() call, the flush operation
aborts."""
# Remove the finished jobs from the master dict
alljobs = self.all
for job in self.completed+self.dead:
del(alljobs[job.num])
# Now flush these lists completely
fl_comp = self._group_flush(self.completed, 'Completed')
fl_dead = self._group_flush(self.dead, 'Dead')
if not (fl_comp or fl_dead):
print('No jobs to flush.')
def result(self,num):
"""result(N) -> return the result of job N."""
try:
return self.all[num].result
except KeyError:
error('Job #%s not found' % num)
def _traceback(self, job):
num = job if isinstance(job, int) else job.num
try:
self.all[num].traceback()
except KeyError:
error('Job #%s not found' % num)
def traceback(self, job=None):
if job is None:
self._update_status()
for deadjob in self.dead:
print("Traceback for: %r" % deadjob)
self._traceback(deadjob)
print()
else:
self._traceback(job)
class BackgroundJobBase(threading.Thread):
"""Base class to build BackgroundJob classes.
The derived classes must implement:
- Their own __init__, since the one here raises NotImplementedError. The
derived constructor must call self._init() at the end, to provide common
initialization.
- A strform attribute used in calls to __str__.
- A call() method, which will make the actual execution call and must
return a value to be held in the 'result' field of the job object.
"""
# Class constants for status, in string and as numerical codes (when
# updating jobs lists, we don't want to do string comparisons). This will
# be done at every user prompt, so it has to be as fast as possible
stat_created = 'Created'; stat_created_c = 0
stat_running = 'Running'; stat_running_c = 1
stat_completed = 'Completed'; stat_completed_c = 2
stat_dead = 'Dead (Exception), call jobs.traceback() for details'
stat_dead_c = -1
def __init__(self):
"""Must be implemented in subclasses.
Subclasses must call :meth:`_init` for standard initialisation.
"""
raise NotImplementedError("This class can not be instantiated directly.")
def _init(self):
"""Common initialization for all BackgroundJob objects"""
for attr in ['call','strform']:
assert hasattr(self,attr), "Missing attribute <%s>" % attr
# The num tag can be set by an external job manager
self.num = None
self.status = BackgroundJobBase.stat_created
self.stat_code = BackgroundJobBase.stat_created_c
self.finished = False
self.result = '<BackgroundJob has not completed>'
# reuse the ipython traceback handler if we can get to it, otherwise
# make a new one
try:
make_tb = get_ipython().InteractiveTB.text
except:
make_tb = AutoFormattedTB(mode = 'Context',
color_scheme='NoColor',
tb_offset = 1).text
# Note that the actual API for text() requires the three args to be
# passed in, so we wrap it in a simple lambda.
self._make_tb = lambda : make_tb(None, None, None)
# Hold a formatted traceback if one is generated.
self._tb = None
threading.Thread.__init__(self)
def __str__(self):
return self.strform
def __repr__(self):
return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
def traceback(self):
print(self._tb)
def run(self):
try:
self.status = BackgroundJobBase.stat_running
self.stat_code = BackgroundJobBase.stat_running_c
self.result = self.call()
except:
self.status = BackgroundJobBase.stat_dead
self.stat_code = BackgroundJobBase.stat_dead_c
self.finished = None
self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
self._tb = self._make_tb()
else:
self.status = BackgroundJobBase.stat_completed
self.stat_code = BackgroundJobBase.stat_completed_c
self.finished = True
class BackgroundJobExpr(BackgroundJobBase):
"""Evaluate an expression as a background job (uses a separate thread)."""
def __init__(self, expression, glob=None, loc=None):
"""Create a new job from a string which can be fed to eval().
global/locals dicts can be provided, which will be passed to the eval
call."""
# fail immediately if the given expression can't be compiled
self.code = compile(expression,'<BackgroundJob compilation>','eval')
glob = {} if glob is None else glob
loc = {} if loc is None else loc
self.expression = self.strform = expression
self.glob = glob
self.loc = loc
self._init()
def call(self):
return eval(self.code,self.glob,self.loc)
class BackgroundJobFunc(BackgroundJobBase):
"""Run a function call as a background job (uses a separate thread)."""
def __init__(self, func, *args, **kwargs):
"""Create a new job from a callable object.
Any positional arguments and keyword args given to this constructor
after the initial callable are passed directly to it."""
if not callable(func):
raise TypeError(
'first argument to BackgroundJobFunc must be callable')
self.func = func
self.args = args
self.kwargs = kwargs
# The string form will only include the function passed, because
# generating string representations of the arguments is a potentially
# _very_ expensive operation (e.g. with large arrays).
self.strform = str(func)
self._init()
def call(self):
return self.func(*self.args, **self.kwargs)
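if __name__ == '__main__':
    # Hedged example of the manager described above: run a small function in a
    # background thread, wait for it, then inspect status and result. The
    # helper function and timings are illustrative only.
    import time

    def _slow_add(a, b, delay=0.1):
        time.sleep(delay)
        return a + b

    jobs = BackgroundJobManager()
    job = jobs.new(_slow_add, 2, 3)
    job.join()                 # jobs are threading.Thread instances
    jobs.status()              # print a summary of running/completed/dead jobs
    print('result:', jobs.result(job.num))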

View File

@@ -0,0 +1,69 @@
""" Utilities for accessing the platform's clipboard.
"""
import subprocess
from yap_ipython.core.error import TryNext
import yap_ipython.utils.py3compat as py3compat
class ClipboardEmpty(ValueError):
pass
def win32_clipboard_get():
""" Get the current clipboard's text on Windows.
Requires Mark Hammond's pywin32 extensions.
"""
try:
import win32clipboard
except ImportError:
raise TryNext("Getting text from the clipboard requires the pywin32 "
"extensions: http://sourceforge.net/projects/pywin32/")
win32clipboard.OpenClipboard()
try:
text = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
except (TypeError, win32clipboard.error):
try:
text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
except (TypeError, win32clipboard.error):
raise ClipboardEmpty
finally:
win32clipboard.CloseClipboard()
return text
def osx_clipboard_get():
""" Get the clipboard's text on OS X.
"""
p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
stdout=subprocess.PIPE)
text, stderr = p.communicate()
# Text comes in with old Mac \r line endings. Change them to \n.
text = text.replace(b'\r', b'\n')
text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
return text
def tkinter_clipboard_get():
""" Get the clipboard's text using Tkinter.
This is the default on systems that are not Windows or OS X. It may
interfere with other UI toolkits and should be replaced with an
implementation that uses that toolkit.
"""
try:
from tkinter import Tk, TclError
except ImportError:
raise TryNext("Getting text from the clipboard on this platform requires tkinter.")
root = Tk()
root.withdraw()
try:
text = root.clipboard_get()
except TclError:
raise ClipboardEmpty
finally:
root.destroy()
text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
return text
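if __name__ == '__main__':
    # Hedged example: choose a platform-appropriate getter from the functions
    # above and print the current clipboard text. The dispatch below is only a
    # sketch; yap_ipython itself selects these functions through its hook system.
    import sys
    if sys.platform == 'win32':
        _clipboard_get = win32_clipboard_get
    elif sys.platform == 'darwin':
        _clipboard_get = osx_clipboard_get
    else:
        _clipboard_get = tkinter_clipboard_get
    try:
        print(_clipboard_get())
    except ClipboardEmpty:
        print('<clipboard is empty>')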

View File

@@ -0,0 +1,347 @@
# -*- coding: utf-8 -*-
"""
Provides a reload() function that acts recursively.
Python's normal :func:`python:reload` function only reloads the module that it's
passed. The :func:`reload` function in this module also reloads everything
imported from that module, which is useful when you're changing files deep
inside a package.
To use this as your default reload function, type this for Python 2::
import __builtin__
from yap_ipython.lib import deepreload
__builtin__.reload = deepreload.reload
Or this for Python 3::
import builtins
from yap_ipython.lib import deepreload
builtins.reload = deepreload.reload
A reference to the original :func:`python:reload` is stored in this module as
:data:`original_reload`, so you can restore it later.
This code is almost entirely based on knee.py, which is a Python
re-implementation of hierarchical module import.
"""
#*****************************************************************************
# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import builtins as builtin_mod
from contextlib import contextmanager
import imp
import sys
from types import ModuleType
from warnings import warn
import types
original_import = builtin_mod.__import__
@contextmanager
def replace_import_hook(new_import):
saved_import = builtin_mod.__import__
builtin_mod.__import__ = new_import
try:
yield
finally:
builtin_mod.__import__ = saved_import
def get_parent(globals, level):
"""
parent, name = get_parent(globals, level)
Return the package that an import is being performed in. If globals comes
from the module foo.bar.bat (not itself a package), this returns the
sys.modules entry for foo.bar. If globals is from a package's __init__.py,
the package's entry in sys.modules is returned.
If globals doesn't come from a package or a module in a package, or a
corresponding entry is not found in sys.modules, None is returned.
"""
orig_level = level
if not level or not isinstance(globals, dict):
return None, ''
pkgname = globals.get('__package__', None)
if pkgname is not None:
# __package__ is set, so use it
if not hasattr(pkgname, 'rindex'):
raise ValueError('__package__ set to non-string')
if len(pkgname) == 0:
if level > 0:
raise ValueError('Attempted relative import in non-package')
return None, ''
name = pkgname
else:
# __package__ not set, so figure it out and set it
if '__name__' not in globals:
return None, ''
modname = globals['__name__']
if '__path__' in globals:
# __path__ is set, so modname is already the package name
globals['__package__'] = name = modname
else:
# Normal module, so work out the package name if any
lastdot = modname.rfind('.')
if lastdot < 0 < level:
raise ValueError("Attempted relative import in non-package")
if lastdot < 0:
globals['__package__'] = None
return None, ''
globals['__package__'] = name = modname[:lastdot]
dot = len(name)
for x in range(level, 1, -1):
try:
dot = name.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
name = name[:dot]
try:
parent = sys.modules[name]
except:
if orig_level < 1:
warn("Parent module '%.200s' not found while handling absolute "
"import" % name)
parent = None
else:
raise SystemError("Parent module '%.200s' not loaded, cannot "
"perform relative import" % name)
# We expect, but can't guarantee, if parent != None, that:
# - parent.__name__ == name
# - parent.__dict__ is globals
# If this is violated... Who cares?
return parent, name
def load_next(mod, altmod, name, buf):
"""
mod, name, buf = load_next(mod, altmod, name, buf)
altmod is either None or same as mod
"""
if len(name) == 0:
# completely empty module name should only happen in
# 'from . import' (or '__import__("")')
return mod, None, buf
dot = name.find('.')
if dot == 0:
raise ValueError('Empty module name')
if dot < 0:
subname = name
next = None
else:
subname = name[:dot]
next = name[dot+1:]
if buf != '':
buf += '.'
buf += subname
result = import_submodule(mod, subname, buf)
if result is None and mod != altmod:
result = import_submodule(altmod, subname, subname)
if result is not None:
buf = subname
if result is None:
raise ImportError("No module named %.200s" % name)
return result, next, buf
# Need to keep track of what we've already reloaded to prevent cyclic evil
found_now = {}
def import_submodule(mod, subname, fullname):
"""m = import_submodule(mod, subname, fullname)"""
# Require:
# if mod == None: subname == fullname
# else: mod.__name__ + "." + subname == fullname
global found_now
if fullname in found_now and fullname in sys.modules:
m = sys.modules[fullname]
else:
print('Reloading', fullname)
found_now[fullname] = 1
oldm = sys.modules.get(fullname, None)
if mod is None:
path = None
elif hasattr(mod, '__path__'):
path = mod.__path__
else:
return None
try:
# This appears to be necessary on Python 3, because imp.find_module()
# tries to import standard libraries (like io) itself, and we don't
# want them to be processed by our deep_import_hook.
with replace_import_hook(original_import):
fp, filename, stuff = imp.find_module(subname, path)
except ImportError:
return None
try:
m = imp.load_module(fullname, fp, filename, stuff)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
if oldm:
sys.modules[fullname] = oldm
raise
finally:
if fp: fp.close()
add_submodule(mod, m, fullname, subname)
return m
def add_submodule(mod, submod, fullname, subname):
"""mod.{subname} = submod"""
if mod is None:
return #Nothing to do here.
if submod is None:
submod = sys.modules[fullname]
setattr(mod, subname, submod)
return
def ensure_fromlist(mod, fromlist, buf, recursive):
"""Handle 'from module import a, b, c' imports."""
if not hasattr(mod, '__path__'):
return
for item in fromlist:
if not hasattr(item, 'rindex'):
raise TypeError("Item in ``from list'' not a string")
if item == '*':
if recursive:
continue # avoid endless recursion
try:
all = mod.__all__
except AttributeError:
pass
else:
ret = ensure_fromlist(mod, all, buf, 1)
if not ret:
return 0
elif not hasattr(mod, item):
import_submodule(mod, item, buf + '.' + item)
def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
"""Replacement for __import__()"""
parent, buf = get_parent(globals, level)
head, name, buf = load_next(parent, None if level < 0 else parent, name, buf)
tail = head
while name:
tail, name, buf = load_next(tail, tail, name, buf)
# If tail is None, both get_parent and load_next found
# an empty module name: someone called __import__("") or
# doctored faulty bytecode
if tail is None:
raise ValueError('Empty module name')
if not fromlist:
return head
ensure_fromlist(tail, fromlist, buf, 0)
return tail
modules_reloading = {}
def deep_reload_hook(m):
"""Replacement for reload()."""
# Hardcode this one as it would raise a NotImplementedError from the
# bowels of Python and screw up the import machinery after.
# unlike other imports the `exclude` list already in place is not enough.
if m is types:
return m
if not isinstance(m, ModuleType):
raise TypeError("reload() argument must be module")
name = m.__name__
if name not in sys.modules:
raise ImportError("reload(): module %.200s not in sys.modules" % name)
global modules_reloading
try:
return modules_reloading[name]
except:
modules_reloading[name] = m
dot = name.rfind('.')
if dot < 0:
subname = name
path = None
else:
try:
parent = sys.modules[name[:dot]]
except KeyError:
modules_reloading.clear()
raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
subname = name[dot+1:]
path = getattr(parent, "__path__", None)
try:
# This appears to be necessary on Python 3, because imp.find_module()
# tries to import standard libraries (like io) itself, and we don't
# want them to be processed by our deep_import_hook.
with replace_import_hook(original_import):
fp, filename, stuff = imp.find_module(subname, path)
finally:
modules_reloading.clear()
try:
newm = imp.load_module(name, fp, filename, stuff)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
sys.modules[name] = m
raise
finally:
if fp: fp.close()
modules_reloading.clear()
return newm
# Save the original hooks
original_reload = imp.reload
# Replacement for reload()
def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__',
'numpy', 'numpy._globals')):
"""Recursively reload all modules used in the given module. Optionally
takes a list of modules to exclude from reloading. The default exclude
list contains modules such as sys, os.path, builtins, and __main__, to prevent,
e.g., resetting display, exception, and io hooks.
"""
global found_now
for i in exclude:
found_now[i] = 1
try:
with replace_import_hook(deep_import_hook):
return deep_reload_hook(module)
finally:
found_now = {}
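if __name__ == '__main__':
    # Hedged example: recursively reload an already-imported package. The
    # package name is only a stand-in; any imported module works, subject to
    # the caveats above (C extensions, modules in the exclude list).
    import xml.dom
    newmod = reload(xml.dom)
    print('reloaded:', newmod.__name__)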

View File

@@ -0,0 +1,667 @@
"""Module for interactive demos using yap_ipython.
This module implements a few classes for running Python scripts interactively
in yap_ipython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to yap_ipython.
Provided classes
----------------
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by yap_ipython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: yap_ipython version of the LineDemo class (the demo is
executed a line at a time, but processed via yap_ipython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Inheritance diagram:
.. inheritance-diagram:: yap_ipython.lib.demo
:parts: 3
Subclassing
-----------
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- highlight(): format every block and optionally highlight comments and
docstring content.
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
---------
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global yap_ipython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where yap_ipython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
  Make a block execute automatically, but still be printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different than calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via::
from yap_ipython.lib.demo import Demo
d = Demo('ex_demo.py')
d()
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Note: To make this simpler to explore, a file called "demo-exercizer.py" has
been added to the "docs/examples/core" directory. Just cd to this directory in
an yap_ipython session, and type::
%run demo-exercizer.py
and then follow the directions.
Example
-------
The following is a very simple example of a valid demo file.
::
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of yap_ipython's Demo class.'''
print('Hello, welcome to an interactive yap_ipython demo.')
# The mark below defines a block boundary, which is a point where yap_ipython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below marks this block as silent
# <demo> silent
print('This is a silent block, which gets executed but not printed.')
# <demo> stop
# <demo> auto
print('This is an automatic block.')
print('It is executed without asking for confirmation, but printed.')
z = x+y
print('z=', z)
# <demo> stop
# This is just another normal block.
print('z is now:', z)
print('bye!')
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
import os
import re
import shlex
import sys
import pygments
from yap_ipython.utils.text import marquee
from yap_ipython.utils import openpy
from yap_ipython.utils import py3compat
__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
class DemoError(Exception): pass
def re_mark(mark):
return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
class Demo(object):
    re_stop = re_mark(r'-*\s?stop\s?-*')
re_silent = re_mark('silent')
re_auto = re_mark('auto')
re_auto_all = re_mark('auto_all')
def __init__(self,src,title='',arg_str='',auto_all=None, format_rst=False,
formatter='terminal', style='default'):
"""Make a new demo object. To run the demo, simply call the object.
See the module docstring for full details and an example (you can use
yap_ipython.Demo? in yap_ipython to see it).
Inputs:
- src is either a file, or file-like object, or a
string that can be resolved to a filename.
Optional inputs:
- title: a string to use as the demo name. Of most use when the demo
you are making comes from an object that has no filename, or if you
want an alternate denotation distinct from the filename.
- arg_str(''): a string of arguments, internally converted to a list
just like sys.argv, so the demo script can see a similar
environment.
- auto_all(None): global flag to run all blocks automatically without
confirmation. This attribute overrides the block-level tags and
applies to the whole demo. It is an attribute of the object, and
can be changed at runtime simply by reassigning it to a boolean
value.
- format_rst(False): a bool to enable comments and doc strings
formatting with pygments rst lexer
- formatter('terminal'): a string of pygments formatter name to be
used. Useful values for terminals: terminal, terminal256,
terminal16m
- style('default'): a string of pygments style name to be used.
"""
if hasattr(src, "read"):
# It seems to be a file or a file-like object
self.fname = "from a file-like object"
if title == '':
self.title = "from a file-like object"
else:
self.title = title
else:
# Assume it's a string or something that can be converted to one
self.fname = src
if title == '':
(filepath, filename) = os.path.split(src)
self.title = filename
else:
self.title = title
self.sys_argv = [src] + shlex.split(arg_str)
self.auto_all = auto_all
self.src = src
self.inside_ipython = "get_ipython" in globals()
if self.inside_ipython:
# get a few things from ipython. While it's a bit ugly design-wise,
# it ensures that things like color scheme and the like are always in
# sync with the ipython mode being used. This class is only meant to
# be used inside ipython anyways, so it's OK.
ip = get_ipython() # this is in builtins whenever yap_ipython is running
self.ip_ns = ip.user_ns
self.ip_colorize = ip.pycolorize
self.ip_showtb = ip.showtraceback
self.ip_run_cell = ip.run_cell
self.shell = ip
self.formatter = pygments.formatters.get_formatter_by_name(formatter,
style=style)
self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
self.format_rst = format_rst
if format_rst:
self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")
# load user data and initialize data structures
self.reload()
def fload(self):
"""Load file object."""
# read data and parse into blocks
if hasattr(self, 'fobj') and self.fobj is not None:
self.fobj.close()
if hasattr(self.src, "read"):
# It seems to be a file or a file-like object
self.fobj = self.src
else:
# Assume it's a string or something that can be converted to one
self.fobj = openpy.open(self.fname)
def reload(self):
"""Reload source from disk and initialize state."""
self.fload()
self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
# if auto_all is not given (def. None), we read it from the file
if self.auto_all is None:
self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
else:
self.auto_all = bool(self.auto_all)
# Clean the sources from all markup so it doesn't get displayed when
# running the demo
src_blocks = []
auto_strip = lambda s: self.re_auto.sub('',s)
for i,b in enumerate(src_b):
if self._auto[i]:
src_blocks.append(auto_strip(b))
else:
src_blocks.append(b)
# remove the auto_all marker
src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
self.nblocks = len(src_blocks)
self.src_blocks = src_blocks
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
def reset(self):
"""Reset the namespace and seek pointer to restart the demo"""
self.user_ns = {}
self.finished = False
self.block_index = 0
def _validate_index(self,index):
if index<0 or index>=self.nblocks:
raise ValueError('invalid block index %s' % index)
def _get_index(self,index):
"""Get the current block index, validating and checking status.
Returns None if the demo is finished"""
if index is None:
if self.finished:
print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
return None
index = self.block_index
else:
self._validate_index(index)
return index
def seek(self,index):
"""Move the current seek pointer to the given block.
You can use negative indices to seek from the end, with identical
semantics to those of Python lists."""
if index<0:
index = self.nblocks + index
self._validate_index(index)
self.block_index = index
self.finished = False
def back(self,num=1):
"""Move the seek pointer back num blocks (default is 1)."""
self.seek(self.block_index-num)
def jump(self,num=1):
"""Jump a given number of blocks relative to the current one.
The offset can be positive or negative, defaults to 1."""
self.seek(self.block_index+num)
def again(self):
"""Move the seek pointer back one block and re-execute."""
self.back(1)
self()
def edit(self,index=None):
"""Edit a block.
If no number is given, use the last block executed.
This edits the in-memory copy of the demo, it does NOT modify the
original source file. If you want to do that, simply open the file in
an editor and use reload() when you make changes to the file. This
method is meant to let you change a block during a demonstration for
explanatory purposes, without damaging your original script."""
index = self._get_index(index)
if index is None:
return
# decrease the index by one (unless we're at the very beginning), so
        # that the default demo.edit() call opens up the block we've last run
if index>0:
index -= 1
filename = self.shell.mktempfile(self.src_blocks[index])
self.shell.hooks.editor(filename,1)
with open(filename, 'r') as f:
new_block = f.read()
# update the source and colored block
self.src_blocks[index] = new_block
self.src_blocks_colored[index] = self.highlight(new_block)
self.block_index = index
# call to run with the newly edited index
self()
def show(self,index=None):
"""Show a single block on screen"""
index = self._get_index(index)
if index is None:
return
print(self.marquee('<%s> block # %s (%s remaining)' %
(self.title,index,self.nblocks-index-1)))
print(self.src_blocks_colored[index])
sys.stdout.flush()
def show_all(self):
"""Show entire demo on screen, block by block"""
fname = self.title
title = self.title
nblocks = self.nblocks
silent = self._silent
marquee = self.marquee
for index,block in enumerate(self.src_blocks_colored):
if silent[index]:
print(marquee('<%s> SILENT block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
else:
print(marquee('<%s> block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
print(block, end=' ')
sys.stdout.flush()
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
exec(source, self.user_ns)
def __call__(self,index=None):
"""run a block of the demo.
If index is given, it should be an integer >=1 and <= nblocks. This
means that the calling convention is one off from typical Python
lists. The reason for the inconsistency is that the demo always
        prints 'Block n/N', and N is the total, so it would be very odd to use
zero-indexing here."""
index = self._get_index(index)
if index is None:
return
try:
marquee = self.marquee
next_block = self.src_blocks[index]
self.block_index += 1
if self._silent[index]:
print(marquee('Executing silent block # %s (%s remaining)' %
(index,self.nblocks-index-1)))
else:
self.pre_cmd()
self.show(index)
if self.auto_all or self._auto[index]:
print(marquee('output:'))
else:
print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
ans = py3compat.input().strip()
if ans:
print(marquee('Block NOT executed'))
return
try:
save_argv = sys.argv
sys.argv = self.sys_argv
self.run_cell(next_block)
self.post_cmd()
finally:
sys.argv = save_argv
except:
if self.inside_ipython:
self.ip_showtb(filename=self.fname)
else:
if self.inside_ipython:
self.ip_ns.update(self.user_ns)
if self.block_index == self.nblocks:
mq1 = self.marquee('END OF DEMO')
if mq1:
# avoid spurious print if empty marquees are used
print()
print(mq1)
print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
self.finished = True
# These methods are meant to be overridden by subclasses who may wish to
    # customize the behavior of their demos.
def marquee(self,txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'."""
return marquee(txt,width,mark)
def pre_cmd(self):
"""Method called before executing each block."""
pass
def post_cmd(self):
"""Method called after executing each block."""
pass
def highlight(self, block):
"""Method called on each block to highlight it content"""
tokens = pygments.lex(block, self.python_lexer)
if self.format_rst:
from pygments.token import Token
toks = []
for token in tokens:
if token[0] == Token.String.Doc and len(token[1]) > 6:
toks += pygments.lex(token[1][:3], self.python_lexer)
# parse doc string content by rst lexer
toks += pygments.lex(token[1][3:-3], self.rst_lexer)
toks += pygments.lex(token[1][-3:], self.python_lexer)
elif token[0] == Token.Comment.Single:
toks.append((Token.Comment.Single, token[1][0]))
# parse comment content by rst lexer
                    # remove the extra newline added by the rst lexer
toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
else:
toks.append(token)
tokens = toks
return pygments.format(tokens, self.formatter)
class IPythonDemo(Demo):
"""Class for interactive demos with yap_ipython's input processing applied.
This subclasses Demo, but instead of executing each block by the Python
interpreter (via exec), it actually calls yap_ipython on it, so that any input
filters which may be in place are applied to the input block.
If you have an interactive environment which exposes special input
processing, you can use this class instead to write demo scripts which
operate exactly as if you had typed them interactively. The default Demo
class requires the input to be valid, pure Python code.
"""
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
self.shell.run_cell(source)
class LineDemo(Demo):
"""Demo where each line is executed as a separate block.
The input script should be valid Python code.
This class doesn't require any markup at all, and it's meant for simple
scripts (with no nesting or any kind of indentation) which consist of
multiple lines of input to be executed, one at a time, as if they had been
typed in the interactive prompt.
Note: the input can not have *any* indentation, which means that only
single-lines of input are accepted, not even function definitions are
valid."""
def reload(self):
"""Reload source from disk and initialize state."""
# read data and parse into blocks
self.fload()
lines = self.fobj.readlines()
src_b = [l for l in lines if l.strip()]
nblocks = len(src_b)
self.src = ''.join(lines)
self._silent = [False]*nblocks
self._auto = [True]*nblocks
self.auto_all = True
self.nblocks = nblocks
self.src_blocks = src_b
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
class IPythonLineDemo(IPythonDemo,LineDemo):
"""Variant of the LineDemo class whose input is processed by yap_ipython."""
pass
class ClearMixin(object):
"""Use this mixin to make Demo classes with less visual clutter.
Demos using this mixin will clear the screen before every block and use
blank marquees.
Note that in order for the methods defined here to actually override those
of the classes it's mixed with, it must go /first/ in the inheritance
tree. For example:
class ClearIPDemo(ClearMixin,IPythonDemo): pass
will provide an IPythonDemo class with the mixin's features.
"""
def marquee(self,txt='',width=78,mark='*'):
"""Blank marquee that returns '' no matter what the input."""
return ''
def pre_cmd(self):
"""Method called before executing each block.
This one simply clears the screen."""
from yap_ipython.utils.terminal import _term_clear
_term_clear()
class ClearDemo(ClearMixin,Demo):
pass
class ClearIPDemo(ClearMixin,IPythonDemo):
pass
def slide(file_path, noclear=False, format_rst=True, formatter="terminal",
style="native", auto_all=False, delimiter='...'):
if noclear:
demo_class = Demo
else:
demo_class = ClearDemo
demo = demo_class(file_path, format_rst=format_rst, formatter=formatter,
style=style, auto_all=auto_all)
while not demo.finished:
demo()
try:
py3compat.input('\n' + delimiter)
except KeyboardInterrupt:
exit(1)
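# Usage sketch (illustrative only): driving a demo from code rather than the
# command-line entry point below.  'ex_demo.py' is the example file described
# in the module docstring; auto_all=True makes every block run without asking
# for confirmation.
def _example_programmatic_run():  # pragma: no cover
    d = Demo('ex_demo.py', auto_all=True)
    while not d.finished:
        d()  # execute the next block
    return d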
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Run python demos')
parser.add_argument('--noclear', '-C', action='store_true',
help='Do not clear terminal on each slide')
parser.add_argument('--rst', '-r', action='store_true',
                        help='Highlight comments and docstrings as rst')
parser.add_argument('--formatter', '-f', default='terminal',
help='pygments formatter name could be: terminal, '
'terminal256, terminal16m')
parser.add_argument('--style', '-s', default='default',
help='pygments style name')
parser.add_argument('--auto', '-a', action='store_true',
                        help='Run all blocks automatically without '
                             'confirmation')
parser.add_argument('--delimiter', '-d', default='...',
help='slides delimiter added after each slide run')
parser.add_argument('file', nargs=1,
help='python demo file')
args = parser.parse_args()
slide(args.file[0], noclear=args.noclear, format_rst=args.rst,
formatter=args.formatter, style=args.style, auto_all=args.auto,
delimiter=args.delimiter)

View File

@@ -0,0 +1,557 @@
"""Various display related classes.
Authors : MinRK, gregcaporaso, dannystaple
"""
from os.path import exists, isfile, splitext, abspath, join, isdir
from os import walk, sep
from yap_ipython.core.display import DisplayObject
__all__ = ['Audio', 'IFrame', 'YouTubeVideo', 'VimeoVideo', 'ScribdDocument',
'FileLink', 'FileLinks']
class Audio(DisplayObject):
"""Create an audio object.
When this object is returned by an input cell or passed to the
display function, it will result in Audio controls being displayed
in the frontend (only works in the notebook).
Parameters
----------
data : numpy array, list, unicode, str or bytes
Can be one of
* Numpy 1d array containing the desired waveform (mono)
* Numpy 2d array containing waveforms for each channel.
Shape=(NCHAN, NSAMPLES). For the standard channel order, see
http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
* List of float or integer representing the waveform (mono)
* String containing the filename
* Bytestring containing raw PCM data or
* URL pointing to a file on the web.
If the array option is used the waveform will be normalized.
If a filename or url is used the format support will be browser
dependent.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
embed : boolean
        Should the audio data be embedded using a data URI (True) or should
        the original source be referenced (False). Set this to True if you want
        the audio to be playable later with no internet connection in the notebook.
        Default is `True`, unless the keyword argument `url` is set, in which
        case the default is `False`.
rate : integer
The sampling rate of the raw data.
Only required when data parameter is being used as an array
autoplay : bool
Set to True if the audio should immediately start playing.
Default is `False`.
Examples
--------
::
# Generate a sound
import numpy as np
framerate = 44100
t = np.linspace(0,5,framerate*5)
        data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
Audio(data,rate=framerate)
# Can also do stereo or more channels
dataleft = np.sin(2*np.pi*220*t)
dataright = np.sin(2*np.pi*224*t)
Audio([dataleft, dataright],rate=framerate)
Audio("http://www.nch.com.au/acm/8k16bitpcm.wav") # From URL
Audio(url="http://www.w3schools.com/html/horse.ogg")
Audio('/path/to/sound.wav') # From file
Audio(filename='/path/to/sound.ogg')
        Audio(b'RAW_WAV_DATA..')  # From bytes
        Audio(data=b'RAW_WAV_DATA..')
"""
_read_flags = 'rb'
def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False):
if filename is None and url is None and data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
if embed is False and url is None:
raise ValueError("No url found. Expecting url when embed=False")
if url is not None and embed is not True:
self.embed = False
else:
self.embed = True
self.autoplay = autoplay
super(Audio, self).__init__(data=data, url=url, filename=filename)
if self.data is not None and not isinstance(self.data, bytes):
self.data = self._make_wav(data,rate)
def reload(self):
"""Reload the raw data from file or URL."""
import mimetypes
if self.embed:
super(Audio, self).reload()
if self.filename is not None:
self.mimetype = mimetypes.guess_type(self.filename)[0]
elif self.url is not None:
self.mimetype = mimetypes.guess_type(self.url)[0]
else:
self.mimetype = "audio/wav"
def _make_wav(self, data, rate):
""" Transform a numpy array to a PCM bytestring """
import struct
from io import BytesIO
import wave
try:
import numpy as np
data = np.array(data, dtype=float)
if len(data.shape) == 1:
nchan = 1
elif len(data.shape) == 2:
                # In wave files, channels are interleaved. E.g.,
# "L1R1L2R2..." for stereo. See
# http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
# for channel ordering
nchan = data.shape[0]
data = data.T.ravel()
else:
raise ValueError('Array audio input must be a 1D or 2D array')
scaled = np.int16(data/np.max(np.abs(data))*32767).tolist()
except ImportError:
# check that it is a "1D" list
idata = iter(data) # fails if not an iterable
try:
                iter(next(idata))
raise TypeError('Only lists of mono audio are '
'supported if numpy is not installed')
except TypeError:
# this means it's not a nested list, which is what we want
pass
maxabsvalue = float(max([abs(x) for x in data]))
scaled = [int(x/maxabsvalue*32767) for x in data]
nchan = 1
fp = BytesIO()
waveobj = wave.open(fp,mode='wb')
waveobj.setnchannels(nchan)
waveobj.setframerate(rate)
waveobj.setsampwidth(2)
waveobj.setcomptype('NONE','NONE')
waveobj.writeframes(b''.join([struct.pack('<h',x) for x in scaled]))
val = fp.getvalue()
waveobj.close()
return val
def _data_and_metadata(self):
"""shortcut for returning metadata with url information, if defined"""
md = {}
if self.url:
md['url'] = self.url
if md:
return self.data, md
else:
return self.data
def _repr_html_(self):
src = """
<audio controls="controls" {autoplay}>
<source src="{src}" type="{type}" />
Your browser does not support the audio element.
</audio>
"""
return src.format(src=self.src_attr(),type=self.mimetype, autoplay=self.autoplay_attr())
def src_attr(self):
import base64
if self.embed and (self.data is not None):
            data = base64.b64encode(self.data).decode('ascii')
return """data:{type};base64,{base64}""".format(type=self.mimetype,
base64=data)
elif self.url is not None:
return self.url
else:
return ""
def autoplay_attr(self):
        if self.autoplay:
return 'autoplay="autoplay"'
else:
return ''
class IFrame(object):
"""
Generic class to embed an iframe in an yap_ipython notebook
"""
iframe = """
<iframe
width="{width}"
height="{height}"
src="{src}{params}"
frameborder="0"
allowfullscreen
></iframe>
"""
def __init__(self, src, width, height, **kwargs):
self.src = src
self.width = width
self.height = height
self.params = kwargs
def _repr_html_(self):
"""return the embed iframe"""
if self.params:
try:
from urllib.parse import urlencode # Py 3
except ImportError:
from urllib import urlencode
params = "?" + urlencode(self.params)
else:
params = ""
return self.iframe.format(src=self.src,
width=self.width,
height=self.height,
params=params)
class YouTubeVideo(IFrame):
"""Class for embedding a YouTube Video in an yap_ipython session, based on its video id.
e.g. to embed the video from https://www.youtube.com/watch?v=foo , you would
do::
vid = YouTubeVideo("foo")
display(vid)
To start from 30 seconds::
vid = YouTubeVideo("abc", start=30)
display(vid)
To calculate seconds from time as hours, minutes, seconds use
:class:`datetime.timedelta`::
start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
Other parameters can be provided as documented at
https://developers.google.com/youtube/player_parameters#Parameters
When converting the notebook using nbconvert, a jpeg representation of the video
will be inserted in the document.
"""
def __init__(self, id, width=400, height=300, **kwargs):
self.id=id
src = "https://www.youtube.com/embed/{0}".format(id)
super(YouTubeVideo, self).__init__(src, width, height, **kwargs)
def _repr_jpeg_(self):
# Deferred import
from urllib.request import urlopen
try:
return urlopen("https://img.youtube.com/vi/{id}/hqdefault.jpg".format(id=self.id)).read()
except IOError:
return None
class VimeoVideo(IFrame):
"""
Class for embedding a Vimeo video in an yap_ipython session, based on its video id.
"""
def __init__(self, id, width=400, height=300, **kwargs):
src="https://player.vimeo.com/video/{0}".format(id)
super(VimeoVideo, self).__init__(src, width, height, **kwargs)
class ScribdDocument(IFrame):
"""
Class for embedding a Scribd document in an yap_ipython session
Use the start_page params to specify a starting point in the document
    Use the view_mode params to specify the display type: one of scroll | slideshow | book
    e.g. to display Wes' foundational paper about pandas in book mode from page 3:
ScribdDocument(71048089, width=800, height=400, start_page=3, view_mode="book")
"""
def __init__(self, id, width=400, height=300, **kwargs):
src="https://www.scribd.com/embeds/{0}/content".format(id)
super(ScribdDocument, self).__init__(src, width, height, **kwargs)
class FileLink(object):
"""Class for embedding a local file link in an yap_ipython session, based on path
e.g. to embed a link that was generated in the yap_ipython notebook as my/data.txt
you would do::
local_file = FileLink("my/data.txt")
display(local_file)
or in the HTML notebook, just::
FileLink("my/data.txt")
"""
html_link_str = "<a href='%s' target='_blank'>%s</a>"
def __init__(self,
path,
url_prefix='',
result_html_prefix='',
result_html_suffix='<br>'):
"""
Parameters
----------
path : str
path to the file or directory that should be formatted
url_prefix : str
prefix to be prepended to all files to form a working link [default:
'']
result_html_prefix : str
            text to prepend to the link [default: '']
        result_html_suffix : str
            text to append at the end of the link [default: '<br>']
"""
if isdir(path):
raise ValueError("Cannot display a directory using FileLink. "
"Use FileLinks to display '%s'." % path)
self.path = path
self.url_prefix = url_prefix
self.result_html_prefix = result_html_prefix
self.result_html_suffix = result_html_suffix
def _format_path(self):
fp = ''.join([self.url_prefix,self.path])
return ''.join([self.result_html_prefix,
self.html_link_str % (fp, self.path),
self.result_html_suffix])
def _repr_html_(self):
"""return html link to file
"""
if not exists(self.path):
return ("Path (<tt>%s</tt>) doesn't exist. "
"It may still be in the process of "
"being generated, or you may have the "
"incorrect path." % self.path)
return self._format_path()
def __repr__(self):
"""return absolute path to file
"""
return abspath(self.path)
class FileLinks(FileLink):
"""Class for embedding local file links in an yap_ipython session, based on path
e.g. to embed links to files that were generated in the yap_ipython notebook
under ``my/data``, you would do::
local_files = FileLinks("my/data")
display(local_files)
or in the HTML notebook, just::
FileLinks("my/data")
"""
def __init__(self,
path,
url_prefix='',
included_suffixes=None,
result_html_prefix='',
result_html_suffix='<br>',
notebook_display_formatter=None,
terminal_display_formatter=None,
recursive=True):
"""
See :class:`FileLink` for the ``path``, ``url_prefix``,
``result_html_prefix`` and ``result_html_suffix`` parameters.
included_suffixes : list
Filename suffixes to include when formatting output [default: include
all files]
notebook_display_formatter : function
Used to format links for display in the notebook. See discussion of
formatter functions below.
terminal_display_formatter : function
Used to format links for display in the terminal. See discussion of
formatter functions below.
Formatter functions must be of the form::
f(dirname, fnames, included_suffixes)
dirname : str
The name of a directory
fnames : list
The files in that directory
included_suffixes : list
The file suffixes that should be included in the output (passing None
            means to include all suffixes in the output in the built-in formatters)
recursive : boolean
Whether to recurse into subdirectories. Default is True.
The function should return a list of lines that will be printed in the
notebook (if passing notebook_display_formatter) or the terminal (if
passing terminal_display_formatter). This function is iterated over for
        each directory in self.path. Default formatters are in place; alternative
        formatters can be passed here to override them.
"""
if isfile(path):
raise ValueError("Cannot display a file using FileLinks. "
"Use FileLink to display '%s'." % path)
self.included_suffixes = included_suffixes
        # remove trailing slashes for more consistent output formatting
path = path.rstrip('/')
self.path = path
self.url_prefix = url_prefix
self.result_html_prefix = result_html_prefix
self.result_html_suffix = result_html_suffix
self.notebook_display_formatter = \
notebook_display_formatter or self._get_notebook_display_formatter()
self.terminal_display_formatter = \
terminal_display_formatter or self._get_terminal_display_formatter()
self.recursive = recursive
def _get_display_formatter(self,
dirname_output_format,
fname_output_format,
fp_format,
fp_cleaner=None):
""" generate built-in formatter function
this is used to define both the notebook and terminal built-in
formatters as they only differ by some wrapper text for each entry
dirname_output_format: string to use for formatting directory
names, dirname will be substituted for a single "%s" which
must appear in this string
fname_output_format: string to use for formatting file names,
if a single "%s" appears in the string, fname will be substituted
if two "%s" appear in the string, the path to fname will be
substituted for the first and fname will be substituted for the
second
fp_format: string to use for formatting filepaths, must contain
exactly two "%s" and the dirname will be subsituted for the first
and fname will be substituted for the second
"""
def f(dirname, fnames, included_suffixes=None):
result = []
# begin by figuring out which filenames, if any,
# are going to be displayed
display_fnames = []
for fname in fnames:
if (isfile(join(dirname,fname)) and
(included_suffixes is None or
splitext(fname)[1] in included_suffixes)):
display_fnames.append(fname)
if len(display_fnames) == 0:
# if there are no filenames to display, don't print anything
# (not even the directory name)
pass
else:
# otherwise print the formatted directory name followed by
# the formatted filenames
dirname_output_line = dirname_output_format % dirname
result.append(dirname_output_line)
for fname in display_fnames:
fp = fp_format % (dirname,fname)
if fp_cleaner is not None:
fp = fp_cleaner(fp)
try:
# output can include both a filepath and a filename...
fname_output_line = fname_output_format % (fp, fname)
except TypeError:
# ... or just a single filepath
fname_output_line = fname_output_format % fname
result.append(fname_output_line)
return result
return f
def _get_notebook_display_formatter(self,
spacer="&nbsp;&nbsp;"):
""" generate function to use for notebook formatting
"""
dirname_output_format = \
self.result_html_prefix + "%s/" + self.result_html_suffix
fname_output_format = \
self.result_html_prefix + spacer + self.html_link_str + self.result_html_suffix
fp_format = self.url_prefix + '%s/%s'
if sep == "\\":
# Working on a platform where the path separator is "\", so
# must convert these to "/" for generating a URI
def fp_cleaner(fp):
# Replace all occurrences of backslash ("\") with a forward
# slash ("/") - this is necessary on windows when a path is
# provided as input, but we must link to a URI
return fp.replace('\\','/')
else:
fp_cleaner = None
return self._get_display_formatter(dirname_output_format,
fname_output_format,
fp_format,
fp_cleaner)
def _get_terminal_display_formatter(self,
spacer=" "):
""" generate function to use for terminal formatting
"""
dirname_output_format = "%s/"
fname_output_format = spacer + "%s"
fp_format = '%s/%s'
return self._get_display_formatter(dirname_output_format,
fname_output_format,
fp_format)
def _format_path(self):
result_lines = []
if self.recursive:
walked_dir = list(walk(self.path))
else:
walked_dir = [next(walk(self.path))]
walked_dir.sort()
for dirname, subdirs, fnames in walked_dir:
result_lines += self.notebook_display_formatter(dirname, fnames, self.included_suffixes)
return '\n'.join(result_lines)
def __repr__(self):
"""return newline-separated absolute paths
"""
result_lines = []
if self.recursive:
walked_dir = list(walk(self.path))
else:
walked_dir = [next(walk(self.path))]
walked_dir.sort()
for dirname, subdirs, fnames in walked_dir:
result_lines += self.terminal_display_formatter(dirname, fnames, self.included_suffixes)
return '\n'.join(result_lines)
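# ---------------------------------------------------------------------------
# Illustrative sketch (not used by this module): a custom formatter following
# the f(dirname, fnames, included_suffixes) convention documented in
# FileLinks.__init__.  It emits plain "dirname/fname" lines for matching files
# and could be passed as notebook_display_formatter or
# terminal_display_formatter.
def _example_filelinks_formatter(dirname, fnames, included_suffixes=None):
    return ['%s/%s' % (dirname, fname) for fname in fnames
            if isfile(join(dirname, fname)) and
            (included_suffixes is None or
             splitext(fname)[1] in included_suffixes)]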

View File

@@ -0,0 +1,128 @@
""" 'editor' hooks for common editors that work well with ipython
They should honor the line number argument, at least.
Contributions are *very* welcome.
"""
import os
import pipes
import shlex
import subprocess
import sys
from yap_ipython import get_ipython
from yap_ipython.core.error import TryNext
from yap_ipython.utils import py3compat
def install_editor(template, wait=False):
"""Installs the editor that is called by yap_ipython for the %edit magic.
This overrides the default editor, which is generally set by your EDITOR
environment variable or is notepad (windows) or vi (linux). By supplying a
    template string `template`, you can control how the editor is invoked
    by yap_ipython (e.g. the format in which it accepts command line options).
Parameters
----------
template : basestring
        `template` acts as a template for how your editor is invoked by
        the shell. It should contain '{filename}', which will be replaced on
        invocation with the file name, and '{line}', which will be replaced
        with the line number (or 0) at which to open the file.
wait : bool
If `wait` is true, wait until the user presses enter before returning,
to facilitate non-blocking editors that exit immediately after
the call.
"""
# not all editors support $line, so we'll leave out this check
# for substitution in ['$file', '$line']:
# if not substitution in run_template:
# raise ValueError(('run_template should contain %s'
# ' for string substitution. You supplied "%s"' % (substitution,
# run_template)))
def call_editor(self, filename, line=0):
if line is None:
line = 0
cmd = template.format(filename=pipes.quote(filename), line=line)
print(">", cmd)
# pipes.quote doesn't work right on Windows, but it does after splitting
if sys.platform.startswith('win'):
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, shell=True)
if proc.wait() != 0:
raise TryNext()
if wait:
py3compat.input("Press Enter when done editing:")
get_ipython().set_hook('editor', call_editor)
get_ipython().editor = template
# in these, exe is always the path/name of the executable. Useful
# if you don't have the editor directory in your path
def komodo(exe=u'komodo'):
""" Activestate Komodo [Edit] """
install_editor(exe + u' -l {line} {filename}', wait=True)
def scite(exe=u"scite"):
""" SciTE or Sc1 """
install_editor(exe + u' {filename} -goto:{line}')
def notepadplusplus(exe=u'notepad++'):
""" Notepad++ http://notepad-plus.sourceforge.net """
install_editor(exe + u' -n{line} {filename}')
def jed(exe=u'jed'):
""" JED, the lightweight emacsish editor """
install_editor(exe + u' +{line} {filename}')
def idle(exe=u'idle'):
""" Idle, the editor bundled with python
Parameters
----------
exe : str, None
        If None, should be pretty smart about finding the executable.
"""
if exe is None:
import idlelib
        p = os.path.dirname(idlelib.__file__)
        # I'm not sure if this actually works. Is this idle.py script
        # guaranteed to be executable?
exe = os.path.join(p, 'idle.py')
install_editor(exe + u' {filename}')
def mate(exe=u'mate'):
""" TextMate, the missing editor"""
# wait=True is not required since we're using the -w flag to mate
install_editor(exe + u' -w -l {line} {filename}')
# ##########################################
# these are untested, report any problems
# ##########################################
def emacs(exe=u'emacs'):
install_editor(exe + u' +{line} {filename}')
def gnuclient(exe=u'gnuclient'):
install_editor(exe + u' -nw +{line} {filename}')
def crimson_editor(exe=u'cedt.exe'):
install_editor(exe + u' /L:{line} {filename}')
def kate(exe=u'kate'):
install_editor(exe + u' -u -l {line} {filename}')
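# Illustrative sketch: hooks for other editors follow the same pattern as the
# functions above.  The executable name and '+{line}' syntax here are
# assumptions for the sake of the example, not a documented interface of any
# particular editor.
def generic_plus_line_editor(exe=u'myeditor'):
    """ Hypothetical editor invoked as 'myeditor +<line> <file>' """
    install_editor(exe + u' +{line} {filename}')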

View File

@@ -0,0 +1,155 @@
# coding: utf-8
"""
Support for creating GUI apps and starting event loops.
yap_ipython's GUI integration allows interactive plotting and GUI usage in a
yap_ipython session. yap_ipython has two different types of GUI integration:
1. The terminal based yap_ipython supports GUI event loops through Python's
PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
whenever raw_input is waiting for a user to type code. We implement GUI
support in the terminal by setting PyOS_InputHook to a function that
iterates the event loop for a short while. It is important to note that
in this situation, the real GUI event loop is NOT run in the normal
manner, so you can't use the normal means to detect that it is running.
2. In the two process yap_ipython kernel/frontend, the GUI event loop is run in
the kernel. In this case, the event loop is run in the normal manner by
calling the function or method of the GUI toolkit that starts the event
loop.
In addition to starting the GUI event loops in one of these two ways, yap_ipython
will *always* create an appropriate GUI application object when GUI
integration is enabled.
If you want your GUI apps to run in yap_ipython you need to do two things:
1. Test to see if there is already an existing main application object. If
there is, you should use it. If there is not an existing application object
you should create one.
2. Test to see if the GUI event loop is running. If it is, you should not
start it. If the event loop is not running you may start it.
This module contains functions for each toolkit that perform these things
in a consistent manner. Because of how PyOS_InputHook runs the event loop
you cannot detect if the event loop is running using the traditional calls
(such as ``wx.GetApp.IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
set, these methods will return a false negative. That is, they will say the
event loop is not running, when it actually is. To work around this limitation
we propose the following informal protocol:
* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``True``. This should be done
regardless of how the event loop is actually run.
* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``False``.
* If you want to see if the event loop is running, you *must* use ``hasattr``
to see if ``_in_event_loop`` attribute has been set. If it is set, you
*must* use its value. If it has not been set, you can query the toolkit
in the normal manner.
* If you want GUI support and no one else has created an application or
started the event loop you *must* do this. We don't want projects to
attempt to defer these things to someone else if they themselves need it.
The functions below implement this logic for each GUI toolkit. If you need
to create custom application subclasses, you will likely have to modify this
code for your own purposes. This code can be copied into your own project
so you don't have to depend on yap_ipython.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
from yap_ipython.core.getipython import get_ipython
#-----------------------------------------------------------------------------
# wx
#-----------------------------------------------------------------------------
def get_app_wx(*args, **kwargs):
"""Create a new wx app or return an exiting one."""
import wx
app = wx.GetApp()
if app is None:
if 'redirect' not in kwargs:
kwargs['redirect'] = False
app = wx.PySimpleApp(*args, **kwargs)
return app
def is_event_loop_running_wx(app=None):
"""Is the wx event loop running."""
# New way: check attribute on shell instance
ip = get_ipython()
if ip is not None:
if ip.active_eventloop and ip.active_eventloop == 'wx':
return True
# Fall through to checking the application, because Wx has a native way
# to check if the event loop is running, unlike Qt.
# Old way: check Wx application
if app is None:
app = get_app_wx()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return app.IsMainLoopRunning()
def start_event_loop_wx(app=None):
"""Start the wx event loop in a consistent manner."""
if app is None:
app = get_app_wx()
if not is_event_loop_running_wx(app):
app._in_event_loop = True
app.MainLoop()
app._in_event_loop = False
else:
app._in_event_loop = True
#-----------------------------------------------------------------------------
# qt4
#-----------------------------------------------------------------------------
def get_app_qt4(*args, **kwargs):
"""Create a new qt4 app or return an existing one."""
from yap_ipython.external.qt_for_kernel import QtGui
app = QtGui.QApplication.instance()
if app is None:
if not args:
args = ([''],)
app = QtGui.QApplication(*args, **kwargs)
return app
def is_event_loop_running_qt4(app=None):
"""Is the qt4 event loop running."""
# New way: check attribute on shell instance
ip = get_ipython()
if ip is not None:
return ip.active_eventloop and ip.active_eventloop.startswith('qt')
# Old way: check attribute on QApplication singleton
if app is None:
app = get_app_qt4([''])
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
        # Does qt4 provide another way to detect this?
return False
def start_event_loop_qt4(app=None):
"""Start the qt4 event loop in a consistent manner."""
if app is None:
app = get_app_qt4([''])
if not is_event_loop_running_qt4(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
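# Illustrative sketch (not used by yap_ipython itself): the two-step recipe
# from the module docstring, using the qt4 helpers above -- reuse or create
# the application object, then start its event loop only if it is not already
# running.
def _example_qt4_usage():  # pragma: no cover
    app = get_app_qt4()
    # ... create and show widgets against `app` here ...
    start_event_loop_qt4(app)
    return app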
#-----------------------------------------------------------------------------
# Tk
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# gtk
#-----------------------------------------------------------------------------

View File

@@ -0,0 +1,666 @@
# coding: utf-8
"""
Deprecated since yap_ipython 5.0
Inputhook management for GUI event loop integration.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
import ctypes
except ImportError:
ctypes = None
except SystemError: # IronPython issue, 2/8/2014
ctypes = None
import os
import platform
import sys
from distutils.version import LooseVersion as V
from warnings import warn
warn("`yap_ipython.lib.inputhook` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def _stdin_ready_posix():
"""Return True if there's something to read on stdin (posix version)."""
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
return bool(infds)
def _stdin_ready_nt():
"""Return True if there's something to read on stdin (nt version)."""
return msvcrt.kbhit()
def _stdin_ready_other():
"""Return True, assuming there's something to read on stdin."""
return True
def _use_appnope():
"""Should we use appnope for dealing with OS X app nap?
Checks if we are on OS X 10.9 or greater.
"""
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _ignore_CTRL_C_posix():
"""Ignore CTRL+C (SIGINT)."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _allow_CTRL_C_posix():
"""Take CTRL+C into account (SIGINT)."""
signal.signal(signal.SIGINT, signal.default_int_handler)
def _ignore_CTRL_C_other():
"""Ignore CTRL+C (not implemented)."""
pass
def _allow_CTRL_C_other():
"""Take CTRL+C into account (not implemented)."""
pass
if os.name == 'posix':
import select
import signal
stdin_ready = _stdin_ready_posix
ignore_CTRL_C = _ignore_CTRL_C_posix
allow_CTRL_C = _allow_CTRL_C_posix
elif os.name == 'nt':
import msvcrt
stdin_ready = _stdin_ready_nt
ignore_CTRL_C = _ignore_CTRL_C_other
allow_CTRL_C = _allow_CTRL_C_other
else:
stdin_ready = _stdin_ready_other
ignore_CTRL_C = _ignore_CTRL_C_other
allow_CTRL_C = _allow_CTRL_C_other
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""DEPRECATED since yap_ipython 5.0
Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
if ctypes is None:
warn("yap_ipython GUI event loop requires ctypes, %gui will not be available")
else:
self.PYFUNC = ctypes.PYFUNCTYPE(ctypes.c_int)
self.guihooks = {}
self.aliases = {}
self.apps = {}
self._reset()
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._installed = False
self._current_gui = None
def get_pyos_inputhook(self):
"""DEPRECATED since yap_ipython 5.0
Return the current PyOS_InputHook as a ctypes.c_void_p."""
warn("`get_pyos_inputhook` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
return ctypes.c_void_p.in_dll(ctypes.pythonapi,"PyOS_InputHook")
def get_pyos_inputhook_as_func(self):
"""DEPRECATED since yap_ipython 5.0
        Return the current PyOS_InputHook as a ctypes.PYFUNCTYPE."""
warn("`get_pyos_inputhook_as_func` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
return self.PYFUNC.in_dll(ctypes.pythonapi,"PyOS_InputHook")
def set_inputhook(self, callback):
"""DEPRECATED since yap_ipython 5.0
Set PyOS_InputHook to callback and return the previous one."""
# On platforms with 'readline' support, it's all too likely to
# have a KeyboardInterrupt signal delivered *even before* an
# initial ``try:`` clause in the callback can be executed, so
# we need to disable CTRL+C in this situation.
ignore_CTRL_C()
self._callback = callback
self._callback_pyfunctype = self.PYFUNC(callback)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = \
ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
self._installed = True
return original
def clear_inputhook(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main yap_ipython
app like :meth:`enable_gui`."""
warn("`clear_inputhook` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
allow_CTRL_C()
self._reset()
return original
def clear_app_refs(self, gui=None):
"""DEPRECATED since yap_ipython 5.0
Clear yap_ipython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If one of ('wx', 'qt4'), clear
            the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
warn("`clear_app_refs` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if gui is None:
self.apps = {}
elif gui in self.apps:
del self.apps[gui]
def register(self, toolkitname, *aliases):
"""DEPRECATED since yap_ipython 5.0
Register a class to provide the event loop for a given GUI.
This is intended to be used as a class decorator. It should be passed
the names with which to register this GUI integration. The classes
themselves should subclass :class:`InputHookBase`.
::
@inputhook_manager.register('qt')
class QtInputHook(InputHookBase):
def enable(self, app=None):
...
"""
warn("`register` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
def decorator(cls):
if ctypes is not None:
inst = cls(self)
self.guihooks[toolkitname] = inst
for a in aliases:
self.aliases[a] = toolkitname
return cls
return decorator
def current_gui(self):
"""DEPRECATED since yap_ipython 5.0
Return a string indicating the currently active GUI or None."""
warn("`current_gui` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
return self._current_gui
def enable_gui(self, gui=None, app=None):
"""DEPRECATED since yap_ipython 5.0
Switch amongst GUI input hooks by name.
This is a higher level method than :meth:`set_inputhook` - it uses the
GUI name to look up a registered object which enables the input hook
for that GUI.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
warn("`enable_gui` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if gui in (None, GUI_NONE):
return self.disable_gui()
if gui in self.aliases:
return self.enable_gui(self.aliases[gui], app)
try:
gui_hook = self.guihooks[gui]
except KeyError:
e = "Invalid GUI request {!r}, valid ones are: {}"
raise ValueError(e.format(gui, ', '.join(self.guihooks)))
self._current_gui = gui
app = gui_hook.enable(app)
if app is not None:
app._in_event_loop = True
self.apps[gui] = app
return app
def disable_gui(self):
"""DEPRECATED since yap_ipython 5.0
Disable GUI event loop integration.
If an application was registered, this sets its ``_in_event_loop``
attribute to False. It then calls :meth:`clear_inputhook`.
"""
warn("`disable_gui` is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
gui = self._current_gui
if gui in self.apps:
self.apps[gui]._in_event_loop = False
return self.clear_inputhook()
class InputHookBase(object):
"""DEPRECATED since yap_ipython 5.0
Base class for input hooks for specific toolkits.
Subclasses should define an :meth:`enable` method with one argument, ``app``,
which will either be an instance of the toolkit's application class, or None.
They may also define a :meth:`disable` method with no arguments.
"""
def __init__(self, manager):
self.manager = manager
def disable(self):
pass
inputhook_manager = InputHookManager()
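# Illustrative sketch of the (deprecated) manager above, mirroring the
# enable_gui/disable_gui docstrings: switch the input hook to Qt, work with
# the returned application object, then clear the hook again.
def _example_enable_qt():  # pragma: no cover
    app = inputhook_manager.enable_gui('qt4')
    # ... interact with the toolkit here ...
    inputhook_manager.disable_gui()
    return app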
@inputhook_manager.register('osx')
class NullInputHook(InputHookBase):
"""DEPRECATED since yap_ipython 5.0
A null inputhook that doesn't need to do anything"""
def enable(self, app=None):
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
@inputhook_manager.register('wx')
class WxInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal based applications like
        yap_ipython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import wx
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from yap_ipython.lib.inputhookwx import inputhook_wx
self.manager.set_inputhook(inputhook_wx)
if _use_appnope():
from appnope import nope
nope()
import wx
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
return app
def disable(self):
"""DEPRECATED since yap_ipython 5.0
Disable event loop integration with wxPython.
        This restores App Nap on OS X
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if _use_appnope():
from appnope import nap
nap()
@inputhook_manager.register('qt', 'qt4')
class Qt4InputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal based applications like
        yap_ipython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtCore
app = QtGui.QApplication(sys.argv)
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
from yap_ipython.lib.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self.manager, app)
self.manager.set_inputhook(inputhook_qt4)
if _use_appnope():
from appnope import nope
nope()
return app
def disable_qt4(self):
"""DEPRECATED since yap_ipython 5.0
Disable event loop integration with PyQt4.
This restores App Nap on OS X
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if _use_appnope():
from appnope import nap
nap()
@inputhook_manager.register('qt5')
class Qt5InputHook(Qt4InputHook):
def enable(self, app=None):
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
os.environ['QT_API'] = 'pyqt5'
return Qt4InputHook.enable(self, app)
@inputhook_manager.register('gtk')
class GtkInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for PyGTK, which allows
PyGTK to integrate with terminal-based applications like
yap_ipython.
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import gtk
try:
gtk.set_interactive(True)
except AttributeError:
# For older versions of gtk, use our own ctypes version
from yap_ipython.lib.inputhookgtk import inputhook_gtk
self.manager.set_inputhook(inputhook_gtk)
@inputhook_manager.register('tk')
class TkInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
if app is None:
try:
from tkinter import Tk # Py 3
except ImportError:
from Tkinter import Tk # Py 2
app = Tk()
app.withdraw()
self.manager.apps[GUI_TK] = app
return app
@inputhook_manager.register('glut')
class GlutInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for GLUT, which allows GLUT to
integrate with terminal-based applications like yap_ipython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but use the created one instead. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import OpenGL.GLUT as glut
from yap_ipython.lib.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self.manager.apps:
glut.glutInit( sys.argv )
glut.glutInitDisplayMode( glut_display_mode )
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption( glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS )
glut.glutCreateWindow( sys.argv[0] )
glut.glutReshapeWindow( 1, 1 )
glut.glutHideWindow( )
glut.glutWMCloseFunc( glut_close )
glut.glutDisplayFunc( glut_display )
glut.glutIdleFunc( glut_idle )
else:
glut.glutWMCloseFunc( glut_close )
glut.glutDisplayFunc( glut_display )
glut.glutIdleFunc( glut_idle)
self.manager.set_inputhook( inputhook_glut )
def disable(self):
"""DEPRECATED since yap_ipython 5.0
Disable event loop integration with glut.
This sets PyOS_InputHook to NULL, sets the display function to a
dummy one, and sets the timer to a dummy timer that will be triggered
very far in the future.
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
super(GlutInputHook, self).disable()
@inputhook_manager.register('pyglet')
class PygletInputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal-based applications like
yap_ipython.
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
from yap_ipython.lib.inputhookpyglet import inputhook_pyglet
self.manager.set_inputhook(inputhook_pyglet)
return app
@inputhook_manager.register('gtk3')
class Gtk3InputHook(InputHookBase):
def enable(self, app=None):
"""DEPRECATED since yap_ipython 5.0
Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
Gtk3 to integrate with terminal-based applications like
yap_ipython.
"""
warn("This function is deprecated since yap_ipython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
from yap_ipython.lib.inputhookgtk3 import inputhook_gtk3
self.manager.set_inputhook(inputhook_gtk3)
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
enable_gui = inputhook_manager.enable_gui
disable_gui = inputhook_manager.disable_gui
register = inputhook_manager.register
guis = inputhook_manager.guihooks
def _deprecated_disable():
warn("This function is deprecated since yap_ipython 4.0 use disable_gui() instead",
DeprecationWarning, stacklevel=2)
inputhook_manager.disable_gui()
disable_wx = disable_qt4 = disable_gtk = disable_gtk3 = disable_glut = \
disable_pyglet = disable_osx = _deprecated_disable
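# A minimal sketch of how the deprecated module-level helpers above fit
# together; the 'dummy' toolkit name and the _demo_dummy_gui() function are
# hypothetical and only illustrate the register()/enable_gui()/disable_gui()
# round trip.
def _demo_dummy_gui():
    @register('dummy')
    class DummyInputHook(InputHookBase):
        def enable(self, app=None):
            # Install a no-op callable as PyOS_InputHook through the manager.
            self.manager.set_inputhook(lambda: 0)
            return app
    print(sorted(guis))    # every registered GUI name, 'dummy' included
    enable_gui('dummy')    # activates the hook (emits a DeprecationWarning)
    disable_gui()          # clears PyOS_InputHook again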

View File

@@ -0,0 +1,172 @@
# coding: utf-8
"""
GLUT Inputhook support functions
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
# GLUT is quite an old library and it is difficult to ensure proper
# integration within yap_ipython since original GLUT does not allow handling
# events one by one. Instead, it requires the mainloop to be entered
# and never returned (there is not even a function to exit the
# mainloop). Fortunately, there are alternatives such as freeglut
# (available for Linux and Windows) and the OSX implementation gives
# access to a glutCheckLoop() function that blocks itself until a new
# event is received. This means we have to set up the idle callback to
# ensure we get at least one event that will unblock the function.
#
# Furthermore, it is not possible to install these handlers without a window
# being first created. We choose to make this window invisible. This means that
# display mode options are set at this level and the user won't be able to change
# them later without modifying the code. This should probably be made available
# via the yap_ipython options system.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import time
import signal
import OpenGL.GLUT as glut
import OpenGL.platform as platform
from timeit import default_timer as clock
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Frames per second: 60
# Should probably be a yap_ipython option
glut_fps = 60
# Display mode: double buffered + RGBA + depth
# Should probably be a yap_ipython option
glut_display_mode = (glut.GLUT_DOUBLE |
glut.GLUT_RGBA |
glut.GLUT_DEPTH)
glutMainLoopEvent = None
if sys.platform == 'darwin':
try:
glutCheckLoop = platform.createBaseFunction(
'glutCheckLoop', dll=platform.GLUT, resultType=None,
argTypes=[],
doc='glutCheckLoop( ) -> None',
argNames=(),
)
except AttributeError:
raise RuntimeError(
'''Your glut implementation does not allow interactive sessions. '''
'''Consider installing freeglut.''')
glutMainLoopEvent = glutCheckLoop
elif glut.HAVE_FREEGLUT:
glutMainLoopEvent = glut.glutMainLoopEvent
else:
raise RuntimeError(
'''Your glut implementation does not allow interactive sessions. '''
'''Consider installing freeglut.''')
#-----------------------------------------------------------------------------
# Platform-dependent imports and functions
#-----------------------------------------------------------------------------
if os.name == 'posix':
import select
def stdin_ready():
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
if infds:
return True
else:
return False
elif sys.platform == 'win32':
import msvcrt
def stdin_ready():
return msvcrt.kbhit()
#-----------------------------------------------------------------------------
# Callback functions
#-----------------------------------------------------------------------------
def glut_display():
# Dummy display function
pass
def glut_idle():
# Dummy idle function
pass
def glut_close():
# Close function only hides the current window
glut.glutHideWindow()
glutMainLoopEvent()
def glut_int_handler(signum, frame):
# Catch SIGINT and print the default message
signal.signal(signal.SIGINT, signal.default_int_handler)
print('\nKeyboardInterrupt')
# Need to reprint the prompt at this stage
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def inputhook_glut():
"""Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
though for best performance.
"""
# We need to protect against a user pressing Control-C when yap_ipython is
# idle and this is running. We trap KeyboardInterrupt and pass.
signal.signal(signal.SIGINT, glut_int_handler)
try:
t = clock()
# Make sure the default window is set after a window has been closed
if glut.glutGetWindow() == 0:
glut.glutSetWindow( 1 )
glutMainLoopEvent()
return 0
while not stdin_ready():
glutMainLoopEvent()
# We need to sleep at this point to keep the idle CPU load
# low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
except KeyboardInterrupt:
pass
return 0
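# A minimal sketch of how a hook like inputhook_glut above ends up installed:
# the (deprecated) InputHookManager does essentially this with ctypes; the
# _install_inputhook() helper here is hypothetical and only illustrates the
# mechanism, e.g. _install_inputhook(inputhook_glut).
import ctypes

def _install_inputhook(callback):
    # PyOS_InputHook is an `int (*)(void)` pointer inside the interpreter;
    # it is called whenever the interpreter waits for interactive input.
    hook_ptr = ctypes.c_void_p.in_dll(ctypes.pythonapi, "PyOS_InputHook")
    cfunc = ctypes.PYFUNCTYPE(ctypes.c_int)(callback)
    hook_ptr.value = ctypes.cast(cfunc, ctypes.c_void_p).value
    return cfunc  # keep a reference, or the callback gets garbage-collected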

View File

@@ -0,0 +1,35 @@
# encoding: utf-8
"""
Enable pygtk to be used interactively by setting PyOS_InputHook.
Authors: Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import gtk, gobject
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
gtk.main_quit()
return False
def inputhook_gtk():
gobject.io_add_watch(sys.stdin, gobject.IO_IN, _main_quit)
gtk.main()
return 0

View File

@@ -0,0 +1,34 @@
# encoding: utf-8
"""
Enable Gtk3 to be used interactively by yap_ipython.
Authors: Thomi Richards
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, the yap_ipython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from gi.repository import Gtk, GLib
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
Gtk.main_quit()
return False
def inputhook_gtk3():
GLib.io_add_watch(sys.stdin, GLib.PRIORITY_DEFAULT, GLib.IO_IN, _main_quit)
Gtk.main()
return 0
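# A minimal usage sketch, assuming the deprecated yap_ipython.lib.inputhook
# module shown earlier in this commit is importable; _demo_enable_gtk3() is
# hypothetical.
def _demo_enable_gtk3():
    from yap_ipython.lib.inputhook import inputhook_manager
    # Each time the interpreter waits for input, inputhook_gtk3() runs the
    # Gtk main loop until stdin becomes readable again.
    inputhook_manager.set_inputhook(inputhook_gtk3)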

View File

@@ -0,0 +1,111 @@
# encoding: utf-8
"""
Enable pyglet to be used interactively by setting PyOS_InputHook.
Authors
-------
* Nicolas P. Rougier
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import time
from timeit import default_timer as clock
import pyglet
#-----------------------------------------------------------------------------
# Platform-dependent imports and functions
#-----------------------------------------------------------------------------
if os.name == 'posix':
import select
def stdin_ready():
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
if infds:
return True
else:
return False
elif sys.platform == 'win32':
import msvcrt
def stdin_ready():
return msvcrt.kbhit()
# On linux only, window.flip() has a bug that causes an AttributeError on
# window close. For details, see:
# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
if sys.platform.startswith('linux'):
def flip(window):
try:
window.flip()
except AttributeError:
pass
else:
def flip(window):
window.flip()
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def inputhook_pyglet():
"""Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise CPU usage would be at 100%. This sleep time should be
tuned for best performance.
"""
# We need to protect against a user pressing Control-C when yap_ipython is
# idle and this is running. We trap KeyboardInterrupt and pass.
try:
t = clock()
while not stdin_ready():
pyglet.clock.tick()
for window in pyglet.app.windows:
window.switch_to()
window.dispatch_events()
window.dispatch_event('on_draw')
flip(window)
# We need to sleep at this point to keep the idle CPU load
# low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
except KeyboardInterrupt:
pass
return 0
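# A small sketch of the adaptive sleep used in the loop above, pulled out as a
# standalone helper; _adaptive_sleep() is hypothetical and simply mirrors the
# thresholds documented in the comments (1 s when the prompt has been idle for
# more than 10 s, 50 ms above 0.1 s, otherwise 1 ms, keeping idle CPU load at
# roughly 1% or less).
def _adaptive_sleep(idle_seconds):
    if idle_seconds > 10.0:
        time.sleep(1.0)      # long idle: check stdin only once a second
    elif idle_seconds > 0.1:
        time.sleep(0.05)     # few GUI events: sleep a bit longer
    else:
        time.sleep(0.001)    # many GUI events: sleep only very little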

View File

@@ -0,0 +1,180 @@
# -*- coding: utf-8 -*-
"""
Qt4's inputhook support function
Author: Christian Boos
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import signal
import threading
from yap_ipython.core.interactiveshell import InteractiveShell
from yap_ipython.external.qt_for_kernel import QtCore, QtGui
from yap_ipython.lib.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready
#-----------------------------------------------------------------------------
# Module Globals
#-----------------------------------------------------------------------------
got_kbdint = False
sigint_timer = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def create_inputhook_qt4(mgr, app=None):
"""Create an input hook for running the Qt4 application event loop.
Parameters
----------
mgr : an InputHookManager
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Returns
-------
A pair consisting of a Qt Application (either the one given or the
one found or created) and an inputhook.
Notes
-----
We use a custom input hook instead of PyQt4's default one, as it
interacts better with the readline packages (issue #481).
The inputhook function works in tandem with a 'pre_prompt_hook'
which automatically restores the hook as an inputhook in case the
latter has been temporarily disabled after having intercepted a
KeyboardInterrupt.
"""
if app is None:
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([" "])
# Re-use previously created inputhook if any
ip = InteractiveShell.instance()
if hasattr(ip, '_inputhook_qt4'):
return app, ip._inputhook_qt4
# Otherwise create the inputhook_qt4/preprompthook_qt4 pair of
# hooks (they both share the got_kbdint flag)
def inputhook_qt4():
"""PyOS_InputHook python hook for Qt4.
Process pending Qt events and if there's no pending keyboard
input, spend a short slice of time (50ms) running the Qt event
loop.
As a Python ctypes callback can't raise an exception, we catch
the KeyboardInterrupt and temporarily deactivate the hook,
which will let a *second* CTRL+C be processed normally and go
back to a clean prompt line.
"""
try:
allow_CTRL_C()
app = QtCore.QCoreApplication.instance()
if not app: # shouldn't happen, but safer if it happens anyway...
return 0
app.processEvents(QtCore.QEventLoop.AllEvents, 300)
if not stdin_ready():
# Generally a program would run QCoreApplication::exec()
# from main() to enter and process the Qt event loop until
# quit() or exit() is called and the program terminates.
#
# For our input hook integration, we need to repeatedly
# enter and process the Qt event loop for only a short
# amount of time (say 50ms) to ensure that Python stays
# responsive to other user inputs.
#
# A naive approach would be to repeatedly call
# QCoreApplication::exec(), using a timer to quit after a
# short amount of time. Unfortunately, QCoreApplication
# emits an aboutToQuit signal before stopping, which has
# the undesirable effect of closing all modal windows.
#
# To work around this problem, we instead create a
# QEventLoop and call QEventLoop::exec(). Other than
# setting some state variables which do not seem to be
# used anywhere, the only thing QCoreApplication adds is
# the aboutToQuit signal which is precisely what we are
# trying to avoid.
timer = QtCore.QTimer()
event_loop = QtCore.QEventLoop()
timer.timeout.connect(event_loop.quit)
while not stdin_ready():
timer.start(50)
event_loop.exec_()
timer.stop()
except KeyboardInterrupt:
global got_kbdint, sigint_timer
ignore_CTRL_C()
got_kbdint = True
mgr.clear_inputhook()
# This generates a second SIGINT so the user doesn't have to
# press CTRL+C twice to get a clean prompt.
#
# Since we can't catch the resulting KeyboardInterrupt here
# (because this is a ctypes callback), we use a timer to
# generate the SIGINT after we leave this callback.
#
# Unfortunately this doesn't work on Windows (SIGINT kills
# Python and CTRL_C_EVENT doesn't work).
if(os.name == 'posix'):
pid = os.getpid()
if(not sigint_timer):
sigint_timer = threading.Timer(.01, os.kill,
args=[pid, signal.SIGINT] )
sigint_timer.start()
else:
print("\nKeyboardInterrupt - Ctrl-C again for new prompt")
except: # NO exceptions are allowed to escape from a ctypes callback
ignore_CTRL_C()
from traceback import print_exc
print_exc()
print("Got exception from inputhook_qt4, unregistering.")
mgr.clear_inputhook()
finally:
allow_CTRL_C()
return 0
def preprompthook_qt4(ishell):
"""'pre_prompt_hook' used to restore the Qt4 input hook
(in case the latter was temporarily deactivated after a
CTRL+C)
"""
global got_kbdint, sigint_timer
if(sigint_timer):
sigint_timer.cancel()
sigint_timer = None
if got_kbdint:
mgr.set_inputhook(inputhook_qt4)
got_kbdint = False
ip._inputhook_qt4 = inputhook_qt4
ip.set_hook('pre_prompt_hook', preprompthook_qt4)
return app, inputhook_qt4
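# A minimal usage sketch, assuming the deprecated InputHookManager from
# yap_ipython.lib.inputhook; this is essentially what the Qt4InputHook.enable()
# method shown earlier in this commit does with the pair returned above.
def _demo_enable_qt4():
    from yap_ipython.lib.inputhook import inputhook_manager
    app, hook = create_inputhook_qt4(inputhook_manager)
    inputhook_manager.set_inputhook(hook)
    return app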

View File

@@ -0,0 +1,167 @@
# encoding: utf-8
"""
Enable wxPython to be used interactively by setting PyOS_InputHook.
Authors: Robin Dunn, Brian Granger, Ondrej Certik
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import signal
import time
from timeit import default_timer as clock
import wx
from yap_ipython.lib.inputhook import stdin_ready
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def inputhook_wx1():
"""Run the wx event loop by processing pending events only.
This approach seems to work, but its performance is not great as it
relies on having PyOS_InputHook called regularly.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# Make a temporary event loop and process system events until
# there are no more waiting, then allow idle events (which
# will also deal with pending or posted wx events.)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
while evtloop.Pending():
evtloop.Dispatch()
app.ProcessIdle()
del ea
except KeyboardInterrupt:
pass
return 0
class EventLoopTimer(wx.Timer):
def __init__(self, func):
self.func = func
wx.Timer.__init__(self)
def Notify(self):
self.func()
class EventLoopRunner(object):
def Run(self, time):
self.evtloop = wx.EventLoop()
self.timer = EventLoopTimer(self.check_stdin)
self.timer.Start(time)
self.evtloop.Run()
def check_stdin(self):
if stdin_ready():
self.timer.Stop()
self.evtloop.Exit()
def inputhook_wx2():
"""Run the wx event loop, polling for stdin.
This version runs the wx event loop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10) # CHANGE time here to control polling interval
except KeyboardInterrupt:
pass
return 0
def inputhook_wx3():
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
time.sleep is inserted. This is needed, otherwise CPU usage would be at 100%.
This sleep time should be tuned for best performance.
"""
# We need to protect against a user pressing Control-C when yap_ipython is
# idle and this is running. We trap KeyboardInterrupt and pass.
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix this by setting it
# back to the Python default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
t = clock()
while not stdin_ready():
while evtloop.Pending():
t = clock()
evtloop.Dispatch()
app.ProcessIdle()
# We need to sleep at this point to keep the idle CPU load
# low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
del ea
except KeyboardInterrupt:
pass
return 0
if sys.platform == 'darwin':
# On OSX, evtloop.Pending() always returns True, regardless of there being
# any events pending. As such we can't use implementations 1 or 3 of the
# inputhook as those depend on a pending/dispatch loop.
inputhook_wx = inputhook_wx2
else:
# This is our default implementation
inputhook_wx = inputhook_wx3
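# A minimal usage sketch, assuming the deprecated InputHookManager from
# yap_ipython.lib.inputhook; a wx.App must exist before the hook has anything
# to run, which is why WxInputHook.enable() creates one when none is found.
def _demo_enable_wx():
    from yap_ipython.lib.inputhook import inputhook_manager
    app = wx.GetApp()
    if app is None:
        app = wx.App(redirect=False, clearSigInt=False)
    inputhook_manager.set_inputhook(inputhook_wx)
    return app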

View File

@@ -0,0 +1,13 @@
"""[DEPRECATED] Utilities for connecting to kernels
Moved to yap_ipython.kernel.connect
"""
import warnings
warnings.warn("yap_ipython.lib.kernel moved to yap_ipython.kernel.connect in yap_ipython 1.0,"
" and will be removed in yap_ipython 6.0.",
DeprecationWarning
)
from yap_kernel.connect import *

View File

@@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
"""Tools for handling LaTeX."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO, open
import os
import tempfile
import shutil
import subprocess
from base64 import encodebytes
from yap_ipython.utils.process import find_cmd, FindCmdError
from traitlets.config import get_config
from traitlets.config.configurable import SingletonConfigurable
from traitlets import List, Bool, Unicode
from yap_ipython.utils.py3compat import cast_unicode
class LaTeXTool(SingletonConfigurable):
"""An object to store configuration of the LaTeX tool."""
def _config_default(self):
return get_config()
backends = List(
Unicode(), ["matplotlib", "dvipng"],
help="Preferred backend to draw LaTeX math equations. "
"Backends in the list are checked one by one and the first "
"usable one is used. Note that `matplotlib` backend "
"is usable only for inline style equations. To draw "
"display style equations, `dvipng` backend must be specified. ",
# It is a List instead of Enum, to make configuration more
# flexible. For example, to use matplotlib mainly but dvipng
# for display style, the default ["matplotlib", "dvipng"] can
# be used. To NOT use dvipng so that other repr such as
# unicode pretty printing is used, you can use ["matplotlib"].
).tag(config=True)
use_breqn = Bool(
True,
help="Use breqn.sty to automatically break long equations. "
"This configuration takes effect only for dvipng backend.",
).tag(config=True)
packages = List(
['amsmath', 'amsthm', 'amssymb', 'bm'],
help="A list of packages to use for dvipng backend. "
"'breqn' will be automatically appended when use_breqn=True.",
).tag(config=True)
preamble = Unicode(
help="Additional preamble to use when generating LaTeX source "
"for dvipng backend.",
).tag(config=True)
def latex_to_png(s, encode=False, backend=None, wrap=False):
"""Render a LaTeX string to PNG.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
encode : bool, optional
Whether the PNG data should be base64 encoded to make it JSON-serializable.
backend : {matplotlib, dvipng}
Backend for producing PNG data.
wrap : bool
If True, automatically wrap `s` as a LaTeX equation.
None is returned when the backend cannot be used.
"""
s = cast_unicode(s)
allowed_backends = LaTeXTool.instance().backends
if backend is None:
backend = allowed_backends[0]
if backend not in allowed_backends:
return None
if backend == 'matplotlib':
f = latex_to_png_mpl
elif backend == 'dvipng':
f = latex_to_png_dvipng
else:
raise ValueError('No such backend {0}'.format(backend))
bin_data = f(s, wrap)
if encode and bin_data:
bin_data = encodebytes(bin_data)
return bin_data
def latex_to_png_mpl(s, wrap):
try:
from matplotlib import mathtext
from pyparsing import ParseFatalException
except ImportError:
return None
# mpl mathtext doesn't support display math, force inline
s = s.replace('$$', '$')
if wrap:
s = u'${0}$'.format(s)
try:
mt = mathtext.MathTextParser('bitmap')
f = BytesIO()
mt.to_png(f, s, fontsize=12)
return f.getvalue()
except (ValueError, RuntimeError, ParseFatalException):
return None
def latex_to_png_dvipng(s, wrap):
try:
find_cmd('latex')
find_cmd('dvipng')
except FindCmdError:
return None
try:
workdir = tempfile.mkdtemp()
tmpfile = os.path.join(workdir, "tmp.tex")
dvifile = os.path.join(workdir, "tmp.dvi")
outfile = os.path.join(workdir, "tmp.png")
with open(tmpfile, "w", encoding='utf8') as f:
f.writelines(genelatex(s, wrap))
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(
["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
cwd=workdir, stdout=devnull, stderr=devnull)
subprocess.check_call(
["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
"-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
stdout=devnull, stderr=devnull)
with open(outfile, "rb") as f:
return f.read()
except subprocess.CalledProcessError:
return None
finally:
shutil.rmtree(workdir)
def kpsewhich(filename):
"""Invoke kpsewhich command with an argument `filename`."""
try:
find_cmd("kpsewhich")
proc = subprocess.Popen(
["kpsewhich", filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return stdout.strip().decode('utf8', 'replace')
except FindCmdError:
pass
def genelatex(body, wrap):
"""Generate LaTeX document for dvipng backend."""
lt = LaTeXTool.instance()
breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
yield r'\documentclass{article}'
packages = lt.packages
if breqn:
packages = packages + ['breqn']
for pack in packages:
yield r'\usepackage{{{0}}}'.format(pack)
yield r'\pagestyle{empty}'
if lt.preamble:
yield lt.preamble
yield r'\begin{document}'
if breqn:
yield r'\begin{dmath*}'
yield body
yield r'\end{dmath*}'
elif wrap:
yield u'$${0}$$'.format(body)
else:
yield body
yield r'\end{document}'
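# A small sketch showing the document genelatex() produces; _demo_genelatex()
# is hypothetical and just joins the generator output so it can be inspected
# (with wrap=True and breqn unavailable, the body ends up between $$ ... $$).
def _demo_genelatex():
    return u'\n'.join(genelatex(r'\frac{\alpha}{\beta}', wrap=True))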
_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
"""Render LaTeX to HTML with embedded PNG data using data URIs.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
alt : str
The alt text to use for the HTML.
"""
base64_data = latex_to_png(s, encode=True)
if base64_data:
    return _data_uri_template_png % (base64_data.decode('ascii'), alt)
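# A minimal usage sketch of the two public helpers above; the output file name
# is hypothetical, and both calls fall back to None when neither the
# matplotlib nor the dvipng backend is usable.
def _demo_render_equation():
    # wrap=True adds the $...$ (or $$...$$) delimiters itself.
    png = latex_to_png(r'\frac{\alpha}{\beta}', wrap=True)
    if png is not None:
        with open('equation.png', 'wb') as f:
            f.write(png)
    # latex_to_html() expects an already-delimited equation and returns an
    # <img> tag with the PNG embedded as a base64 data URI.
    return latex_to_html(r'$\frac{\alpha}{\beta}$', alt='alpha over beta')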

View File

@@ -0,0 +1,512 @@
# -*- coding: utf-8 -*-
"""
Defines a variety of Pygments lexers for highlighting yap_ipython code.
This includes:
IPythonLexer, IPython3Lexer
Lexers for pure yap_ipython (python + magic/shell commands)
IPythonPartialTracebackLexer, IPythonTracebackLexer
Supports 2.x and 3.x via keyword `python3`. The partial traceback
lexer reads everything but the Python code appearing in a traceback.
The full lexer combines the partial lexer with an yap_ipython lexer.
IPythonConsoleLexer
A lexer for yap_ipython console sessions, with support for tracebacks.
IPyLexer
A friendly lexer which examines the first line of text and from it,
decides whether to use an yap_ipython lexer or an yap_ipython console lexer.
This is probably the only lexer that needs to be explicitly added
to Pygments.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the yap_ipython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Standard library
import re
# Third party
from pygments.lexers import BashLexer, PythonLexer, Python3Lexer
from pygments.lexer import (
Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
)
from pygments.token import (
Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
)
from pygments.util import get_bool_opt
# Local
line_re = re.compile('.*?\n')
__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
'IPythonConsoleLexer', 'IPyLexer']
ipython_tokens = [
(r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
(r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
(r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
(r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
(r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
using(BashLexer), Text)),
(r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
(r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
(r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
(r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
(r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
]
def build_ipy_lexer(python3):
"""Builds yap_ipython lexers depending on the value of `python3`.
The lexer inherits from an appropriate Python lexer and then adds
information about yap_ipython specific keywords (i.e. magic commands,
shell commands, etc.)
Parameters
----------
python3 : bool
If `True`, then build a yap_ipython lexer from a Python 3 lexer.
"""
# It would be nice to have a single yap_ipython lexer class which takes
# a boolean `python3`. But since there are two Python lexer classes,
# we will also have two yap_ipython lexer classes.
if python3:
PyLexer = Python3Lexer
name = 'IPython3'
aliases = ['ipython3']
doc = """IPython3 Lexer"""
else:
PyLexer = PythonLexer
name = 'yap_ipython'
aliases = ['ipython2', 'ipython']
doc = """yap_ipython Lexer"""
tokens = PyLexer.tokens.copy()
tokens['root'] = ipython_tokens + tokens['root']
attrs = {'name': name, 'aliases': aliases, 'filenames': [],
'__doc__': doc, 'tokens': tokens}
return type(name, (PyLexer,), attrs)
IPython3Lexer = build_ipy_lexer(python3=True)
IPythonLexer = build_ipy_lexer(python3=False)
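# A quick sketch of what the extra ipython_tokens rules above add on top of
# the stock Python lexer; get_tokens() is the standard Pygments entry point
# and the sample line is arbitrary.
def _demo_magic_tokens():
    lexer = IPython3Lexer()
    # '%timeit' is tokenized as an Operator ('%') plus a Keyword ('timeit'),
    # marking it as a magic rather than ordinary Python source.
    return list(lexer.get_tokens("%timeit sum(range(10))\n"))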
class IPythonPartialTracebackLexer(RegexLexer):
"""
Partial lexer for yap_ipython tracebacks.
Handles all the non-python output. This works for both Python 2.x and 3.x.
"""
name = 'yap_ipython Partial Traceback'
tokens = {
'root': [
# Tracebacks for syntax errors have a different style.
# For both types of tracebacks, we mark the first line with
# Generic.Traceback. For syntax errors, we mark the filename
# as we mark the filenames for non-syntax tracebacks.
#
# These two regexps define how IPythonConsoleLexer finds a
# traceback.
#
## Non-syntax traceback
(r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
## Syntax traceback
(r'^( File)(.*)(, line )(\d+\n)',
bygroups(Generic.Traceback, Name.Namespace,
Generic.Traceback, Literal.Number.Integer)),
# (Exception Identifier)(Whitespace)(Traceback Message)
(r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
bygroups(Name.Exception, Generic.Whitespace, Text)),
# (Module/Filename)(Text)(Callee)(Function Signature)
# Better options for callee and function signature?
(r'(.*)( in )(.*)(\(.*\)\n)',
bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
# Regular line: (Whitespace)(Line Number)(Python Code)
(r'(\s*?)(\d+)(.*?\n)',
bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
# Emphasized line: (Arrow)(Line Number)(Python Code)
# Using Exception token so arrow color matches the Exception.
(r'(-*>?\s?)(\d+)(.*?\n)',
bygroups(Name.Exception, Literal.Number.Integer, Other)),
# (Exception Identifier)(Message)
(r'(?u)(^[^\d\W]\w*)(:.*?\n)',
bygroups(Name.Exception, Text)),
# Tag everything else as Other, will be handled later.
(r'.*\n', Other),
],
}
class IPythonTracebackLexer(DelegatingLexer):
"""
yap_ipython traceback lexer.
For doctests, the tracebacks can be snipped as much as desired with the
exception of the lines that designate a traceback. For non-syntax error
tracebacks, this is the line of hyphens. For syntax error tracebacks,
this is the line which lists the File and line number.
"""
# The lexer inherits from DelegatingLexer. The "root" lexer is an
# appropriate yap_ipython lexer, which depends on the value of the boolean
# `python3`. First, we parse with the partial yap_ipython traceback lexer.
# Then, any code marked with the "Other" token is delegated to the root
# lexer.
#
name = 'yap_ipython Traceback'
aliases = ['ipythontb']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
if self.python3:
self.aliases = ['ipython3tb']
else:
self.aliases = ['ipython2tb', 'ipythontb']
if self.python3:
IPyLexer = IPython3Lexer
else:
IPyLexer = IPythonLexer
DelegatingLexer.__init__(self, IPyLexer,
IPythonPartialTracebackLexer, **options)
class IPythonConsoleLexer(Lexer):
"""
A yap_ipython console lexer for yap_ipython code-blocks and doctests, such as:
.. code-block:: rst
.. code-block:: ipythonconsole
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print a
foo
In [4]: 1 / 0
Support is also provided for yap_ipython exceptions:
.. code-block:: rst
.. code-block:: ipythonconsole
In [1]: raise Exception
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-1-fca2ab0ca76b> in <module>()
----> 1 raise Exception
Exception:
"""
name = 'yap_ipython console session'
aliases = ['ipythonconsole']
mimetypes = ['text/x-ipython-console']
# The regexps used to determine what is input and what is output.
# The default prompts for yap_ipython are:
#
# in = 'In [#]: '
# continuation = ' .D.: '
# template = 'Out[#]: '
#
# Where '#' is the 'prompt number' or 'execution count' and 'D'
# is a number of dots matching the width of the execution count
#
in1_regex = r'In \[[0-9]+\]: '
in2_regex = r' \.\.+\.: '
out_regex = r'Out\[[0-9]+\]: '
#: The regex to determine when a traceback starts.
ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
def __init__(self, **options):
"""Initialize the yap_ipython console lexer.
Parameters
----------
python3 : bool
If `True`, then the console inputs are parsed using a Python 3
lexer. Otherwise, they are parsed using a Python 2 lexer.
in1_regex : RegexObject
The compiled regular expression used to detect the start
of inputs. Although the yap_ipython configuration setting may have a
trailing whitespace, do not include it in the regex. If `None`,
then the default input prompt is assumed.
in2_regex : RegexObject
The compiled regular expression used to detect the continuation
of inputs. Although the yap_ipython configuration setting may have a
trailing whitespace, do not include it in the regex. If `None`,
then the default input prompt is assumed.
out_regex : RegexObject
The compiled regular expression used to detect outputs. If `None`,
then the default output prompt is assumed.
"""
self.python3 = get_bool_opt(options, 'python3', False)
if self.python3:
self.aliases = ['ipython3console']
else:
self.aliases = ['ipython2console', 'ipythonconsole']
in1_regex = options.get('in1_regex', self.in1_regex)
in2_regex = options.get('in2_regex', self.in2_regex)
out_regex = options.get('out_regex', self.out_regex)
# So that we can work with input and output prompts which have been
# rstrip'd (possibly by editors) we also need rstrip'd variants. If
# we do not do this, then such prompts will be tagged as 'output'.
# The reason we can't just use the rstrip'd variants instead is because
# we want any whitespace associated with the prompt to be inserted
# with the token. This allows formatted code to be modified so as to hide
# the appearance of prompts, with the whitespace included. One example
# use of this is in copybutton.js from the standard lib Python docs.
in1_regex_rstrip = in1_regex.rstrip() + '\n'
in2_regex_rstrip = in2_regex.rstrip() + '\n'
out_regex_rstrip = out_regex.rstrip() + '\n'
# Compile and save them all.
attrs = ['in1_regex', 'in2_regex', 'out_regex',
'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
for attr in attrs:
self.__setattr__(attr, re.compile(locals()[attr]))
Lexer.__init__(self, **options)
if self.python3:
pylexer = IPython3Lexer
tblexer = IPythonTracebackLexer
else:
pylexer = IPythonLexer
tblexer = IPythonTracebackLexer
self.pylexer = pylexer(**options)
self.tblexer = tblexer(**options)
self.reset()
def reset(self):
self.mode = 'output'
self.index = 0
self.buffer = u''
self.insertions = []
def buffered_tokens(self):
"""
Generator of unprocessed tokens after doing insertions and before
changing to a new state.
"""
if self.mode == 'output':
tokens = [(0, Generic.Output, self.buffer)]
elif self.mode == 'input':
tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
else: # traceback
tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
for i, t, v in do_insertions(self.insertions, tokens):
# All token indexes are relative to the buffer.
yield self.index + i, t, v
# Clear it all
self.index += len(self.buffer)
self.buffer = u''
self.insertions = []
def get_mci(self, line):
"""
Parses the line and returns a 3-tuple: (mode, code, insertion).
`mode` is the next mode (or state) of the lexer, and is always equal
to 'input', 'output', or 'tb'.
`code` is a portion of the line that should be added to the buffer
corresponding to the next mode and eventually lexed by another lexer.
For example, `code` could be Python code if `mode` were 'input'.
`insertion` is a 3-tuple (index, token, text) representing an
unprocessed "token" that will be inserted into the stream of tokens
that are created from the buffer once we change modes. This is usually
the input or output prompt.
In general, the next mode depends on current mode and on the contents
of `line`.
"""
# To reduce the number of regex match checks, we have multiple
# 'if' blocks instead of 'if-elif' blocks.
# Check for possible end of input
in2_match = self.in2_regex.match(line)
in2_match_rstrip = self.in2_regex_rstrip.match(line)
if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
in2_match_rstrip:
end_input = True
else:
end_input = False
if end_input and self.mode != 'tb':
# Only look for an end of input when not in tb mode.
# An ellipsis could appear within the traceback.
mode = 'output'
code = u''
insertion = (0, Generic.Prompt, line)
return mode, code, insertion
# Check for output prompt
out_match = self.out_regex.match(line)
out_match_rstrip = self.out_regex_rstrip.match(line)
if out_match or out_match_rstrip:
mode = 'output'
if out_match:
idx = out_match.end()
else:
idx = out_match_rstrip.end()
code = line[idx:]
# Use the 'heading' token for output. We cannot use Generic.Error
# since it would conflict with exceptions.
insertion = (0, Generic.Heading, line[:idx])
return mode, code, insertion
# Check for input or continuation prompt (non stripped version)
in1_match = self.in1_regex.match(line)
if in1_match or (in2_match and self.mode != 'tb'):
# New input or when not in tb, continued input.
# We do not check for continued input when in tb since it is
# allowable to replace a long stack with an ellipsis.
mode = 'input'
if in1_match:
idx = in1_match.end()
else: # in2_match
idx = in2_match.end()
code = line[idx:]
insertion = (0, Generic.Prompt, line[:idx])
return mode, code, insertion
# Check for input or continuation prompt (stripped version)
in1_match_rstrip = self.in1_regex_rstrip.match(line)
if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
# New input or when not in tb, continued input.
# We do not check for continued input when in tb since it is
# allowable to replace a long stack with an ellipsis.
mode = 'input'
if in1_match_rstrip:
idx = in1_match_rstrip.end()
else: # in2_match
idx = in2_match_rstrip.end()
code = line[idx:]
insertion = (0, Generic.Prompt, line[:idx])
return mode, code, insertion
# Check for traceback
if self.ipytb_start.match(line):
mode = 'tb'
code = line
insertion = None
return mode, code, insertion
# All other stuff...
if self.mode in ('input', 'output'):
# We assume all other text is output. Multiline input that
# does not use the continuation marker cannot be detected.
# For example, the 3 in the following is clearly output:
#
# In [1]: print 3
# 3
#
# But the following second line is part of the input:
#
# In [2]: while True:
# print True
#
# In both cases, the 2nd line will be 'output'.
#
mode = 'output'
else:
mode = 'tb'
code = line
insertion = None
return mode, code, insertion
def get_tokens_unprocessed(self, text):
self.reset()
for match in line_re.finditer(text):
line = match.group()
mode, code, insertion = self.get_mci(line)
if mode != self.mode:
# Yield buffered tokens before transitioning to new mode.
for token in self.buffered_tokens():
yield token
self.mode = mode
if insertion:
self.insertions.append((len(self.buffer), [insertion]))
self.buffer += code
for token in self.buffered_tokens():
yield token
class IPyLexer(Lexer):
"""
Primary lexer for all yap_ipython-like code.
This is a simple helper lexer. If the first line of the text begins with
"In \[[0-9]+\]:", then the entire text is parsed with an yap_ipython console
lexer. If not, then the entire text is parsed with an yap_ipython lexer.
The goal is to reduce the number of lexers that are registered
with Pygments.
"""
name = 'IPy session'
aliases = ['ipy']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
if self.python3:
self.aliases = ['ipy3']
else:
self.aliases = ['ipy2', 'ipy']
Lexer.__init__(self, **options)
self.IPythonLexer = IPythonLexer(**options)
self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
def get_tokens_unprocessed(self, text):
# Search for the input prompt anywhere...this allows code blocks to
# begin with comments as well.
if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
lex = self.IPythonConsoleLexer
else:
lex = self.IPythonLexer
for token in lex.get_tokens_unprocessed(text):
yield token
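# A minimal usage sketch: highlighting a short yap_ipython session with the
# IPyLexer defined above and a stock Pygments HTML formatter.
def _demo_highlight_session():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    session = "In [1]: !ls\nIn [2]: %time x = 2\n"
    return highlight(session, IPyLexer(python3=True), HtmlFormatter())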

Some files were not shown because too many files have changed in this diff.