update to latest ipykernel
This commit is contained in:
parent
517667c1d5
commit
386c88e372
@ -1,983 +0,0 @@
|
||||
|
||||
/* This file, dswiatoms.h, was generated automatically
|
||||
by calling "yap -L misc/buildswiatoms"
|
||||
and is based on SWIATOMS, copied from the SWI-Prolog distribution
|
||||
please do not update */
|
||||
|
||||
#define ATOM_abort ((atom_t)(0*2+1))
|
||||
#define ATOM_aborted ((atom_t)(1*2+1))
|
||||
#define ATOM_abs ((atom_t)(2*2+1))
|
||||
#define ATOM_access ((atom_t)(3*2+1))
|
||||
#define ATOM_access_level ((atom_t)(4*2+1))
|
||||
#define ATOM_acos ((atom_t)(5*2+1))
|
||||
#define ATOM_acosh ((atom_t)(6*2+1))
|
||||
#define ATOM_acyclic_term ((atom_t)(7*2+1))
|
||||
#define ATOM_add_import ((atom_t)(8*2+1))
|
||||
#define ATOM_address ((atom_t)(9*2+1))
|
||||
#define ATOM_agc ((atom_t)(10*2+1))
|
||||
#define ATOM_agc_gained ((atom_t)(11*2+1))
|
||||
#define ATOM_agc_margin ((atom_t)(12*2+1))
|
||||
#define ATOM_agc_time ((atom_t)(13*2+1))
|
||||
#define ATOM_alias ((atom_t)(14*2+1))
|
||||
#define ATOM_allow_variable_name_as_functor ((atom_t)(15*2+1))
|
||||
#define ATOM_alnum ((atom_t)(16*2+1))
|
||||
#define ATOM_alpha ((atom_t)(17*2+1))
|
||||
#define ATOM_alternative ((atom_t)(18*2+1))
|
||||
#define ATOM_and ((atom_t)(19*2+1))
|
||||
#define ATOM_anonvar ((atom_t)(20*2+1))
|
||||
#define ATOM_append ((atom_t)(21*2+1))
|
||||
#define ATOM_ar_equals ((atom_t)(22*2+1))
|
||||
#define ATOM_ar_not_equal ((atom_t)(23*2+1))
|
||||
#define ATOM_argument ((atom_t)(24*2+1))
|
||||
#define ATOM_argumentlimit ((atom_t)(25*2+1))
|
||||
#define ATOM_arity ((atom_t)(26*2+1))
|
||||
#define ATOM_as ((atom_t)(27*2+1))
|
||||
#define ATOM_ascii ((atom_t)(28*2+1))
|
||||
#define ATOM_asin ((atom_t)(29*2+1))
|
||||
#define ATOM_asinh ((atom_t)(30*2+1))
|
||||
#define ATOM_assert ((atom_t)(31*2+1))
|
||||
#define ATOM_asserta ((atom_t)(32*2+1))
|
||||
#define ATOM_at ((atom_t)(33*2+1))
|
||||
#define ATOM_at_equals ((atom_t)(34*2+1))
|
||||
#define ATOM_at_exit ((atom_t)(35*2+1))
|
||||
#define ATOM_at_larger ((atom_t)(36*2+1))
|
||||
#define ATOM_at_larger_eq ((atom_t)(37*2+1))
|
||||
#define ATOM_at_not_equals ((atom_t)(38*2+1))
|
||||
#define ATOM_at_smaller ((atom_t)(39*2+1))
|
||||
#define ATOM_at_smaller_eq ((atom_t)(40*2+1))
|
||||
#define ATOM_atan ((atom_t)(41*2+1))
|
||||
#define ATOM_atan2 ((atom_t)(42*2+1))
|
||||
#define ATOM_atanh ((atom_t)(43*2+1))
|
||||
#define ATOM_atom ((atom_t)(44*2+1))
|
||||
#define ATOM_atom_garbage_collection ((atom_t)(45*2+1))
|
||||
#define ATOM_atomic ((atom_t)(46*2+1))
|
||||
#define ATOM_atoms ((atom_t)(47*2+1))
|
||||
#define ATOM_att ((atom_t)(48*2+1))
|
||||
#define ATOM_attributes ((atom_t)(49*2+1))
|
||||
#define ATOM_attvar ((atom_t)(50*2+1))
|
||||
#define ATOM_autoload ((atom_t)(51*2+1))
|
||||
#define ATOM_back_quotes ((atom_t)(52*2+1))
|
||||
#define ATOM_backslash ((atom_t)(53*2+1))
|
||||
#define ATOM_backtrace ((atom_t)(54*2+1))
|
||||
#define ATOM_backquoted_string ((atom_t)(55*2+1))
|
||||
#define ATOM_bar ((atom_t)(56*2+1))
|
||||
#define ATOM_base ((atom_t)(57*2+1))
|
||||
#define ATOM_begin ((atom_t)(58*2+1))
|
||||
#define ATOM_binary ((atom_t)(59*2+1))
|
||||
#define ATOM_binary_stream ((atom_t)(60*2+1))
|
||||
#define ATOM_bind ((atom_t)(61*2+1))
|
||||
#define ATOM_bitor ((atom_t)(62*2+1))
|
||||
#define ATOM_blobs ((atom_t)(63*2+1))
|
||||
#define ATOM_bof ((atom_t)(64*2+1))
|
||||
#define ATOM_bom ((atom_t)(65*2+1))
|
||||
#define ATOM_bool ((atom_t)(66*2+1))
|
||||
#define ATOM_boolean ((atom_t)(67*2+1))
|
||||
#define ATOM_brace_term_position ((atom_t)(68*2+1))
|
||||
#define ATOM_brace_terms ((atom_t)(69*2+1))
|
||||
#define ATOM_break ((atom_t)(70*2+1))
|
||||
#define ATOM_break_level ((atom_t)(71*2+1))
|
||||
#define ATOM_btree ((atom_t)(72*2+1))
|
||||
#define ATOM_buffer ((atom_t)(73*2+1))
|
||||
#define ATOM_buffer_size ((atom_t)(74*2+1))
|
||||
#define ATOM_built_in_procedure ((atom_t)(75*2+1))
|
||||
#define ATOM_busy ((atom_t)(76*2+1))
|
||||
#define ATOM_byte ((atom_t)(77*2+1))
|
||||
#define ATOM_c_stack ((atom_t)(78*2+1))
|
||||
#define ATOM_call ((atom_t)(79*2+1))
|
||||
#define ATOM_callable ((atom_t)(80*2+1))
|
||||
#define ATOM_canceled ((atom_t)(81*2+1))
|
||||
#define ATOM_case_sensitive_file_names ((atom_t)(82*2+1))
|
||||
#define ATOM_catch ((atom_t)(83*2+1))
|
||||
#define ATOM_category ((atom_t)(84*2+1))
|
||||
#define ATOM_ceil ((atom_t)(85*2+1))
|
||||
#define ATOM_ceiling ((atom_t)(86*2+1))
|
||||
#define ATOM_char_type ((atom_t)(87*2+1))
|
||||
#define ATOM_character ((atom_t)(88*2+1))
|
||||
#define ATOM_character_code ((atom_t)(89*2+1))
|
||||
#define ATOM_character_escapes ((atom_t)(90*2+1))
|
||||
#define ATOM_chars ((atom_t)(91*2+1))
|
||||
#define ATOM_chdir ((atom_t)(92*2+1))
|
||||
#define ATOM_chmod ((atom_t)(93*2+1))
|
||||
#define ATOM_choice ((atom_t)(94*2+1))
|
||||
#define ATOM_class ((atom_t)(95*2+1))
|
||||
#define ATOM_clause ((atom_t)(96*2+1))
|
||||
#define ATOM_clause_reference ((atom_t)(97*2+1))
|
||||
#define ATOM_clauses ((atom_t)(98*2+1))
|
||||
#define ATOM_close ((atom_t)(99*2+1))
|
||||
#define ATOM_close_on_abort ((atom_t)(100*2+1))
|
||||
#define ATOM_close_on_exec ((atom_t)(101*2+1))
|
||||
#define ATOM_close_option ((atom_t)(102*2+1))
|
||||
#define ATOM_cm ((atom_t)(103*2+1))
|
||||
#define ATOM_cntrl ((atom_t)(104*2+1))
|
||||
#define ATOM_co ((atom_t)(105*2+1))
|
||||
#define ATOM_codes ((atom_t)(106*2+1))
|
||||
#define ATOM_collected ((atom_t)(107*2+1))
|
||||
#define ATOM_collections ((atom_t)(108*2+1))
|
||||
#define ATOM_colon ((atom_t)(109*2+1))
|
||||
#define ATOM_colon_eq ((atom_t)(110*2+1))
|
||||
#define ATOM_comma ((atom_t)(111*2+1))
|
||||
#define ATOM_comments ((atom_t)(112*2+1))
|
||||
#define ATOM_compound ((atom_t)(113*2+1))
|
||||
#define ATOM_context ((atom_t)(114*2+1))
|
||||
#define ATOM_context_module ((atom_t)(115*2+1))
|
||||
#define ATOM_continue ((atom_t)(116*2+1))
|
||||
#define ATOM_copysign ((atom_t)(117*2+1))
|
||||
#define ATOM_core ((atom_t)(118*2+1))
|
||||
#define ATOM_core_left ((atom_t)(119*2+1))
|
||||
#define ATOM_cos ((atom_t)(120*2+1))
|
||||
#define ATOM_cosh ((atom_t)(121*2+1))
|
||||
#define ATOM_cputime ((atom_t)(122*2+1))
|
||||
#define ATOM_create ((atom_t)(123*2+1))
|
||||
#define ATOM_csym ((atom_t)(124*2+1))
|
||||
#define ATOM_csymf ((atom_t)(125*2+1))
|
||||
#define ATOM_cumulative ((atom_t)(126*2+1))
|
||||
#define ATOM_curl ((atom_t)(127*2+1))
|
||||
#define ATOM_current ((atom_t)(128*2+1))
|
||||
#define ATOM_current_input ((atom_t)(129*2+1))
|
||||
#define ATOM_current_locale ((atom_t)(130*2+1))
|
||||
#define ATOM_current_output ((atom_t)(131*2+1))
|
||||
#define ATOM_cut ((atom_t)(132*2+1))
|
||||
#define ATOM_cut_call ((atom_t)(133*2+1))
|
||||
#define ATOM_cut_exit ((atom_t)(134*2+1))
|
||||
#define ATOM_cut_parent ((atom_t)(135*2+1))
|
||||
#define ATOM_cutted ((atom_t)(136*2+1))
|
||||
#define ATOM_cycles ((atom_t)(137*2+1))
|
||||
#define ATOM_cyclic_term ((atom_t)(138*2+1))
|
||||
#define ATOM_dand ((atom_t)(139*2+1))
|
||||
#define ATOM_date ((atom_t)(140*2+1))
|
||||
#define ATOM_db_reference ((atom_t)(141*2+1))
|
||||
#define ATOM_dc_call_prolog ((atom_t)(142*2+1))
|
||||
#define ATOM_dcall ((atom_t)(143*2+1))
|
||||
#define ATOM_dcall_cleanup ((atom_t)(144*2+1))
|
||||
#define ATOM_dcatch ((atom_t)(145*2+1))
|
||||
#define ATOM_dcut ((atom_t)(146*2+1))
|
||||
#define ATOM_dde_error ((atom_t)(147*2+1))
|
||||
#define ATOM_dde_handle ((atom_t)(148*2+1))
|
||||
#define ATOM_deadline ((atom_t)(149*2+1))
|
||||
#define ATOM_debug ((atom_t)(150*2+1))
|
||||
#define ATOM_debug_on_error ((atom_t)(151*2+1))
|
||||
#define ATOM_debug_topic ((atom_t)(152*2+1))
|
||||
#define ATOM_debugger_print_options ((atom_t)(153*2+1))
|
||||
#define ATOM_debugger_show_context ((atom_t)(154*2+1))
|
||||
#define ATOM_debugging ((atom_t)(155*2+1))
|
||||
#define ATOM_dec10 ((atom_t)(156*2+1))
|
||||
#define ATOM_decimal_point ((atom_t)(157*2+1))
|
||||
#define ATOM_default ((atom_t)(158*2+1))
|
||||
#define ATOM_defined ((atom_t)(159*2+1))
|
||||
#define ATOM_delete ((atom_t)(160*2+1))
|
||||
#define ATOM_depth_limit_exceeded ((atom_t)(161*2+1))
|
||||
#define ATOM_destroy ((atom_t)(162*2+1))
|
||||
#define ATOM_detached ((atom_t)(163*2+1))
|
||||
#define ATOM_detect ((atom_t)(164*2+1))
|
||||
#define ATOM_development ((atom_t)(165*2+1))
|
||||
#define ATOM_dexit ((atom_t)(166*2+1))
|
||||
#define ATOM_dforeign_registered ((atom_t)(167*2+1))
|
||||
#define ATOM_dgarbage_collect ((atom_t)(168*2+1))
|
||||
#define ATOM_digit ((atom_t)(169*2+1))
|
||||
#define ATOM_directory ((atom_t)(170*2+1))
|
||||
#define ATOM_discontiguous ((atom_t)(171*2+1))
|
||||
#define ATOM_div ((atom_t)(172*2+1))
|
||||
#define ATOM_divide ((atom_t)(173*2+1))
|
||||
#define ATOM_dload ((atom_t)(174*2+1))
|
||||
#define ATOM_dmessage_queue ((atom_t)(175*2+1))
|
||||
#define ATOM_dmutex ((atom_t)(176*2+1))
|
||||
#define ATOM_domain_error ((atom_t)(177*2+1))
|
||||
#define ATOM_dos ((atom_t)(178*2+1))
|
||||
#define ATOM_dot ((atom_t)(179*2+1))
|
||||
#define ATOM_dot_lists ((atom_t)(180*2+1))
|
||||
#define ATOM_dots ((atom_t)(181*2+1))
|
||||
#define ATOM_double_quotes ((atom_t)(182*2+1))
|
||||
#define ATOM_doublestar ((atom_t)(183*2+1))
|
||||
#define ATOM_dparse_quasi_quotations ((atom_t)(184*2+1))
|
||||
#define ATOM_dprof_node ((atom_t)(185*2+1))
|
||||
#define ATOM_dquasi_quotation ((atom_t)(186*2+1))
|
||||
#define ATOM_dquery_loop ((atom_t)(187*2+1))
|
||||
#define ATOM_drecover_and_rethrow ((atom_t)(188*2+1))
|
||||
#define ATOM_dstream ((atom_t)(189*2+1))
|
||||
#define ATOM_dthread_init ((atom_t)(190*2+1))
|
||||
#define ATOM_dthrow ((atom_t)(191*2+1))
|
||||
#define ATOM_dtime ((atom_t)(192*2+1))
|
||||
#define ATOM_dtoplevel ((atom_t)(193*2+1))
|
||||
#define ATOM_duplicate_key ((atom_t)(194*2+1))
|
||||
#define ATOM_dvard ((atom_t)(195*2+1))
|
||||
#define ATOM_dvariable_names ((atom_t)(196*2+1))
|
||||
#define ATOM_dwakeup ((atom_t)(197*2+1))
|
||||
#define ATOM_dynamic ((atom_t)(198*2+1))
|
||||
#define ATOM_e ((atom_t)(199*2+1))
|
||||
#define ATOM_encoding ((atom_t)(200*2+1))
|
||||
#define ATOM_end ((atom_t)(201*2+1))
|
||||
#define ATOM_end_of_file ((atom_t)(202*2+1))
|
||||
#define ATOM_end_of_line ((atom_t)(203*2+1))
|
||||
#define ATOM_end_of_stream ((atom_t)(204*2+1))
|
||||
#define ATOM_environment ((atom_t)(205*2+1))
|
||||
#define ATOM_eof ((atom_t)(206*2+1))
|
||||
#define ATOM_eof_action ((atom_t)(207*2+1))
|
||||
#define ATOM_eof_code ((atom_t)(208*2+1))
|
||||
#define ATOM_epsilon ((atom_t)(209*2+1))
|
||||
#define ATOM_equal ((atom_t)(210*2+1))
|
||||
#define ATOM_equals ((atom_t)(211*2+1))
|
||||
#define ATOM_erase ((atom_t)(212*2+1))
|
||||
#define ATOM_erased ((atom_t)(213*2+1))
|
||||
#define ATOM_erf ((atom_t)(214*2+1))
|
||||
#define ATOM_erfc ((atom_t)(215*2+1))
|
||||
#define ATOM_error ((atom_t)(216*2+1))
|
||||
#define ATOM_eval ((atom_t)(217*2+1))
|
||||
#define ATOM_evaluable ((atom_t)(218*2+1))
|
||||
#define ATOM_evaluation_error ((atom_t)(219*2+1))
|
||||
#define ATOM_exception ((atom_t)(220*2+1))
|
||||
#define ATOM_exclusive ((atom_t)(221*2+1))
|
||||
#define ATOM_execute ((atom_t)(222*2+1))
|
||||
#define ATOM_exist ((atom_t)(223*2+1))
|
||||
#define ATOM_existence_error ((atom_t)(224*2+1))
|
||||
#define ATOM_exit ((atom_t)(225*2+1))
|
||||
#define ATOM_exited ((atom_t)(226*2+1))
|
||||
#define ATOM_exp ((atom_t)(227*2+1))
|
||||
#define ATOM_export ((atom_t)(228*2+1))
|
||||
#define ATOM_exported ((atom_t)(229*2+1))
|
||||
#define ATOM_exports ((atom_t)(230*2+1))
|
||||
#define ATOM_expression ((atom_t)(231*2+1))
|
||||
#define ATOM_external_exception ((atom_t)(232*2+1))
|
||||
#define ATOM_externals ((atom_t)(233*2+1))
|
||||
#define ATOM_fact ((atom_t)(234*2+1))
|
||||
#define ATOM_factor ((atom_t)(235*2+1))
|
||||
#define ATOM_fail ((atom_t)(236*2+1))
|
||||
#define ATOM_failure_error ((atom_t)(237*2+1))
|
||||
#define ATOM_false ((atom_t)(238*2+1))
|
||||
#define ATOM_feature ((atom_t)(239*2+1))
|
||||
#define ATOM_file ((atom_t)(240*2+1))
|
||||
#define ATOM_file_name ((atom_t)(241*2+1))
|
||||
#define ATOM_file_name_variables ((atom_t)(242*2+1))
|
||||
#define ATOM_file_no ((atom_t)(243*2+1))
|
||||
#define ATOM_flag ((atom_t)(244*2+1))
|
||||
#define ATOM_flag_value ((atom_t)(245*2+1))
|
||||
#define ATOM_float ((atom_t)(246*2+1))
|
||||
#define ATOM_float_format ((atom_t)(247*2+1))
|
||||
#define ATOM_float_fractional_part ((atom_t)(248*2+1))
|
||||
#define ATOM_float_integer_part ((atom_t)(249*2+1))
|
||||
#define ATOM_float_overflow ((atom_t)(250*2+1))
|
||||
#define ATOM_float_underflow ((atom_t)(251*2+1))
|
||||
#define ATOM_floor ((atom_t)(252*2+1))
|
||||
#define ATOM_force ((atom_t)(253*2+1))
|
||||
#define ATOM_foreign ((atom_t)(254*2+1))
|
||||
#define ATOM_foreign_function ((atom_t)(255*2+1))
|
||||
#define ATOM_foreign_return_value ((atom_t)(256*2+1))
|
||||
#define ATOM_fork ((atom_t)(257*2+1))
|
||||
#define ATOM_frame ((atom_t)(258*2+1))
|
||||
#define ATOM_frame_attribute ((atom_t)(259*2+1))
|
||||
#define ATOM_frame_finished ((atom_t)(260*2+1))
|
||||
#define ATOM_frame_reference ((atom_t)(261*2+1))
|
||||
#define ATOM_free_of_attvar ((atom_t)(262*2+1))
|
||||
#define ATOM_freeze ((atom_t)(263*2+1))
|
||||
#define ATOM_full ((atom_t)(264*2+1))
|
||||
#define ATOM_fullstop ((atom_t)(265*2+1))
|
||||
#define ATOM_functor_name ((atom_t)(266*2+1))
|
||||
#define ATOM_functors ((atom_t)(267*2+1))
|
||||
#define ATOM_fx ((atom_t)(268*2+1))
|
||||
#define ATOM_fy ((atom_t)(269*2+1))
|
||||
#define ATOM_garbage_collected ((atom_t)(270*2+1))
|
||||
#define ATOM_garbage_collection ((atom_t)(271*2+1))
|
||||
#define ATOM_gc ((atom_t)(272*2+1))
|
||||
#define ATOM_gcd ((atom_t)(273*2+1))
|
||||
#define ATOM_gctime ((atom_t)(274*2+1))
|
||||
#define ATOM_gdiv ((atom_t)(275*2+1))
|
||||
#define ATOM_getcwd ((atom_t)(276*2+1))
|
||||
#define ATOM_global ((atom_t)(277*2+1))
|
||||
#define ATOM_global_shifts ((atom_t)(278*2+1))
|
||||
#define ATOM_global_stack ((atom_t)(279*2+1))
|
||||
#define ATOM_globallimit ((atom_t)(280*2+1))
|
||||
#define ATOM_globalused ((atom_t)(281*2+1))
|
||||
#define ATOM_goal ((atom_t)(282*2+1))
|
||||
#define ATOM_goal_expansion ((atom_t)(283*2+1))
|
||||
#define ATOM_grammar ((atom_t)(284*2+1))
|
||||
#define ATOM_graph ((atom_t)(285*2+1))
|
||||
#define ATOM_ground ((atom_t)(286*2+1))
|
||||
#define ATOM_grouping ((atom_t)(287*2+1))
|
||||
#define ATOM_gvar ((atom_t)(288*2+1))
|
||||
#define ATOM_halt ((atom_t)(289*2+1))
|
||||
#define ATOM_has_alternatives ((atom_t)(290*2+1))
|
||||
#define ATOM_hash ((atom_t)(291*2+1))
|
||||
#define ATOM_hashed ((atom_t)(292*2+1))
|
||||
#define ATOM_hat ((atom_t)(293*2+1))
|
||||
#define ATOM_heap_gc ((atom_t)(294*2+1))
|
||||
#define ATOM_heapused ((atom_t)(295*2+1))
|
||||
#define ATOM_help ((atom_t)(296*2+1))
|
||||
#define ATOM_hidden ((atom_t)(297*2+1))
|
||||
#define ATOM_hide_childs ((atom_t)(298*2+1))
|
||||
#define ATOM_history_depth ((atom_t)(299*2+1))
|
||||
#define ATOM_ifthen ((atom_t)(300*2+1))
|
||||
#define ATOM_ignore ((atom_t)(301*2+1))
|
||||
#define ATOM_ignore_ops ((atom_t)(302*2+1))
|
||||
#define ATOM_import_into ((atom_t)(303*2+1))
|
||||
#define ATOM_import_type ((atom_t)(304*2+1))
|
||||
#define ATOM_imported ((atom_t)(305*2+1))
|
||||
#define ATOM_imported_procedure ((atom_t)(306*2+1))
|
||||
#define ATOM_index ((atom_t)(307*2+1))
|
||||
#define ATOM_indexed ((atom_t)(308*2+1))
|
||||
#define ATOM_inf ((atom_t)(309*2+1))
|
||||
#define ATOM_inferences ((atom_t)(310*2+1))
|
||||
#define ATOM_infinite ((atom_t)(311*2+1))
|
||||
#define ATOM_informational ((atom_t)(312*2+1))
|
||||
#define ATOM_init_file ((atom_t)(313*2+1))
|
||||
#define ATOM_initialization ((atom_t)(314*2+1))
|
||||
#define ATOM_input ((atom_t)(315*2+1))
|
||||
#define ATOM_inserted_char ((atom_t)(316*2+1))
|
||||
#define ATOM_instantiation_error ((atom_t)(317*2+1))
|
||||
#define ATOM_int ((atom_t)(318*2+1))
|
||||
#define ATOM_int64_t ((atom_t)(319*2+1))
|
||||
#define ATOM_int_overflow ((atom_t)(320*2+1))
|
||||
#define ATOM_integer ((atom_t)(321*2+1))
|
||||
#define ATOM_integer_expression ((atom_t)(322*2+1))
|
||||
#define ATOM_interrupt ((atom_t)(323*2+1))
|
||||
#define ATOM_io_error ((atom_t)(324*2+1))
|
||||
#define ATOM_io_mode ((atom_t)(325*2+1))
|
||||
#define ATOM_ioctl ((atom_t)(326*2+1))
|
||||
#define ATOM_is ((atom_t)(327*2+1))
|
||||
#define ATOM_iso ((atom_t)(328*2+1))
|
||||
#define ATOM_iso_latin_1 ((atom_t)(329*2+1))
|
||||
#define ATOM_isovar ((atom_t)(330*2+1))
|
||||
#define ATOM_join ((atom_t)(331*2+1))
|
||||
#define ATOM_jump ((atom_t)(332*2+1))
|
||||
#define ATOM_kernel ((atom_t)(333*2+1))
|
||||
#define ATOM_key ((atom_t)(334*2+1))
|
||||
#define ATOM_key_value_position ((atom_t)(335*2+1))
|
||||
#define ATOM_larger ((atom_t)(336*2+1))
|
||||
#define ATOM_larger_equal ((atom_t)(337*2+1))
|
||||
#define ATOM_level ((atom_t)(338*2+1))
|
||||
#define ATOM_lgamma ((atom_t)(339*2+1))
|
||||
#define ATOM_li ((atom_t)(340*2+1))
|
||||
#define ATOM_library ((atom_t)(341*2+1))
|
||||
#define ATOM_limit ((atom_t)(342*2+1))
|
||||
#define ATOM_line ((atom_t)(343*2+1))
|
||||
#define ATOM_line_count ((atom_t)(344*2+1))
|
||||
#define ATOM_line_position ((atom_t)(345*2+1))
|
||||
#define ATOM_list ((atom_t)(346*2+1))
|
||||
#define ATOM_list_position ((atom_t)(347*2+1))
|
||||
#define ATOM_listing ((atom_t)(348*2+1))
|
||||
#define ATOM_local ((atom_t)(349*2+1))
|
||||
#define ATOM_local_shifts ((atom_t)(350*2+1))
|
||||
#define ATOM_local_stack ((atom_t)(351*2+1))
|
||||
#define ATOM_locale ((atom_t)(352*2+1))
|
||||
#define ATOM_locale_property ((atom_t)(353*2+1))
|
||||
#define ATOM_locallimit ((atom_t)(354*2+1))
|
||||
#define ATOM_localused ((atom_t)(355*2+1))
|
||||
#define ATOM_lock ((atom_t)(356*2+1))
|
||||
#define ATOM_locked ((atom_t)(357*2+1))
|
||||
#define ATOM_log ((atom_t)(358*2+1))
|
||||
#define ATOM_log10 ((atom_t)(359*2+1))
|
||||
#define ATOM_long ((atom_t)(360*2+1))
|
||||
#define ATOM_loose ((atom_t)(361*2+1))
|
||||
#define ATOM_low ((atom_t)(362*2+1))
|
||||
#define ATOM_lower ((atom_t)(363*2+1))
|
||||
#define ATOM_lsb ((atom_t)(364*2+1))
|
||||
#define ATOM_lshift ((atom_t)(365*2+1))
|
||||
#define ATOM_main ((atom_t)(366*2+1))
|
||||
#define ATOM_map ((atom_t)(367*2+1))
|
||||
#define ATOM_map_position ((atom_t)(368*2+1))
|
||||
#define ATOM_map_punify ((atom_t)(369*2+1))
|
||||
#define ATOM_map_select ((atom_t)(370*2+1))
|
||||
#define ATOM_mark ((atom_t)(371*2+1))
|
||||
#define ATOM_matches ((atom_t)(372*2+1))
|
||||
#define ATOM_max ((atom_t)(373*2+1))
|
||||
#define ATOM_max_arity ((atom_t)(374*2+1))
|
||||
#define ATOM_max_dde_handles ((atom_t)(375*2+1))
|
||||
#define ATOM_max_depth ((atom_t)(376*2+1))
|
||||
#define ATOM_max_files ((atom_t)(377*2+1))
|
||||
#define ATOM_max_frame_size ((atom_t)(378*2+1))
|
||||
#define ATOM_max_length ((atom_t)(379*2+1))
|
||||
#define ATOM_max_path_length ((atom_t)(380*2+1))
|
||||
#define ATOM_max_size ((atom_t)(381*2+1))
|
||||
#define ATOM_max_variable_length ((atom_t)(382*2+1))
|
||||
#define ATOM_memory ((atom_t)(383*2+1))
|
||||
#define ATOM_message ((atom_t)(384*2+1))
|
||||
#define ATOM_message_lines ((atom_t)(385*2+1))
|
||||
#define ATOM_message_queue ((atom_t)(386*2+1))
|
||||
#define ATOM_message_queue_property ((atom_t)(387*2+1))
|
||||
#define ATOM_meta_argument ((atom_t)(388*2+1))
|
||||
#define ATOM_meta_argument_specifier ((atom_t)(389*2+1))
|
||||
#define ATOM_meta_atom ((atom_t)(390*2+1))
|
||||
#define ATOM_meta_predicate ((atom_t)(391*2+1))
|
||||
#define ATOM_min ((atom_t)(392*2+1))
|
||||
#define ATOM_min_free ((atom_t)(393*2+1))
|
||||
#define ATOM_minus ((atom_t)(394*2+1))
|
||||
#define ATOM_mismatched_char ((atom_t)(395*2+1))
|
||||
#define ATOM_mod ((atom_t)(396*2+1))
|
||||
#define ATOM_mode ((atom_t)(397*2+1))
|
||||
#define ATOM_modify ((atom_t)(398*2+1))
|
||||
#define ATOM_module ((atom_t)(399*2+1))
|
||||
#define ATOM_module_class ((atom_t)(400*2+1))
|
||||
#define ATOM_module_property ((atom_t)(401*2+1))
|
||||
#define ATOM_module_transparent ((atom_t)(402*2+1))
|
||||
#define ATOM_modules ((atom_t)(403*2+1))
|
||||
#define ATOM_msb ((atom_t)(404*2+1))
|
||||
#define ATOM_multifile ((atom_t)(405*2+1))
|
||||
#define ATOM_mutex ((atom_t)(406*2+1))
|
||||
#define ATOM_mutex_option ((atom_t)(407*2+1))
|
||||
#define ATOM_mutex_property ((atom_t)(408*2+1))
|
||||
#define ATOM_natural ((atom_t)(409*2+1))
|
||||
#define ATOM_newline ((atom_t)(410*2+1))
|
||||
#define ATOM_next_argument ((atom_t)(411*2+1))
|
||||
#define ATOM_nil ((atom_t)(412*2+1))
|
||||
#define ATOM_nl ((atom_t)(413*2+1))
|
||||
#define ATOM_nlink ((atom_t)(414*2+1))
|
||||
#define ATOM_no_memory ((atom_t)(415*2+1))
|
||||
#define ATOM_nodebug ((atom_t)(416*2+1))
|
||||
#define ATOM_non_empty_list ((atom_t)(417*2+1))
|
||||
#define ATOM_non_terminal ((atom_t)(418*2+1))
|
||||
#define ATOM_none ((atom_t)(419*2+1))
|
||||
#define ATOM_nonvar ((atom_t)(420*2+1))
|
||||
#define ATOM_noprofile ((atom_t)(421*2+1))
|
||||
#define ATOM_normal ((atom_t)(422*2+1))
|
||||
#define ATOM_not ((atom_t)(423*2+1))
|
||||
#define ATOM_not_equals ((atom_t)(424*2+1))
|
||||
#define ATOM_not_implemented ((atom_t)(425*2+1))
|
||||
#define ATOM_not_less_than_one ((atom_t)(426*2+1))
|
||||
#define ATOM_not_less_than_zero ((atom_t)(427*2+1))
|
||||
#define ATOM_not_provable ((atom_t)(428*2+1))
|
||||
#define ATOM_not_strict_equal ((atom_t)(429*2+1))
|
||||
#define ATOM_not_unique ((atom_t)(430*2+1))
|
||||
#define ATOM_number ((atom_t)(431*2+1))
|
||||
#define ATOM_number_of_clauses ((atom_t)(432*2+1))
|
||||
#define ATOM_number_of_rules ((atom_t)(433*2+1))
|
||||
#define ATOM_numbervar_option ((atom_t)(434*2+1))
|
||||
#define ATOM_numbervars ((atom_t)(435*2+1))
|
||||
#define ATOM_occurs_check ((atom_t)(436*2+1))
|
||||
#define ATOM_octet ((atom_t)(437*2+1))
|
||||
#define ATOM_off ((atom_t)(438*2+1))
|
||||
#define ATOM_on ((atom_t)(439*2+1))
|
||||
#define ATOM_open ((atom_t)(440*2+1))
|
||||
#define ATOM_operator ((atom_t)(441*2+1))
|
||||
#define ATOM_operator_priority ((atom_t)(442*2+1))
|
||||
#define ATOM_operator_specifier ((atom_t)(443*2+1))
|
||||
#define ATOM_optimise ((atom_t)(444*2+1))
|
||||
#define ATOM_or ((atom_t)(445*2+1))
|
||||
#define ATOM_order ((atom_t)(446*2+1))
|
||||
#define ATOM_output ((atom_t)(447*2+1))
|
||||
#define ATOM_owner ((atom_t)(448*2+1))
|
||||
#define ATOM_pair ((atom_t)(449*2+1))
|
||||
#define ATOM_paren ((atom_t)(450*2+1))
|
||||
#define ATOM_parent ((atom_t)(451*2+1))
|
||||
#define ATOM_parent_goal ((atom_t)(452*2+1))
|
||||
#define ATOM_partial ((atom_t)(453*2+1))
|
||||
#define ATOM_past ((atom_t)(454*2+1))
|
||||
#define ATOM_past_end_of_stream ((atom_t)(455*2+1))
|
||||
#define ATOM_pattern ((atom_t)(456*2+1))
|
||||
#define ATOM_pc ((atom_t)(457*2+1))
|
||||
#define ATOM_peek ((atom_t)(458*2+1))
|
||||
#define ATOM_period ((atom_t)(459*2+1))
|
||||
#define ATOM_permission_error ((atom_t)(460*2+1))
|
||||
#define ATOM_pi ((atom_t)(461*2+1))
|
||||
#define ATOM_pipe ((atom_t)(462*2+1))
|
||||
#define ATOM_plain ((atom_t)(463*2+1))
|
||||
#define ATOM_plus ((atom_t)(464*2+1))
|
||||
#define ATOM_popcount ((atom_t)(465*2+1))
|
||||
#define ATOM_portray ((atom_t)(466*2+1))
|
||||
#define ATOM_portray_goal ((atom_t)(467*2+1))
|
||||
#define ATOM_position ((atom_t)(468*2+1))
|
||||
#define ATOM_posix ((atom_t)(469*2+1))
|
||||
#define ATOM_powm ((atom_t)(470*2+1))
|
||||
#define ATOM_predicate_indicator ((atom_t)(471*2+1))
|
||||
#define ATOM_predicates ((atom_t)(472*2+1))
|
||||
#define ATOM_print ((atom_t)(473*2+1))
|
||||
#define ATOM_print_message ((atom_t)(474*2+1))
|
||||
#define ATOM_priority ((atom_t)(475*2+1))
|
||||
#define ATOM_private_procedure ((atom_t)(476*2+1))
|
||||
#define ATOM_procedure ((atom_t)(477*2+1))
|
||||
#define ATOM_process_comment ((atom_t)(478*2+1))
|
||||
#define ATOM_process_cputime ((atom_t)(479*2+1))
|
||||
#define ATOM_profile_mode ((atom_t)(480*2+1))
|
||||
#define ATOM_profile_no_cpu_time ((atom_t)(481*2+1))
|
||||
#define ATOM_profile_node ((atom_t)(482*2+1))
|
||||
#define ATOM_program ((atom_t)(483*2+1))
|
||||
#define ATOM_program_counter ((atom_t)(484*2+1))
|
||||
#define ATOM_prolog ((atom_t)(485*2+1))
|
||||
#define ATOM_prolog_atom_start ((atom_t)(486*2+1))
|
||||
#define ATOM_prolog_flag ((atom_t)(487*2+1))
|
||||
#define ATOM_prolog_flag_access ((atom_t)(488*2+1))
|
||||
#define ATOM_prolog_flag_option ((atom_t)(489*2+1))
|
||||
#define ATOM_prolog_flag_type ((atom_t)(490*2+1))
|
||||
#define ATOM_prolog_identifier_continue ((atom_t)(491*2+1))
|
||||
#define ATOM_prolog_symbol ((atom_t)(492*2+1))
|
||||
#define ATOM_prolog_var_start ((atom_t)(493*2+1))
|
||||
#define ATOM_prompt ((atom_t)(494*2+1))
|
||||
#define ATOM_property ((atom_t)(495*2+1))
|
||||
#define ATOM_protocol ((atom_t)(496*2+1))
|
||||
#define ATOM_prove ((atom_t)(497*2+1))
|
||||
#define ATOM_public ((atom_t)(498*2+1))
|
||||
#define ATOM_punct ((atom_t)(499*2+1))
|
||||
#define ATOM_quasi_quotation ((atom_t)(500*2+1))
|
||||
#define ATOM_quasi_quotation_position ((atom_t)(501*2+1))
|
||||
#define ATOM_quasi_quotation_syntax ((atom_t)(502*2+1))
|
||||
#define ATOM_quasi_quotations ((atom_t)(503*2+1))
|
||||
#define ATOM_query ((atom_t)(504*2+1))
|
||||
#define ATOM_question_mark ((atom_t)(505*2+1))
|
||||
#define ATOM_queue_option ((atom_t)(506*2+1))
|
||||
#define ATOM_quiet ((atom_t)(507*2+1))
|
||||
#define ATOM_quote ((atom_t)(508*2+1))
|
||||
#define ATOM_quoted ((atom_t)(509*2+1))
|
||||
#define ATOM_radix ((atom_t)(510*2+1))
|
||||
#define ATOM_random ((atom_t)(511*2+1))
|
||||
#define ATOM_random_float ((atom_t)(512*2+1))
|
||||
#define ATOM_random_option ((atom_t)(513*2+1))
|
||||
#define ATOM_rational ((atom_t)(514*2+1))
|
||||
#define ATOM_rationalize ((atom_t)(515*2+1))
|
||||
#define ATOM_rdiv ((atom_t)(516*2+1))
|
||||
#define ATOM_read ((atom_t)(517*2+1))
|
||||
#define ATOM_read_only ((atom_t)(518*2+1))
|
||||
#define ATOM_read_option ((atom_t)(519*2+1))
|
||||
#define ATOM_read_write ((atom_t)(520*2+1))
|
||||
#define ATOM_readline ((atom_t)(521*2+1))
|
||||
#define ATOM_real_time ((atom_t)(522*2+1))
|
||||
#define ATOM_receiver ((atom_t)(523*2+1))
|
||||
#define ATOM_record ((atom_t)(524*2+1))
|
||||
#define ATOM_record_position ((atom_t)(525*2+1))
|
||||
#define ATOM_redefine ((atom_t)(526*2+1))
|
||||
#define ATOM_redo ((atom_t)(527*2+1))
|
||||
#define ATOM_redo_in_skip ((atom_t)(528*2+1))
|
||||
#define ATOM_references ((atom_t)(529*2+1))
|
||||
#define ATOM_rem ((atom_t)(530*2+1))
|
||||
#define ATOM_rename ((atom_t)(531*2+1))
|
||||
#define ATOM_repeat ((atom_t)(532*2+1))
|
||||
#define ATOM_report_error ((atom_t)(533*2+1))
|
||||
#define ATOM_reposition ((atom_t)(534*2+1))
|
||||
#define ATOM_representation_error ((atom_t)(535*2+1))
|
||||
#define ATOM_representation_errors ((atom_t)(536*2+1))
|
||||
#define ATOM_reset ((atom_t)(537*2+1))
|
||||
#define ATOM_resource_error ((atom_t)(538*2+1))
|
||||
#define ATOM_resource_handle ((atom_t)(539*2+1))
|
||||
#define ATOM_retry ((atom_t)(540*2+1))
|
||||
#define ATOM_round ((atom_t)(541*2+1))
|
||||
#define ATOM_rshift ((atom_t)(542*2+1))
|
||||
#define ATOM_running ((atom_t)(543*2+1))
|
||||
#define ATOM_runtime ((atom_t)(544*2+1))
|
||||
#define ATOM_save_class ((atom_t)(545*2+1))
|
||||
#define ATOM_save_option ((atom_t)(546*2+1))
|
||||
#define ATOM_scripting ((atom_t)(547*2+1))
|
||||
#define ATOM_see ((atom_t)(548*2+1))
|
||||
#define ATOM_seed ((atom_t)(549*2+1))
|
||||
#define ATOM_seek_method ((atom_t)(550*2+1))
|
||||
#define ATOM_select ((atom_t)(551*2+1))
|
||||
#define ATOM_semicolon ((atom_t)(552*2+1))
|
||||
#define ATOM_separated ((atom_t)(553*2+1))
|
||||
#define ATOM_set ((atom_t)(554*2+1))
|
||||
#define ATOM_set_end_of_stream ((atom_t)(555*2+1))
|
||||
#define ATOM_setup_call_catcher_cleanup ((atom_t)(556*2+1))
|
||||
#define ATOM_shared ((atom_t)(557*2+1))
|
||||
#define ATOM_shared_object ((atom_t)(558*2+1))
|
||||
#define ATOM_shared_object_handle ((atom_t)(559*2+1))
|
||||
#define ATOM_shell ((atom_t)(560*2+1))
|
||||
#define ATOM_shift_time ((atom_t)(561*2+1))
|
||||
#define ATOM_sign ((atom_t)(562*2+1))
|
||||
#define ATOM_signal ((atom_t)(563*2+1))
|
||||
#define ATOM_signal_handler ((atom_t)(564*2+1))
|
||||
#define ATOM_silent ((atom_t)(565*2+1))
|
||||
#define ATOM_sin ((atom_t)(566*2+1))
|
||||
#define ATOM_singletons ((atom_t)(567*2+1))
|
||||
#define ATOM_sinh ((atom_t)(568*2+1))
|
||||
#define ATOM_size ((atom_t)(569*2+1))
|
||||
#define ATOM_size_t ((atom_t)(570*2+1))
|
||||
#define ATOM_skip ((atom_t)(571*2+1))
|
||||
#define ATOM_skipped ((atom_t)(572*2+1))
|
||||
#define ATOM_smaller ((atom_t)(573*2+1))
|
||||
#define ATOM_smaller_equal ((atom_t)(574*2+1))
|
||||
#define ATOM_softcut ((atom_t)(575*2+1))
|
||||
#define ATOM_source_sink ((atom_t)(576*2+1))
|
||||
#define ATOM_space ((atom_t)(577*2+1))
|
||||
#define ATOM_spacing ((atom_t)(578*2+1))
|
||||
#define ATOM_spare ((atom_t)(579*2+1))
|
||||
#define ATOM_spy ((atom_t)(580*2+1))
|
||||
#define ATOM_sqrt ((atom_t)(581*2+1))
|
||||
#define ATOM_stack ((atom_t)(582*2+1))
|
||||
#define ATOM_stack_parameter ((atom_t)(583*2+1))
|
||||
#define ATOM_stack_shifts ((atom_t)(584*2+1))
|
||||
#define ATOM_stacks ((atom_t)(585*2+1))
|
||||
#define ATOM_stand_alone ((atom_t)(586*2+1))
|
||||
#define ATOM_standard ((atom_t)(587*2+1))
|
||||
#define ATOM_star ((atom_t)(588*2+1))
|
||||
#define ATOM_start ((atom_t)(589*2+1))
|
||||
#define ATOM_stat ((atom_t)(590*2+1))
|
||||
#define ATOM_state ((atom_t)(591*2+1))
|
||||
#define ATOM_static_procedure ((atom_t)(592*2+1))
|
||||
#define ATOM_statistics ((atom_t)(593*2+1))
|
||||
#define ATOM_status ((atom_t)(594*2+1))
|
||||
#define ATOM_stderr ((atom_t)(595*2+1))
|
||||
#define ATOM_stream ((atom_t)(596*2+1))
|
||||
#define ATOM_stream_option ((atom_t)(597*2+1))
|
||||
#define ATOM_stream_or_alias ((atom_t)(598*2+1))
|
||||
#define ATOM_stream_pair ((atom_t)(599*2+1))
|
||||
#define ATOM_stream_position ((atom_t)(600*2+1))
|
||||
#define ATOM_stream_property ((atom_t)(601*2+1))
|
||||
#define ATOM_stream_type_check ((atom_t)(602*2+1))
|
||||
#define ATOM_strict_equal ((atom_t)(603*2+1))
|
||||
#define ATOM_string ((atom_t)(604*2+1))
|
||||
#define ATOM_string_position ((atom_t)(605*2+1))
|
||||
#define ATOM_strong ((atom_t)(606*2+1))
|
||||
#define ATOM_subterm_positions ((atom_t)(607*2+1))
|
||||
#define ATOM_suffix ((atom_t)(608*2+1))
|
||||
#define ATOM_symbol_char ((atom_t)(609*2+1))
|
||||
#define ATOM_syntax_error ((atom_t)(610*2+1))
|
||||
#define ATOM_syntax_errors ((atom_t)(611*2+1))
|
||||
#define ATOM_system ((atom_t)(612*2+1))
|
||||
#define ATOM_SYSTEM_ERROR_INTERNAL ((atom_t)(613*2+1))
|
||||
#define ATOM_system_init_file ((atom_t)(614*2+1))
|
||||
#define ATOM_system_thread_id ((atom_t)(615*2+1))
|
||||
#define ATOM_system_time ((atom_t)(616*2+1))
|
||||
#define ATOM_tan ((atom_t)(617*2+1))
|
||||
#define ATOM_tanh ((atom_t)(618*2+1))
|
||||
#define ATOM_temporary_files ((atom_t)(619*2+1))
|
||||
#define ATOM_term ((atom_t)(620*2+1))
|
||||
#define ATOM_term_expansion ((atom_t)(621*2+1))
|
||||
#define ATOM_term_position ((atom_t)(622*2+1))
|
||||
#define ATOM_terminal ((atom_t)(623*2+1))
|
||||
#define ATOM_terminal_capability ((atom_t)(624*2+1))
|
||||
#define ATOM_test ((atom_t)(625*2+1))
|
||||
#define ATOM_text ((atom_t)(626*2+1))
|
||||
#define ATOM_text_stream ((atom_t)(627*2+1))
|
||||
#define ATOM_thousands_sep ((atom_t)(628*2+1))
|
||||
#define ATOM_thread ((atom_t)(629*2+1))
|
||||
#define ATOM_thread_cputime ((atom_t)(630*2+1))
|
||||
#define ATOM_thread_get_message_option ((atom_t)(631*2+1))
|
||||
#define ATOM_thread_initialization ((atom_t)(632*2+1))
|
||||
#define ATOM_thread_local ((atom_t)(633*2+1))
|
||||
#define ATOM_thread_local_procedure ((atom_t)(634*2+1))
|
||||
#define ATOM_thread_option ((atom_t)(635*2+1))
|
||||
#define ATOM_thread_property ((atom_t)(636*2+1))
|
||||
#define ATOM_threads ((atom_t)(637*2+1))
|
||||
#define ATOM_threads_created ((atom_t)(638*2+1))
|
||||
#define ATOM_throw ((atom_t)(639*2+1))
|
||||
#define ATOM_tilde ((atom_t)(640*2+1))
|
||||
#define ATOM_time ((atom_t)(641*2+1))
|
||||
#define ATOM_time_stamp ((atom_t)(642*2+1))
|
||||
#define ATOM_timeout ((atom_t)(643*2+1))
|
||||
#define ATOM_timeout_error ((atom_t)(644*2+1))
|
||||
#define ATOM_timezone ((atom_t)(645*2+1))
|
||||
#define ATOM_to_lower ((atom_t)(646*2+1))
|
||||
#define ATOM_to_upper ((atom_t)(647*2+1))
|
||||
#define ATOM_top ((atom_t)(648*2+1))
|
||||
#define ATOM_top_level ((atom_t)(649*2+1))
|
||||
#define ATOM_toplevel ((atom_t)(650*2+1))
|
||||
#define ATOM_trace ((atom_t)(651*2+1))
|
||||
#define ATOM_trace_any ((atom_t)(652*2+1))
|
||||
#define ATOM_trace_call ((atom_t)(653*2+1))
|
||||
#define ATOM_trace_exit ((atom_t)(654*2+1))
|
||||
#define ATOM_trace_fail ((atom_t)(655*2+1))
|
||||
#define ATOM_trace_gc ((atom_t)(656*2+1))
|
||||
#define ATOM_trace_redo ((atom_t)(657*2+1))
|
||||
#define ATOM_traceinterc ((atom_t)(658*2+1))
|
||||
#define ATOM_tracing ((atom_t)(659*2+1))
|
||||
#define ATOM_trail ((atom_t)(660*2+1))
|
||||
#define ATOM_trail_shifts ((atom_t)(661*2+1))
|
||||
#define ATOM_traillimit ((atom_t)(662*2+1))
|
||||
#define ATOM_trailused ((atom_t)(663*2+1))
|
||||
#define ATOM_transparent ((atom_t)(664*2+1))
|
||||
#define ATOM_transposed_char ((atom_t)(665*2+1))
|
||||
#define ATOM_transposed_word ((atom_t)(666*2+1))
|
||||
#define ATOM_true ((atom_t)(667*2+1))
|
||||
#define ATOM_truncate ((atom_t)(668*2+1))
|
||||
#define ATOM_tty ((atom_t)(669*2+1))
|
||||
#define ATOM_tty_control ((atom_t)(670*2+1))
|
||||
#define ATOM_type ((atom_t)(671*2+1))
|
||||
#define ATOM_type_error ((atom_t)(672*2+1))
|
||||
#define ATOM_undefined ((atom_t)(673*2+1))
|
||||
#define ATOM_undefined_global_variable ((atom_t)(674*2+1))
|
||||
#define ATOM_undefinterc ((atom_t)(675*2+1))
|
||||
#define ATOM_unicode_be ((atom_t)(676*2+1))
|
||||
#define ATOM_unicode_le ((atom_t)(677*2+1))
|
||||
#define ATOM_unify ((atom_t)(678*2+1))
|
||||
#define ATOM_unify_determined ((atom_t)(679*2+1))
|
||||
#define ATOM_uninstantiation_error ((atom_t)(680*2+1))
|
||||
#define ATOM_unique ((atom_t)(681*2+1))
|
||||
#define ATOM_univ ((atom_t)(682*2+1))
|
||||
#define ATOM_unknown ((atom_t)(683*2+1))
|
||||
#define ATOM_unlimited ((atom_t)(684*2+1))
|
||||
#define ATOM_unload ((atom_t)(685*2+1))
|
||||
#define ATOM_unlock ((atom_t)(686*2+1))
|
||||
#define ATOM_unlocked ((atom_t)(687*2+1))
|
||||
#define ATOM_update ((atom_t)(688*2+1))
|
||||
#define ATOM_upper ((atom_t)(689*2+1))
|
||||
#define ATOM_user ((atom_t)(690*2+1))
|
||||
#define ATOM_user_error ((atom_t)(691*2+1))
|
||||
#define ATOM_user_flags ((atom_t)(692*2+1))
|
||||
#define ATOM_user_input ((atom_t)(693*2+1))
|
||||
#define ATOM_user_output ((atom_t)(694*2+1))
|
||||
#define ATOM_utc ((atom_t)(695*2+1))
|
||||
#define ATOM_utf8 ((atom_t)(696*2+1))
|
||||
#define ATOM_v ((atom_t)(697*2+1))
|
||||
#define ATOM_var ((atom_t)(698*2+1))
|
||||
#define ATOM_variable ((atom_t)(699*2+1))
|
||||
#define ATOM_variable_names ((atom_t)(700*2+1))
|
||||
#define ATOM_variables ((atom_t)(701*2+1))
|
||||
#define ATOM_very_deep ((atom_t)(702*2+1))
|
||||
#define ATOM_vmi ((atom_t)(703*2+1))
|
||||
#define ATOM_volatile ((atom_t)(704*2+1))
|
||||
#define ATOM_wait ((atom_t)(705*2+1))
|
||||
#define ATOM_wakeup ((atom_t)(706*2+1))
|
||||
#define ATOM_walltime ((atom_t)(707*2+1))
|
||||
#define ATOM_warning ((atom_t)(708*2+1))
|
||||
#define ATOM_wchar_t ((atom_t)(709*2+1))
|
||||
#define ATOM_weak ((atom_t)(710*2+1))
|
||||
#define ATOM_when_condition ((atom_t)(711*2+1))
|
||||
#define ATOM_white ((atom_t)(712*2+1))
|
||||
#define ATOM_write ((atom_t)(713*2+1))
|
||||
#define ATOM_write_attributes ((atom_t)(714*2+1))
|
||||
#define ATOM_write_option ((atom_t)(715*2+1))
|
||||
#define ATOM_xdigit ((atom_t)(716*2+1))
|
||||
#define ATOM_xf ((atom_t)(717*2+1))
|
||||
#define ATOM_xfx ((atom_t)(718*2+1))
|
||||
#define ATOM_xfy ((atom_t)(719*2+1))
|
||||
#define ATOM_xml ((atom_t)(720*2+1))
|
||||
#define ATOM_xor ((atom_t)(721*2+1))
|
||||
#define ATOM_xpceref ((atom_t)(722*2+1))
|
||||
#define ATOM_yf ((atom_t)(723*2+1))
|
||||
#define ATOM_yfx ((atom_t)(724*2+1))
|
||||
#define ATOM_zero_divisor ((atom_t)(725*2+1))
|
||||
#define FUNCTOR_abs1 ((functor_t)(0*4+2))
|
||||
#define FUNCTOR_access1 ((functor_t)(1*4+2))
|
||||
#define FUNCTOR_acos1 ((functor_t)(2*4+2))
|
||||
#define FUNCTOR_acosh1 ((functor_t)(3*4+2))
|
||||
#define FUNCTOR_alias1 ((functor_t)(4*4+2))
|
||||
#define FUNCTOR_and2 ((functor_t)(5*4+2))
|
||||
#define FUNCTOR_ar_equals2 ((functor_t)(6*4+2))
|
||||
#define FUNCTOR_ar_not_equal2 ((functor_t)(7*4+2))
|
||||
#define FUNCTOR_asin1 ((functor_t)(8*4+2))
|
||||
#define FUNCTOR_asinh1 ((functor_t)(9*4+2))
|
||||
#define FUNCTOR_assert1 ((functor_t)(10*4+2))
|
||||
#define FUNCTOR_asserta1 ((functor_t)(11*4+2))
|
||||
#define FUNCTOR_atan1 ((functor_t)(12*4+2))
|
||||
#define FUNCTOR_atan2 ((functor_t)(13*4+2))
|
||||
#define FUNCTOR_atanh1 ((functor_t)(14*4+2))
|
||||
#define FUNCTOR_atan22 ((functor_t)(15*4+2))
|
||||
#define FUNCTOR_atom1 ((functor_t)(16*4+2))
|
||||
#define FUNCTOR_att3 ((functor_t)(17*4+2))
|
||||
#define FUNCTOR_backslash1 ((functor_t)(18*4+2))
|
||||
#define FUNCTOR_bar2 ((functor_t)(19*4+2))
|
||||
#define FUNCTOR_bitor2 ((functor_t)(20*4+2))
|
||||
#define FUNCTOR_bom1 ((functor_t)(21*4+2))
|
||||
#define FUNCTOR_brace_term_position3 ((functor_t)(22*4+2))
|
||||
#define FUNCTOR_break1 ((functor_t)(23*4+2))
|
||||
#define FUNCTOR_break2 ((functor_t)(24*4+2))
|
||||
#define FUNCTOR_break3 ((functor_t)(25*4+2))
|
||||
#define FUNCTOR_buffer1 ((functor_t)(26*4+2))
|
||||
#define FUNCTOR_buffer_size1 ((functor_t)(27*4+2))
|
||||
#define FUNCTOR_busy2 ((functor_t)(28*4+2))
|
||||
#define FUNCTOR_call1 ((functor_t)(29*4+2))
|
||||
#define FUNCTOR_catch3 ((functor_t)(30*4+2))
|
||||
#define FUNCTOR_ceil1 ((functor_t)(31*4+2))
|
||||
#define FUNCTOR_ceiling1 ((functor_t)(32*4+2))
|
||||
#define FUNCTOR_chars1 ((functor_t)(33*4+2))
|
||||
#define FUNCTOR_chars2 ((functor_t)(34*4+2))
|
||||
#define FUNCTOR_class1 ((functor_t)(35*4+2))
|
||||
#define FUNCTOR_clause1 ((functor_t)(36*4+2))
|
||||
#define FUNCTOR_close_on_abort1 ((functor_t)(37*4+2))
|
||||
#define FUNCTOR_close_on_exec1 ((functor_t)(38*4+2))
|
||||
#define FUNCTOR_codes1 ((functor_t)(39*4+2))
|
||||
#define FUNCTOR_codes2 ((functor_t)(40*4+2))
|
||||
#define FUNCTOR_colon2 ((functor_t)(41*4+2))
|
||||
#define FUNCTOR_comma2 ((functor_t)(42*4+2))
|
||||
#define FUNCTOR_context2 ((functor_t)(43*4+2))
|
||||
#define FUNCTOR_copysign2 ((functor_t)(44*4+2))
|
||||
#define FUNCTOR_cos1 ((functor_t)(45*4+2))
|
||||
#define FUNCTOR_cosh1 ((functor_t)(46*4+2))
|
||||
#define FUNCTOR_cputime0 ((functor_t)(47*4+2))
|
||||
#define FUNCTOR_curl1 ((functor_t)(48*4+2))
|
||||
#define FUNCTOR_cut_call1 ((functor_t)(49*4+2))
|
||||
#define FUNCTOR_cut_exit1 ((functor_t)(50*4+2))
|
||||
#define FUNCTOR_dand2 ((functor_t)(51*4+2))
|
||||
#define FUNCTOR_date3 ((functor_t)(52*4+2))
|
||||
#define FUNCTOR_date9 ((functor_t)(53*4+2))
|
||||
#define FUNCTOR_dc_call_prolog0 ((functor_t)(54*4+2))
|
||||
#define FUNCTOR_dcall1 ((functor_t)(55*4+2))
|
||||
#define FUNCTOR_dcut1 ((functor_t)(56*4+2))
|
||||
#define FUNCTOR_dde_error2 ((functor_t)(57*4+2))
|
||||
#define FUNCTOR_debugging1 ((functor_t)(58*4+2))
|
||||
#define FUNCTOR_decimal_point1 ((functor_t)(59*4+2))
|
||||
#define FUNCTOR_detached1 ((functor_t)(60*4+2))
|
||||
#define FUNCTOR_dexit2 ((functor_t)(61*4+2))
|
||||
#define FUNCTOR_dforeign_registered2 ((functor_t)(62*4+2))
|
||||
#define FUNCTOR_dgarbage_collect1 ((functor_t)(63*4+2))
|
||||
#define FUNCTOR_div2 ((functor_t)(64*4+2))
|
||||
#define FUNCTOR_gdiv2 ((functor_t)(65*4+2))
|
||||
#define FUNCTOR_divide2 ((functor_t)(66*4+2))
|
||||
#define FUNCTOR_dmessage_queue1 ((functor_t)(67*4+2))
|
||||
#define FUNCTOR_dmutex1 ((functor_t)(68*4+2))
|
||||
#define FUNCTOR_domain_error2 ((functor_t)(69*4+2))
|
||||
#define FUNCTOR_dot2 ((functor_t)(70*4+2))
|
||||
#define FUNCTOR_doublestar2 ((functor_t)(71*4+2))
|
||||
#define FUNCTOR_dparse_quasi_quotations2 ((functor_t)(72*4+2))
|
||||
#define FUNCTOR_dprof_node1 ((functor_t)(73*4+2))
|
||||
#define FUNCTOR_dquasi_quotation3 ((functor_t)(74*4+2))
|
||||
#define FUNCTOR_drecover_and_rethrow2 ((functor_t)(75*4+2))
|
||||
#define FUNCTOR_dstream1 ((functor_t)(76*4+2))
|
||||
#define FUNCTOR_dthread_init0 ((functor_t)(77*4+2))
|
||||
#define FUNCTOR_dthrow1 ((functor_t)(78*4+2))
|
||||
#define FUNCTOR_dtime2 ((functor_t)(79*4+2))
|
||||
#define FUNCTOR_duplicate_key1 ((functor_t)(80*4+2))
|
||||
#define FUNCTOR_dvard1 ((functor_t)(81*4+2))
|
||||
#define FUNCTOR_dwakeup1 ((functor_t)(82*4+2))
|
||||
#define FUNCTOR_e0 ((functor_t)(83*4+2))
|
||||
#define FUNCTOR_encoding1 ((functor_t)(84*4+2))
|
||||
#define FUNCTOR_end_of_stream1 ((functor_t)(85*4+2))
|
||||
#define FUNCTOR_eof_action1 ((functor_t)(86*4+2))
|
||||
#define FUNCTOR_epsilon0 ((functor_t)(87*4+2))
|
||||
#define FUNCTOR_equals2 ((functor_t)(88*4+2))
|
||||
#define FUNCTOR_erased1 ((functor_t)(89*4+2))
|
||||
#define FUNCTOR_erf1 ((functor_t)(90*4+2))
|
||||
#define FUNCTOR_erfc1 ((functor_t)(91*4+2))
|
||||
#define FUNCTOR_error2 ((functor_t)(92*4+2))
|
||||
#define FUNCTOR_eval1 ((functor_t)(93*4+2))
|
||||
#define FUNCTOR_evaluation_error1 ((functor_t)(94*4+2))
|
||||
#define FUNCTOR_exception1 ((functor_t)(95*4+2))
|
||||
#define FUNCTOR_exception3 ((functor_t)(96*4+2))
|
||||
#define FUNCTOR_existence_error2 ((functor_t)(97*4+2))
|
||||
#define FUNCTOR_existence_error3 ((functor_t)(98*4+2))
|
||||
#define FUNCTOR_exited1 ((functor_t)(99*4+2))
|
||||
#define FUNCTOR_exp1 ((functor_t)(100*4+2))
|
||||
#define FUNCTOR_exports1 ((functor_t)(101*4+2))
|
||||
#define FUNCTOR_external_exception1 ((functor_t)(102*4+2))
|
||||
#define FUNCTOR_fail0 ((functor_t)(103*4+2))
|
||||
#define FUNCTOR_failure_error1 ((functor_t)(104*4+2))
|
||||
#define FUNCTOR_file1 ((functor_t)(105*4+2))
|
||||
#define FUNCTOR_file4 ((functor_t)(106*4+2))
|
||||
#define FUNCTOR_file_name1 ((functor_t)(107*4+2))
|
||||
#define FUNCTOR_file_no1 ((functor_t)(108*4+2))
|
||||
#define FUNCTOR_float1 ((functor_t)(109*4+2))
|
||||
#define FUNCTOR_float_fractional_part1 ((functor_t)(110*4+2))
|
||||
#define FUNCTOR_float_integer_part1 ((functor_t)(111*4+2))
|
||||
#define FUNCTOR_floor1 ((functor_t)(112*4+2))
|
||||
#define FUNCTOR_foreign_function1 ((functor_t)(113*4+2))
|
||||
#define FUNCTOR_frame3 ((functor_t)(114*4+2))
|
||||
#define FUNCTOR_frame_finished1 ((functor_t)(115*4+2))
|
||||
#define FUNCTOR_gcd2 ((functor_t)(116*4+2))
|
||||
#define FUNCTOR_goal_expansion2 ((functor_t)(117*4+2))
|
||||
#define FUNCTOR_ground1 ((functor_t)(118*4+2))
|
||||
#define FUNCTOR_grouping1 ((functor_t)(119*4+2))
|
||||
#define FUNCTOR_hat2 ((functor_t)(120*4+2))
|
||||
#define FUNCTOR_ifthen2 ((functor_t)(121*4+2))
|
||||
#define FUNCTOR_import_into1 ((functor_t)(122*4+2))
|
||||
#define FUNCTOR_input0 ((functor_t)(123*4+2))
|
||||
#define FUNCTOR_input4 ((functor_t)(124*4+2))
|
||||
#define FUNCTOR_integer1 ((functor_t)(125*4+2))
|
||||
#define FUNCTOR_interrupt1 ((functor_t)(126*4+2))
|
||||
#define FUNCTOR_io_error2 ((functor_t)(127*4+2))
|
||||
#define FUNCTOR_is2 ((functor_t)(128*4+2))
|
||||
#define FUNCTOR_isovar1 ((functor_t)(129*4+2))
|
||||
#define FUNCTOR_key_value_position7 ((functor_t)(130*4+2))
|
||||
#define FUNCTOR_larger2 ((functor_t)(131*4+2))
|
||||
#define FUNCTOR_larger_equal2 ((functor_t)(132*4+2))
|
||||
#define FUNCTOR_lgamma1 ((functor_t)(133*4+2))
|
||||
#define FUNCTOR_line_count1 ((functor_t)(134*4+2))
|
||||
#define FUNCTOR_list_position4 ((functor_t)(135*4+2))
|
||||
#define FUNCTOR_listing1 ((functor_t)(136*4+2))
|
||||
#define FUNCTOR_locale1 ((functor_t)(137*4+2))
|
||||
#define FUNCTOR_locked2 ((functor_t)(138*4+2))
|
||||
#define FUNCTOR_log1 ((functor_t)(139*4+2))
|
||||
#define FUNCTOR_log101 ((functor_t)(140*4+2))
|
||||
#define FUNCTOR_lsb1 ((functor_t)(141*4+2))
|
||||
#define FUNCTOR_lshift2 ((functor_t)(142*4+2))
|
||||
#define FUNCTOR_map_position5 ((functor_t)(143*4+2))
|
||||
#define FUNCTOR_max2 ((functor_t)(144*4+2))
|
||||
#define FUNCTOR_max_size1 ((functor_t)(145*4+2))
|
||||
#define FUNCTOR_message_lines1 ((functor_t)(146*4+2))
|
||||
#define FUNCTOR_min2 ((functor_t)(147*4+2))
|
||||
#define FUNCTOR_minus1 ((functor_t)(148*4+2))
|
||||
#define FUNCTOR_minus2 ((functor_t)(149*4+2))
|
||||
#define FUNCTOR_mod2 ((functor_t)(150*4+2))
|
||||
#define FUNCTOR_mode1 ((functor_t)(151*4+2))
|
||||
#define FUNCTOR_msb1 ((functor_t)(152*4+2))
|
||||
#define FUNCTOR_newline1 ((functor_t)(153*4+2))
|
||||
#define FUNCTOR_nlink1 ((functor_t)(154*4+2))
|
||||
#define FUNCTOR_nonvar1 ((functor_t)(155*4+2))
|
||||
#define FUNCTOR_not_implemented2 ((functor_t)(156*4+2))
|
||||
#define FUNCTOR_not_provable1 ((functor_t)(157*4+2))
|
||||
#define FUNCTOR_not_strict_equal2 ((functor_t)(158*4+2))
|
||||
#define FUNCTOR_occurs_check2 ((functor_t)(159*4+2))
|
||||
#define FUNCTOR_or1 ((functor_t)(160*4+2))
|
||||
#define FUNCTOR_output0 ((functor_t)(161*4+2))
|
||||
#define FUNCTOR_permission_error3 ((functor_t)(162*4+2))
|
||||
#define FUNCTOR_pi0 ((functor_t)(163*4+2))
|
||||
#define FUNCTOR_pipe1 ((functor_t)(164*4+2))
|
||||
#define FUNCTOR_plus1 ((functor_t)(165*4+2))
|
||||
#define FUNCTOR_plus2 ((functor_t)(166*4+2))
|
||||
#define FUNCTOR_popcount1 ((functor_t)(167*4+2))
|
||||
#define FUNCTOR_portray1 ((functor_t)(168*4+2))
|
||||
#define FUNCTOR_position1 ((functor_t)(169*4+2))
|
||||
#define FUNCTOR_powm3 ((functor_t)(170*4+2))
|
||||
#define FUNCTOR_print1 ((functor_t)(171*4+2))
|
||||
#define FUNCTOR_print_message2 ((functor_t)(172*4+2))
|
||||
#define FUNCTOR_priority1 ((functor_t)(173*4+2))
|
||||
#define FUNCTOR_procedure2 ((functor_t)(174*4+2))
|
||||
#define FUNCTOR_prove1 ((functor_t)(175*4+2))
|
||||
#define FUNCTOR_prove2 ((functor_t)(176*4+2))
|
||||
#define FUNCTOR_punct2 ((functor_t)(177*4+2))
|
||||
#define FUNCTOR_quasi_quotation4 ((functor_t)(178*4+2))
|
||||
#define FUNCTOR_quasi_quotation_position5 ((functor_t)(179*4+2))
|
||||
#define FUNCTOR_random1 ((functor_t)(180*4+2))
|
||||
#define FUNCTOR_random_float0 ((functor_t)(181*4+2))
|
||||
#define FUNCTOR_rational1 ((functor_t)(182*4+2))
|
||||
#define FUNCTOR_rationalize1 ((functor_t)(183*4+2))
|
||||
#define FUNCTOR_rdiv2 ((functor_t)(184*4+2))
|
||||
#define FUNCTOR_redo1 ((functor_t)(185*4+2))
|
||||
#define FUNCTOR_rem2 ((functor_t)(186*4+2))
|
||||
#define FUNCTOR_repeat1 ((functor_t)(187*4+2))
|
||||
#define FUNCTOR_reposition1 ((functor_t)(188*4+2))
|
||||
#define FUNCTOR_representation_error1 ((functor_t)(189*4+2))
|
||||
#define FUNCTOR_representation_errors1 ((functor_t)(190*4+2))
|
||||
#define FUNCTOR_resource_error1 ((functor_t)(191*4+2))
|
||||
#define FUNCTOR_retry1 ((functor_t)(192*4+2))
|
||||
#define FUNCTOR_round1 ((functor_t)(193*4+2))
|
||||
#define FUNCTOR_rshift2 ((functor_t)(194*4+2))
|
||||
#define FUNCTOR_semicolon2 ((functor_t)(195*4+2))
|
||||
#define FUNCTOR_setup_call_catcher_cleanup4 ((functor_t)(196*4+2))
|
||||
#define FUNCTOR_shared_object2 ((functor_t)(197*4+2))
|
||||
#define FUNCTOR_shell2 ((functor_t)(198*4+2))
|
||||
#define FUNCTOR_sign1 ((functor_t)(199*4+2))
|
||||
#define FUNCTOR_signal1 ((functor_t)(200*4+2))
|
||||
#define FUNCTOR_signal2 ((functor_t)(201*4+2))
|
||||
#define FUNCTOR_sin1 ((functor_t)(202*4+2))
|
||||
#define FUNCTOR_singletons1 ((functor_t)(203*4+2))
|
||||
#define FUNCTOR_sinh1 ((functor_t)(204*4+2))
|
||||
#define FUNCTOR_size1 ((functor_t)(205*4+2))
|
||||
#define FUNCTOR_smaller2 ((functor_t)(206*4+2))
|
||||
#define FUNCTOR_smaller_equal2 ((functor_t)(207*4+2))
|
||||
#define FUNCTOR_softcut2 ((functor_t)(208*4+2))
|
||||
#define FUNCTOR_spy1 ((functor_t)(209*4+2))
|
||||
#define FUNCTOR_sqrt1 ((functor_t)(210*4+2))
|
||||
#define FUNCTOR_star2 ((functor_t)(211*4+2))
|
||||
#define FUNCTOR_status1 ((functor_t)(212*4+2))
|
||||
#define FUNCTOR_stream4 ((functor_t)(213*4+2))
|
||||
#define FUNCTOR_stream_position4 ((functor_t)(214*4+2))
|
||||
#define FUNCTOR_strict_equal2 ((functor_t)(215*4+2))
|
||||
#define FUNCTOR_string1 ((functor_t)(216*4+2))
|
||||
#define FUNCTOR_string2 ((functor_t)(217*4+2))
|
||||
#define FUNCTOR_string_position2 ((functor_t)(218*4+2))
|
||||
#define FUNCTOR_syntax_error1 ((functor_t)(219*4+2))
|
||||
#define FUNCTOR_syntax_error3 ((functor_t)(220*4+2))
|
||||
#define FUNCTOR_tan1 ((functor_t)(221*4+2))
|
||||
#define FUNCTOR_tanh1 ((functor_t)(222*4+2))
|
||||
#define FUNCTOR_term_expansion2 ((functor_t)(223*4+2))
|
||||
#define FUNCTOR_term_position5 ((functor_t)(224*4+2))
|
||||
#define FUNCTOR_thousands_sep1 ((functor_t)(225*4+2))
|
||||
#define FUNCTOR_timeout1 ((functor_t)(226*4+2))
|
||||
#define FUNCTOR_timeout_error2 ((functor_t)(227*4+2))
|
||||
#define FUNCTOR_trace1 ((functor_t)(228*4+2))
|
||||
#define FUNCTOR_traceinterc3 ((functor_t)(229*4+2))
|
||||
#define FUNCTOR_tracing1 ((functor_t)(230*4+2))
|
||||
#define FUNCTOR_true0 ((functor_t)(231*4+2))
|
||||
#define FUNCTOR_truncate1 ((functor_t)(232*4+2))
|
||||
#define FUNCTOR_tty1 ((functor_t)(233*4+2))
|
||||
#define FUNCTOR_type1 ((functor_t)(234*4+2))
|
||||
#define FUNCTOR_type_error2 ((functor_t)(235*4+2))
|
||||
#define FUNCTOR_undefinterc4 ((functor_t)(236*4+2))
|
||||
#define FUNCTOR_unify_determined2 ((functor_t)(237*4+2))
|
||||
#define FUNCTOR_uninstantiation_error1 ((functor_t)(238*4+2))
|
||||
#define FUNCTOR_var1 ((functor_t)(239*4+2))
|
||||
#define FUNCTOR_wakeup3 ((functor_t)(240*4+2))
|
||||
#define FUNCTOR_warning3 ((functor_t)(241*4+2))
|
||||
#define FUNCTOR_xor2 ((functor_t)(242*4+2))
|
||||
#define FUNCTOR_xpceref1 ((functor_t)(243*4+2))
|
||||
#define FUNCTOR_xpceref2 ((functor_t)(244*4+2))
|
||||
|
||||
|
||||
#define N_SWI_ATOMS 726
|
||||
#define N_SWI_FUNCTORS 245
|
||||
#define N_SWI_HASH_BITS 11
|
||||
#define N_SWI_HASH 2048
|
@ -1,8 +1,24 @@
|
||||
recursive-include yap4py *.dylib
|
||||
recursive-include yap4py *.dll
|
||||
recursive-include yap4py *.so
|
||||
recursive-include yap4py/prolog *.yap
|
||||
recursive-include yap4py *.yss
|
||||
recursive-include yap4py/prolog *.pl
|
||||
recursive-include yap4py/prolog *.r
|
||||
recursive-include yap4py *.md
|
||||
include COPYING.md
|
||||
include CONTRIBUTING.md
|
||||
include README.md
|
||||
|
||||
# Documentation
|
||||
graft docs
|
||||
exclude docs/\#*
|
||||
|
||||
# Examples
|
||||
graft examples
|
||||
|
||||
# docs subdirs we want to skip
|
||||
prune docs/build
|
||||
prune docs/gh-pages
|
||||
prune docs/dist
|
||||
|
||||
# Patterns to exclude from any directory
|
||||
global-exclude *~
|
||||
global-exclude *.pyc
|
||||
global-exclude *.pyo
|
||||
global-exclude .git
|
||||
global-exclude .ipynb_checkpoints
|
||||
|
||||
prune data_kernelspec
|
||||
|
93
packages/python/swig/YAP4PY.md
Normal file
93
packages/python/swig/YAP4PY.md
Normal file
@ -0,0 +1,93 @@
|
||||
|
||||
<center>
|
||||
![The YAP Logo](docs/icons/yap_128x128x32.png)
|
||||
</center>
|
||||
|
||||
NOTE: this version of YAP is still experimental, documentation may be out of date.
|
||||
|
||||
## Introduction
|
||||
|
||||
This document provides User information on version 6.3.4 of
|
||||
YAP (<em>Yet Another Prolog</em>). The YAP Prolog System is a
|
||||
high-performance Prolog compiler developed at Universidade do
|
||||
Porto. YAP supports stream Input/Output, sockets, modules,
|
||||
exceptions, Prolog debugger, C-interface, dynamic code, internal
|
||||
database, DCGs, saved states, co-routining, arrays, threads.
|
||||
|
||||
We explicitly allow both commercial and non-commercial use of YAP.
|
||||
|
||||
|
||||
YAP is based on the David H. D. Warren's WAM (Warren Abstract Machine),
|
||||
with several optimizations for better performance. YAP follows the
|
||||
Edinburgh tradition, and was originally designed to be largely
|
||||
compatible with DEC-10 Prolog, Quintus Prolog, and especially with
|
||||
C-Prolog. More recently, we have worked on being compatible with SICStus Prolog and with SWI-Prolog.
|
||||
|
||||
YAP implements most of the ISO-Prolog standard. We are striving at
|
||||
full compatibility, and the manual describes what is still
|
||||
missing.
|
||||
The document is intended neither as an introduction to Prolog nor to the
|
||||
implementation aspects of the compiler. A good introduction to
|
||||
programming in Prolog is the book @cite TheArtOfProlog , by
|
||||
L. Sterling and E. Shapiro, published by "The MIT Press, Cambridge
|
||||
MA". Other references should include the classical @cite ProgrammingInProlog , by W.F. Clocksin and C.S. Mellish, published by
|
||||
Springer-Verlag.
|
||||
|
||||
YAP 6.3.4 has been built with the gcc and clang compilers on Linux and OSX machines. We expect to recover support for WIN32 machines and
|
||||
Android next.
|
||||
|
||||
We are happy to include in YAP several excellent packages developed
|
||||
under separate licenses. Our thanks to the authors for their kind
|
||||
authorization to include these packages.
|
||||
|
||||
The overall copyright and permission notice for YAP4.3 can be found in
|
||||
the Artistic file in this directory. YAP follows the Perl Artistic
|
||||
license, and it is thus non-copylefted freeware. Some components of YAP have been obtained from SWI Prolog and ciao, and have
|
||||
different licenses.
|
||||
|
||||
If you have a question about this software, desire to add code, found a
|
||||
bug, want to request a feature, or wonder how to get further assistance,
|
||||
please send e-mail to <yap-users AT lists.sourceforge.net>. To
|
||||
subscribe to the mailing list, visit the page
|
||||
<https://lists.sourceforge.net/lists/listinfo/yap-users>.
|
||||
|
||||
On-line documentation is available for [YAP](http://www.dcc.fp.pt/~vsc/yap/)
|
||||
|
||||
|
||||
|
||||
The packages are, in alphabetical order:
|
||||
|
||||
+ The CHR package developed by Tom Schrijvers,
|
||||
Christian Holzbaur, and Jan Wielemaker.
|
||||
|
||||
+ The CLP(BN) package and Horus toolkit developed by Tiago Gomes, and Vítor Santos Costa.
|
||||
|
||||
+ The CLP(R) package developed by Leslie De Koninck, Bart Demoen, Tom
|
||||
Schrijvers, and Jan Wielemaker, based on the CLP(Q,R) implementation
|
||||
by Christian Holzbaur.
|
||||
|
||||
+ The CPLint package developed by Fabrizio Riguzzi's research
|
||||
laboratory at the [University of Ferrara](http://www.ing.unife.it/Docenti/FabrizioRiguzzi/)
|
||||
|
||||
+ The CUDA interface package developed by Carlos Martínez, Jorge
|
||||
Buenabad, Inês Dutra and Vítor Santos Costa.
|
||||
|
||||
+ The [GECODE](http://www.gecode.org) interface package developed by Denys Duchier and Vítor Santos Costa.
|
||||
|
||||
+ The [JPL](http://www.swi-prolog.org/packages/jpl/) (Java-Prolog Library) package developed by .
|
||||
|
||||
The minisat SAT solver interface developed by Michael Codish,
|
||||
Vitaly Lagoon, and Peter J. Stuckey.
|
||||
|
||||
+ The MYDDAS relational data-base interface developed at the
|
||||
Universidade do Porto by Tiago Soares, Michel Ferreira, and Ricardo Rocha.
|
||||
|
||||
+ The [PRISM](http://rjida.meijo-u.ac.jp/prism/) logic-based
|
||||
programming system for statistical modeling developed at the Sato
|
||||
Research Laboratory, TITECH, Japan.
|
||||
|
||||
+ The ProbLog 1 system developed by the [ProbLog](https://dtai.cs.kuleuven.be/problog) team in the
|
||||
DTAI group of KULeuven.
|
||||
|
||||
+ The [R](http://stoics.org.uk/~nicos/sware/packs/real/) interface package developed by Nicos Angelopoulos,
|
||||
Vítor Santos Costa, João Azevedo, Jan Wielemaker, and Rui Camacho.
|
162
packages/python/swig/setup.py.in
Normal file
162
packages/python/swig/setup.py.in
Normal file
@ -0,0 +1,162 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
from __future__ import print_function
|
||||
|
||||
from setuptools import setup
|
||||
from setuptools.extension import Extension
|
||||
from codecs import open
|
||||
from os import path, makedirs, walk
|
||||
from shutil import copytree, rmtree, copy2, move
|
||||
from glob import glob
|
||||
from pathlib import Path
|
||||
import platform
|
||||
import os.path
|
||||
|
||||
# the name of the package
|
||||
name = 'yap_kernel'
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Minimal Python version sanity check
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
import sys
|
||||
|
||||
v = sys.version_info
|
||||
if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
|
||||
error = "ERROR: %s requires Python version 2.7 or 3.3 or above." % name
|
||||
print(error, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# get on with it
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
from glob import glob
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from distutils.core import setup
|
||||
|
||||
pjoin = os.path.join
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
pkg_root = pjoin(here, name)
|
||||
|
||||
my_extra_link_args = []
|
||||
if platform.system() == 'Darwin':
|
||||
my_extra_link_args = ['-Wl,-rpath','-Wl,${_ABS_PYTHON_MODULE_PATH}']
|
||||
so = 'dylib'
|
||||
#or dll in glob('yap/dlls/*'):
|
||||
# move( dll ,'lib' )
|
||||
|
||||
|
||||
cplus=['${RELATIVE_SOURCE}CXX/yapi.cpp']
|
||||
|
||||
py2yap=['${RELATIVE_SOURCE}packages/python/python.c',
|
||||
'${RELATIVE_SOURCE}packages/python/pl2py.c',
|
||||
'${RELATIVE_SOURCE}packages/python/pybips.c',
|
||||
'${RELATIVE_SOURCE}packages/python/py2pl.c',
|
||||
'${RELATIVE_SOURCE}packages/python/pl2pl.c',
|
||||
'${RELATIVE_SOURCE}packages/python/pypreds.c'
|
||||
]
|
||||
|
||||
native_sources = ['yapPYTHON_wrap.cxx']+py2yap+cplus
|
||||
here = path.abspath(path.dirname(__file__))
|
||||
|
||||
# Get the long description from the README file
|
||||
|
||||
extensions=[Extension('_yap', native_sources,
|
||||
define_macros = [('MAJOR_VERSION', '1'),
|
||||
('MINOR_VERSION', '0'),
|
||||
('_YAP_NOT_INSTALLED_', '1'),
|
||||
('YAP_PYTHON', '1')],
|
||||
runtime_library_dirs=['yap4py','${libdir}','${bindir}'],
|
||||
swig_opts=['-modern', '-c++', '-py3','-I${RELATIVE_SOURCE}/CXX'],
|
||||
library_dirs=['../../..','../../../CXX','../../packages/python',"${dlls}","${bindir}", '.'],
|
||||
extra_link_args=my_extra_link_args,
|
||||
extra_compile_args=['-g3','-O0'],
|
||||
libraries=['Yap','${GMP_LIBRARIES}'],
|
||||
include_dirs=['../../..',
|
||||
'${GMP_INCLUDE_DIRS}',
|
||||
'${RELATIVE_SOURCE}H',
|
||||
'${RELATIVE_SOURCE}H/generated',
|
||||
'${RELATIVE_SOURCE}OPTYap',
|
||||
'${RELATIVE_SOURCE}os',
|
||||
'${RELATIVE_SOURCE}include',
|
||||
'${RELATIVE_SOURCE}CXX', '.']
|
||||
)]
|
||||
|
||||
packages = ['yap4py']
|
||||
|
||||
pls = []
|
||||
for (r,d,fs) in walk('dylib'):
|
||||
for f in fs:
|
||||
pls += [os.path.join(r, f)]
|
||||
for (r,d,fs) in walk('yss'):
|
||||
for f in fs:
|
||||
pls += [os.path.join(r, f)]
|
||||
for (r,d,fs) in walk('pl'):
|
||||
for f in fs:
|
||||
pls += [os.path.join(r, f)]
|
||||
for (r,d,fs) in walk('yap'):
|
||||
for f in fs:
|
||||
pls += [os.path.join(r, f)]
|
||||
|
||||
for d, _, _ in os.walk(pjoin(here, name)):
|
||||
if os.path.exists(pjoin(d, '__init__.py')):
|
||||
packages.append(d[len(here)+1:].replace(os.path.sep, '.'))
|
||||
|
||||
package_data = {
|
||||
'yap4pyl': pls,
|
||||
}
|
||||
|
||||
version_ns = {'__version__'='6.3','minor-version'='6','minor-version'='3','patch'='5'}
|
||||
|
||||
|
||||
setup_args = dict(
|
||||
name = name,
|
||||
version = version_ns['__version__'],
|
||||
scripts = glob(pjoin('scripts', '*')),
|
||||
packages = packages,
|
||||
py_modules = ['yap'],
|
||||
package_data = package_data,
|
||||
description = "YAP in Python",
|
||||
author = 'YAP Development Team',
|
||||
author_email = 'ipython-dev@scipy.org',
|
||||
url = 'http://ipython.org',
|
||||
license = 'BSD',
|
||||
extensions = ['extensions'],
|
||||
platforms = "Linux, Mac OS X, Windows",
|
||||
keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
|
||||
classifiers = [
|
||||
'Intended Audience :: Developers',
|
||||
'Intended Audience :: System Administrators',
|
||||
'Intended Audience :: Science/Research',
|
||||
'License :: OSI Approved :: BSD License',
|
||||
'Programming Language :: Python',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
],
|
||||
)
|
||||
|
||||
if 'develop' in sys.argv or any(a.startswith('bdist') for a in sys.argv):
|
||||
import setuptools
|
||||
|
||||
setuptools_args = {}
|
||||
install_requires = setuptools_args['install_requires'] = [
|
||||
]
|
||||
|
||||
extras_require = setuptools_args['extras_require'] = {
|
||||
'test:python_version=="2.7"': ['mock'],
|
||||
'test': ['nose_warnings_filters', 'nose-timer'],
|
||||
}
|
||||
|
||||
if 'setuptools' in sys.modules:
|
||||
setup_args.update(setuptools_args)
|
||||
|
||||
if __name__ == '__main__':
|
||||
setup(**setup_args)
|
3
packages/python/yap_kernel/CONTRIBUTING.md
Normal file
3
packages/python/yap_kernel/CONTRIBUTING.md
Normal file
@ -0,0 +1,3 @@
|
||||
# Contributing
|
||||
|
||||
We follow the [IPython Contributing Guide](https://github.com/ipython/ipython/blob/master/CONTRIBUTING.md).
|
59
packages/python/yap_kernel/COPYING.md
Normal file
59
packages/python/yap_kernel/COPYING.md
Normal file
@ -0,0 +1,59 @@
|
||||
# Licensing terms
|
||||
|
||||
This project is licensed under the terms of the Modified BSD License
|
||||
(also known as New or Revised or 3-Clause BSD), as follows:
|
||||
|
||||
- Copyright (c) 2015, IPython Development Team
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
Neither the name of the IPython Development Team nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
## About the IPython Development Team
|
||||
|
||||
The IPython Development Team is the set of all contributors to the IPython project.
|
||||
This includes all of the IPython subprojects.
|
||||
|
||||
The core team that coordinates development on GitHub can be found here:
|
||||
https://github.com/ipython/.
|
||||
|
||||
## Our Copyright Policy
|
||||
|
||||
IPython uses a shared copyright model. Each contributor maintains copyright
|
||||
over their contributions to IPython. But, it is important to note that these
|
||||
contributions are typically only changes to the repositories. Thus, the IPython
|
||||
source code, in its entirety is not the copyright of any single person or
|
||||
institution. Instead, it is the collective copyright of the entire IPython
|
||||
Development Team. If individual contributors want to maintain a record of what
|
||||
changes/contributions they have specific copyright on, they should indicate
|
||||
their copyright in the commit message of the change, when they commit the
|
||||
change to one of the IPython repositories.
|
||||
|
||||
With this in mind, the following banner should be used in any source code file
|
||||
to indicate the copyright and license terms:
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
@ -1 +1,24 @@
|
||||
recursive-include yap_kernel/resources *.*
|
||||
include COPYING.md
|
||||
include CONTRIBUTING.md
|
||||
include README.md
|
||||
|
||||
# Documentation
|
||||
graft docs
|
||||
exclude docs/\#*
|
||||
|
||||
# Examples
|
||||
graft examples
|
||||
|
||||
# docs subdirs we want to skip
|
||||
prune docs/build
|
||||
prune docs/gh-pages
|
||||
prune docs/dist
|
||||
|
||||
# Patterns to exclude from any directory
|
||||
global-exclude *~
|
||||
global-exclude *.pyc
|
||||
global-exclude *.pyo
|
||||
global-exclude .git
|
||||
global-exclude .ipynb_checkpoints
|
||||
|
||||
prune data_kernelspec
|
||||
|
39
packages/python/yap_kernel/README.md
Normal file
39
packages/python/yap_kernel/README.md
Normal file
@ -0,0 +1,39 @@
|
||||
# IPython Kernel for Jupyter
|
||||
|
||||
This package provides the IPython kernel for Jupyter.
|
||||
|
||||
## Installation from source
|
||||
|
||||
1. `git clone`
|
||||
2. `cd ipykernel`
|
||||
3. `pip install -e .`
|
||||
|
||||
After that, all normal `ipython` commands will use this newly-installed version of the kernel.
|
||||
|
||||
## Running tests
|
||||
|
||||
Ensure you have `nosetests` and the `nose-warnings-filters` plugin installed with
|
||||
|
||||
```bash
|
||||
pip install nose nose-warnings-filters
|
||||
```
|
||||
|
||||
and then from the root directory
|
||||
|
||||
```bash
|
||||
nosetests ipykernel
|
||||
```
|
||||
|
||||
## Running tests with coverage
|
||||
|
||||
Follow the instructions from `Running tests`. Ensure you have the `coverage` module installed with
|
||||
|
||||
```bash
|
||||
pip install coverage
|
||||
```
|
||||
|
||||
and then from the root directory
|
||||
|
||||
```bash
|
||||
nosetests --with-coverage --cover-package ipykernel ipykernel
|
||||
```
|
35
packages/python/yap_kernel/appveyor.yml
Normal file
35
packages/python/yap_kernel/appveyor.yml
Normal file
@ -0,0 +1,35 @@
|
||||
build: false
|
||||
shallow_clone: false
|
||||
skip_branch_with_pr: true
|
||||
clone_depth: 1
|
||||
|
||||
environment:
|
||||
|
||||
matrix:
|
||||
- python: "C:/Python27-x64"
|
||||
- python: "C:/Python27"
|
||||
- python: "C:/Python36-x64"
|
||||
- python: "C:/Python36"
|
||||
|
||||
cache:
|
||||
- C:\Users\appveyor\AppData\Local\pip\Cache
|
||||
|
||||
init:
|
||||
- cmd: set PATH=%python%;%python%\scripts;%PATH%
|
||||
install:
|
||||
- cmd: |
|
||||
pip install --upgrade pip wheel
|
||||
pip --version
|
||||
- cmd: |
|
||||
pip install --pre -e . coverage nose_warnings_filters
|
||||
pip install ipykernel[test] nose-timer
|
||||
- cmd: |
|
||||
pip install matplotlib numpy
|
||||
pip freeze
|
||||
- cmd: python -c "import ipykernel.kernelspec; ipykernel.kernelspec.install(user=True)"
|
||||
test_script:
|
||||
- cmd: nosetests --with-coverage --with-timer --cover-package=ipykernel ipykernel
|
||||
|
||||
on_success:
|
||||
- cmd: pip install codecov
|
||||
- cmd: codecov
|
11
packages/python/yap_kernel/data_kernelspec/kernel.json
Normal file
11
packages/python/yap_kernel/data_kernelspec/kernel.json
Normal file
@ -0,0 +1,11 @@
|
||||
{
|
||||
"argv": [
|
||||
"python",
|
||||
"-m",
|
||||
"ipykernel_launcher",
|
||||
"-f",
|
||||
"{connection_file}"
|
||||
],
|
||||
"display_name": "Python 3",
|
||||
"language": "python"
|
||||
}
|
BIN
packages/python/yap_kernel/data_kernelspec/logo-32x32.png
Normal file
BIN
packages/python/yap_kernel/data_kernelspec/logo-32x32.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.1 KiB |
BIN
packages/python/yap_kernel/data_kernelspec/logo-64x64.png
Normal file
BIN
packages/python/yap_kernel/data_kernelspec/logo-64x64.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.1 KiB |
BIN
packages/python/yap_kernel/dist/yap_kernel-4.7.0.dev0.macosx-10.12-x86_64.tar.gz
vendored
Normal file
BIN
packages/python/yap_kernel/dist/yap_kernel-4.7.0.dev0.macosx-10.12-x86_64.tar.gz
vendored
Normal file
Binary file not shown.
BIN
packages/python/yap_kernel/dist/yap_kernel-4.7.0.dev0.tar.gz
vendored
Normal file
BIN
packages/python/yap_kernel/dist/yap_kernel-4.7.0.dev0.tar.gz
vendored
Normal file
Binary file not shown.
194
packages/python/yap_kernel/docs/changelog.rst
Normal file
194
packages/python/yap_kernel/docs/changelog.rst
Normal file
@ -0,0 +1,194 @@
|
||||
Changes in IPython kernel
|
||||
=========================
|
||||
|
||||
4.6
|
||||
---
|
||||
|
||||
4.6.1
|
||||
*****
|
||||
|
||||
`4.6.1 on GitHub <https://github.com/ipython/ipykernel/milestones/4.6.1>`__
|
||||
|
||||
- Fix eventloop-integration bug preventing Qt windows/widgets from displaying with ipykernel 4.6.0 and IPython ≥ 5.2.
|
||||
- Avoid deprecation warnings about naive datetimes when working with jupyter_client ≥ 5.0.
|
||||
|
||||
|
||||
4.6.0
|
||||
*****
|
||||
|
||||
`4.6.0 on GitHub <https://github.com/ipython/ipykernel/milestones/4.6>`__
|
||||
|
||||
- Add to API `DisplayPublisher.publish` two new fully backward-compatible
|
||||
keyword-args:
|
||||
- `update: bool`
|
||||
- `transient: dict`
|
||||
- Support new `transient` key in `display_data` messages spec for `publish`.
|
||||
For a display data message, `transient` contains data that shouldn't be
|
||||
persisted to files or documents. Add a `display_id` to this `transient`
|
||||
dict by `display(obj, display_id=...)`
|
||||
- Add `ipykernel_launcher` module which removes the current working directory
|
||||
from `sys.path` before launching the kernel. This helps to reduce the cases
|
||||
where the kernel won't start because there's a `random.py` (or similar)
|
||||
module in the current working directory.
|
||||
- Add busy/idle messages on IOPub during processing of aborted requests
|
||||
- Add active event loop setting to GUI, which enables the correct response
|
||||
to IPython's `is_event_loop_running_xxx`
|
||||
- Include IPython kernelspec in wheels to reduce reliance on "native kernel
|
||||
spec" in jupyter_client
|
||||
- Modify `OutStream` to inherit from `TextIOBase` instead of object to improve
|
||||
API support and error reporting
|
||||
- Fix IPython kernel death messages at start, such as "Kernel Restarting..."
|
||||
and "Kernel appears to have died", when parent-poller handles PID 1
|
||||
- Various bugfixes
|
||||
|
||||
|
||||
4.5
|
||||
---
|
||||
|
||||
4.5.2
|
||||
*****
|
||||
|
||||
`4.5.2 on GitHub <https://github.com/ipython/ipykernel/milestones/4.5.2>`__
|
||||
|
||||
- Fix bug when instantiating Comms outside of the IPython kernel (introduced in 4.5.1).
|
||||
|
||||
|
||||
4.5.1
|
||||
*****
|
||||
|
||||
`4.5.1 on GitHub <https://github.com/ipython/ipykernel/milestones/4.5.1>`__
|
||||
|
||||
- Add missing ``stream`` parameter to overridden :func:`getpass`
|
||||
- Remove locks from iopub thread, which could cause deadlocks during debugging
|
||||
- Fix regression where KeyboardInterrupt was treated as an aborted request, rather than an error
|
||||
- Allow instantiating Comms outside of the IPython kernel
|
||||
|
||||
4.5.0
|
||||
*****
|
||||
|
||||
`4.5 on GitHub <https://github.com/ipython/ipykernel/milestones/4.5>`__
|
||||
|
||||
- Use figure.dpi instead of savefig.dpi to set DPI for inline figures
|
||||
- Support ipympl matplotlib backend (requires IPython update as well to fully work)
|
||||
- Various bugfixes, including fixes for output coming from threads,
|
||||
and :func:`input` when called with non-string prompts, which stdlib allows.
|
||||
|
||||
|
||||
4.4
|
||||
---
|
||||
|
||||
4.4.1
|
||||
*****
|
||||
|
||||
`4.4.1 on GitHub <https://github.com/ipython/ipykernel/milestones/4.4.1>`__
|
||||
|
||||
- Fix circular import of matplotlib on Python 2 caused by the inline backend changes in 4.4.0.
|
||||
|
||||
|
||||
4.4.0
|
||||
*****
|
||||
|
||||
`4.4.0 on GitHub <https://github.com/ipython/ipykernel/milestones/4.4>`__
|
||||
|
||||
- Use `MPLBACKEND`_ environment variable to tell matplotlib >= 1.5 use use the inline backend by default.
|
||||
This is only done if MPLBACKEND is not already set and no backend has been explicitly loaded,
|
||||
so setting ``MPLBACKEND=Qt4Agg`` or calling ``%matplotlib notebook`` or ``matplotlib.use('Agg')``
|
||||
will take precedence.
|
||||
- Fixes for logging problems caused by 4.3,
|
||||
where logging could go to the terminal instead of the notebook.
|
||||
- Add ``--sys-prefix`` and ``--profile`` arguments to :command:`ipython kernel install`
|
||||
- Allow Comm (Widget) messages to be sent from background threads.
|
||||
- Select inline matplotlib backend by default if ``%matplotlib`` magic or
|
||||
``matplotlib.use()`` are not called explicitly (for matplotlib >= 1.5).
|
||||
- Fix some longstanding minor deviations from the message protocol
|
||||
(missing status: ok in a few replies, connect_reply format).
|
||||
- Remove calls to NoOpContext from IPython, deprecated in 5.0.
|
||||
|
||||
.. _MPLBACKEND: http://matplotlib.org/devel/coding_guide.html?highlight=mplbackend#developing-a-new-backend
|
||||
|
||||
|
||||
4.3
|
||||
---
|
||||
|
||||
4.3.2
|
||||
*****
|
||||
|
||||
- Use a nonempty dummy session key for inprocess kernels to avoid security
|
||||
warnings.
|
||||
|
||||
4.3.1
|
||||
*****
|
||||
|
||||
- Fix Windows Python 3.5 incompatibility caused by faulthandler patch in 4.3
|
||||
|
||||
4.3.0
|
||||
*****
|
||||
|
||||
`4.3.0 on GitHub <https://github.com/ipython/ipykernel/milestones/4.3>`__
|
||||
|
||||
- Publish all IO in a thread, via :class:`IOPubThread`.
|
||||
This solves the problem of requiring :meth:`sys.stdout.flush` to be called in the notebook to produce output promptly during long-running cells.
|
||||
- Remove refrences to outdated IPython guiref in kernel banner.
|
||||
- Patch faulthandler to use ``sys.__stderr__`` instead of forwarded ``sys.stderr``,
|
||||
which has no fileno when forwarded.
|
||||
- Deprecate some vestiges of the Big Split:
|
||||
- :func:`ipykernel.find_connection_file` is deprecated. Use :func:`jupyter_client.find_connection_file` instead.
|
||||
- Various pieces of code specific to IPython parallel are deprecated in ipykernel
|
||||
and moved to ipyparallel.
|
||||
|
||||
|
||||
4.2
|
||||
---
|
||||
|
||||
4.2.2
|
||||
*****
|
||||
|
||||
`4.2.2 on GitHub <https://github.com/ipython/ipykernel/milestones/4.2.2>`__
|
||||
|
||||
- Don't show interactive debugging info when kernel crashes
|
||||
- Fix handling of numerical types in json_clean
|
||||
- Testing fixes for output capturing
|
||||
|
||||
4.2.1
|
||||
*****
|
||||
|
||||
`4.2.1 on GitHub <https://github.com/ipython/ipykernel/milestones/4.2.1>`__
|
||||
|
||||
- Fix default display name back to "Python X" instead of "pythonX"
|
||||
|
||||
4.2.0
|
||||
*****
|
||||
|
||||
`4.2 on GitHub <https://github.com/ipython/ipykernel/milestones/4.2>`_
|
||||
|
||||
- Support sending a full message in initial opening of comms (metadata, buffers were not previously allowed)
|
||||
- When using ``ipython kernel install --name`` to install the IPython kernelspec, default display-name to the same value as ``--name``.
|
||||
|
||||
4.1
|
||||
---
|
||||
|
||||
4.1.1
|
||||
*****
|
||||
|
||||
`4.1.1 on GitHub <https://github.com/ipython/ipykernel/milestones/4.1.1>`_
|
||||
|
||||
- Fix missing ``ipykernel.__version__`` on Python 2.
|
||||
- Fix missing ``target_name`` when opening comms from the frontend.
|
||||
|
||||
4.1.0
|
||||
*****
|
||||
|
||||
`4.1 on GitHub <https://github.com/ipython/ipykernel/milestones/4.1>`_
|
||||
|
||||
|
||||
- add ``ipython kernel install`` entrypoint for installing the IPython
|
||||
kernelspec
|
||||
- provisional implementation of ``comm_info`` request/reply for msgspec
|
||||
v5.1
|
||||
|
||||
4.0
|
||||
---
|
||||
|
||||
`4.0 on GitHub <https://github.com/ipython/ipykernel/milestones/4.0>`_
|
||||
|
||||
4.0 is the first release of ipykernel as a standalone package.
|
303
packages/python/yap_kernel/docs/conf.py
Normal file
303
packages/python/yap_kernel/docs/conf.py
Normal file
@ -0,0 +1,303 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# IPython Kernel documentation build configuration file, created by
|
||||
# sphinx-quickstart on Mon Oct 5 11:32:44 2015.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import shlex
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.intersphinx',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'IPython Kernel'
|
||||
copyright = '2015, IPython Development Team'
|
||||
author = 'IPython Development Team'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
|
||||
version_ns = {}
|
||||
here = os.path.dirname(__file__)
|
||||
version_py = os.path.join(here, os.pardir, 'yap_kernel', '_version.py')
|
||||
with open(version_py) as f:
|
||||
exec(compile(f.read(), version_py, 'exec'), version_ns)
|
||||
|
||||
# The short X.Y version.
|
||||
version = '%i.%i' % version_ns['version_info'][:2]
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = version_ns['__version__']
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
default_role = 'literal'
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
#keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
# html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Language to be used for generating the HTML full-text search index.
|
||||
# Sphinx supports the following languages:
|
||||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
|
||||
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
|
||||
#html_search_language = 'en'
|
||||
|
||||
# A dictionary with options for the search language support, empty by default.
|
||||
# Now only 'ja' uses this config value
|
||||
#html_search_options = {'type': 'default'}
|
||||
|
||||
# The name of a javascript file (relative to the configuration directory) that
|
||||
# implements a search results scorer. If empty, the default will be used.
|
||||
#html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'yap_kerneldoc'
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'yap_kernel.tex', 'IPython Kernel Documentation',
|
||||
'IPython Development Team', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'yap_kernel', 'IPython Kernel Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'yap_kernel', 'IPython Kernel Documentation',
|
||||
author, 'yap_kernel', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
intersphinx_mapping = {
|
||||
'https://docs.python.org/': None,
|
||||
'ipython': ('https://ipython.readthedocs.io/en/latest', None),
|
||||
'jupyter': ('https://jupyter.readthedocs.io/en/latest', None),
|
||||
}
|
23
packages/python/yap_kernel/docs/index.rst
Normal file
23
packages/python/yap_kernel/docs/index.rst
Normal file
@ -0,0 +1,23 @@
|
||||
.. _index:
|
||||
|
||||
IPython Kernel Docs
|
||||
===================
|
||||
|
||||
This contains minimal version-sensitive documentation for the IPython kernel package.
|
||||
Most IPython kernel documentation is in the `IPython documentation <https://ipython.readthedocs.io/en/latest/>`_.
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
changelog.rst
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
263
packages/python/yap_kernel/docs/make.bat
Normal file
263
packages/python/yap_kernel/docs/make.bat
Normal file
@ -0,0 +1,263 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
echo. coverage to run coverage check of the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
REM Check if sphinx-build is available and fallback to Python version if any
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 goto sphinx_python
|
||||
goto sphinx_ok
|
||||
|
||||
:sphinx_python
|
||||
|
||||
set SPHINXBUILD=python -m sphinx.__init__
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
:sphinx_ok
|
||||
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\IPythonKernel.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\IPythonKernel.ghc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "coverage" (
|
||||
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of coverage in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/coverage/python.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
@ -0,0 +1,46 @@
|
||||
from __future__ import print_function
|
||||
import os
|
||||
|
||||
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
|
||||
from IPython.qt.inprocess import QtInProcessKernelManager
|
||||
from IPython.lib import guisupport
|
||||
|
||||
|
||||
def print_process_id():
    """Print the PID of the current process.

    Used in this example to demonstrate that the kernel runs in the same
    process as the GUI/front-end code.
    """
    pid = os.getpid()
    print('Process ID is:', pid)
|
||||
|
||||
|
||||
def main():
    """Start an in-process kernel and attach a rich Qt console widget to it."""
    # Show the PID so the user can verify that the console below shares it.
    print_process_id()

    app = guisupport.get_app_qt4()

    # The kernel lives inside this very process, so typing
    #   >>> print_process_id()
    # in the console reports the same PID printed above.
    manager = QtInProcessKernelManager()
    manager.start_kernel()
    kernel = manager.kernel
    kernel.gui = 'qt4'
    # Seed the user namespace with a sample variable and a helper function.
    kernel.shell.push({'foo': 43, 'print_process_id': print_process_id})

    client = manager.client()
    client.start_channels()

    def shutdown():
        # Tear everything down before leaving the Qt event loop.
        client.stop_channels()
        manager.shutdown_kernel()
        app.exit()

    widget = RichIPythonWidget()
    widget.kernel_manager = manager
    widget.kernel_client = client
    widget.exit_requested.connect(shutdown)
    widget.show()

    guisupport.start_event_loop_qt4(app)


if __name__ == '__main__':
    main()
|
@ -0,0 +1,31 @@
|
||||
from __future__ import print_function
|
||||
import os
|
||||
|
||||
from IPython.kernel.inprocess import InProcessKernelManager
|
||||
from IPython.terminal.console.interactiveshell import ZMQTerminalInteractiveShell
|
||||
|
||||
|
||||
def print_process_id():
    """Print the PID of the current process.

    Demonstrates that the kernel and this script share one process.
    """
    current_pid = os.getpid()
    print('Process ID is:', current_pid)
|
||||
|
||||
|
||||
def main():
    """Run a terminal console attached to an in-process kernel."""
    print_process_id()

    # The kernel runs inside this process, so
    #   >>> print_process_id()
    # typed at the console reports the same PID as above.
    manager = InProcessKernelManager()
    manager.start_kernel()
    kernel = manager.kernel
    kernel.gui = 'qt4'
    # Seed the user namespace with a sample variable and a helper function.
    kernel.shell.push({'foo': 43, 'print_process_id': print_process_id})

    client = manager.client()
    client.start_channels()

    # Drive the kernel through a blocking terminal front-end.
    shell = ZMQTerminalInteractiveShell(manager=manager, client=client)
    shell.mainloop()


if __name__ == '__main__':
    main()
|
@ -0,0 +1,55 @@
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
import sys
|
||||
|
||||
from IPython.lib.kernel import connect_qtconsole
|
||||
from IPython.kernel.zmq.kernelapp import YAP_KernelApp
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Functions and classes
|
||||
#-----------------------------------------------------------------------------
|
||||
def mpl_kernel(gui):
    """Launch and return an IPython kernel with matplotlib support.

    Parameters
    ----------
    gui : str
        Backend name forwarded to the kernel's ``--matplotlib`` option
        (e.g. 'qt', 'wx').
    """
    kernel = YAP_KernelApp.instance()
    argv = ['python', '--matplotlib=%s' % gui]
    # argv.append('--log-level=10')  # uncomment for verbose kernel logging
    kernel.initialize(argv)
    return kernel
|
||||
|
||||
|
||||
class InternalYAPKernel(object):
    """Mixin that embeds a YAP/IPython kernel inside a GUI application.

    Host classes call :meth:`init_yapkernel` once at startup; afterwards they
    can open Qt consoles attached to the embedded kernel, inspect or modify
    the shared user namespace, and terminate the consoles on exit.
    """

    def init_yapkernel(self, backend):
        """Start the embedded kernel with GUI event-loop and mpl support.

        Parameters
        ----------
        backend : str
            GUI/matplotlib backend name (e.g. 'qt', 'wx') passed through to
            :func:`mpl_kernel`.
        """
        # Start IPython kernel with GUI event loop and mpl support
        self.yapkernel = mpl_kernel(backend)
        # To create and track active qt consoles
        self.consoles = []

        # This application will also act on the shell user namespace
        self.namespace = self.yapkernel.shell.user_ns

        # Example: a variable that will be seen by the user in the shell, and
        # that the GUI modifies (the 'Counter++' button increments it):
        self.namespace['app_counter'] = 0
        #self.namespace['yapkernel'] = self.yapkernel  # dbg

    def print_namespace(self, evt=None):
        """Print every user-namespace variable not prefixed with '_'."""
        print("\n***Variables in User namespace***")
        for k, v in self.namespace.items():
            if not k.startswith('_'):
                print('%s -> %r' % (k, v))
        sys.stdout.flush()

    def new_qt_console(self, evt=None):
        """start a new qtconsole connected to our kernel"""
        # BUGFIX: track the console process so cleanup_consoles() can
        # terminate it on exit.  Previously the returned handle was dropped
        # and self.consoles stayed empty, so consoles opened from the GUI
        # were never cleaned up.
        console = connect_qtconsole(self.yapkernel.abs_connection_file,
                                    profile=self.yapkernel.profile)
        self.consoles.append(console)
        return console

    def count(self, evt=None):
        """Increment the shared 'app_counter' variable in the namespace."""
        self.namespace['app_counter'] += 1

    def cleanup_consoles(self, evt=None):
        """Kill every console process opened via new_qt_console()."""
        for c in self.consoles:
            c.kill()
|
@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env python
|
||||
"""Example integrating an IPython kernel into a GUI App.
|
||||
|
||||
This trivial GUI application internally starts an IPython kernel, to which Qt
|
||||
consoles can be connected either by the user at the command line or started
|
||||
from the GUI itself, via a button. The GUI can also manipulate one variable in
|
||||
the kernel's namespace, and print the namespace to the console.
|
||||
|
||||
Play with it by running the script and then opening one or more consoles, and
|
||||
pushing the 'Counter++' and 'Namespace' buttons.
|
||||
|
||||
Upon exit, it should automatically close all consoles opened from the GUI.
|
||||
|
||||
Consoles attached separately from a terminal will not be terminated, though
|
||||
they will notice that their kernel died.
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
from PyQt4 import Qt
|
||||
|
||||
from internal_yapkernel import InternalYAPKernel
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Functions and classes
|
||||
#-----------------------------------------------------------------------------
|
||||
class SimpleWindow(Qt.QWidget, InternalYAPKernel):
    """Minimal Qt window hosting an embedded YAP/IPython kernel.

    Four buttons let the user open a Qt console attached to the kernel,
    print the shared namespace, bump a counter, and quit.
    """

    def __init__(self, app):
        Qt.QWidget.__init__(self)
        self.app = app
        self.add_widgets()
        # Spin up the embedded kernel with Qt event-loop integration.
        self.init_yapkernel('qt')

    def add_widgets(self):
        """Lay out the window and wire each button to a kernel action."""
        self.setGeometry(300, 300, 400, 70)
        self.setWindowTitle('IPython in your app')

        # Add simple buttons:
        console_btn = Qt.QPushButton('Qt Console', self)
        console_btn.setGeometry(10, 10, 100, 35)
        self.connect(console_btn, Qt.SIGNAL('clicked()'), self.new_qt_console)

        namespace_btn = Qt.QPushButton('Namespace', self)
        namespace_btn.setGeometry(120, 10, 100, 35)
        self.connect(namespace_btn, Qt.SIGNAL('clicked()'), self.print_namespace)

        count_btn = Qt.QPushButton('Count++', self)
        count_btn.setGeometry(230, 10, 80, 35)
        self.connect(count_btn, Qt.SIGNAL('clicked()'), self.count)

        # Quit and cleanup (renamed from `quit`, which shadowed the builtin)
        quit_btn = Qt.QPushButton('Quit', self)
        quit_btn.setGeometry(320, 10, 60, 35)
        self.connect(quit_btn, Qt.SIGNAL('clicked()'), Qt.qApp, Qt.SLOT('quit()'))

        self.app.connect(self.app, Qt.SIGNAL("lastWindowClosed()"),
                         self.app, Qt.SLOT("quit()"))

        # Kill any consoles we spawned before the application exits.
        self.app.aboutToQuit.connect(self.cleanup_consoles)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Main script
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
if __name__ == "__main__":
    app = Qt.QApplication([])

    # Create our window
    window = SimpleWindow(app)
    window.show()

    # Very important, IPython-specific step: this gets GUI event loop
    # integration going, and it replaces calling app.exec_()
    window.yapkernel.start()
|
119
packages/python/yap_kernel/examples/embedding/ipkernel_wxapp.py
Normal file
119
packages/python/yap_kernel/examples/embedding/ipkernel_wxapp.py
Normal file
@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env python
|
||||
"""Example integrating an IPython kernel into a GUI App.
|
||||
|
||||
This trivial GUI application internally starts an IPython kernel, to which Qt
|
||||
consoles can be connected either by the user at the command line or started
|
||||
from the GUI itself, via a button. The GUI can also manipulate one variable in
|
||||
the kernel's namespace, and print the namespace to the console.
|
||||
|
||||
Play with it by running the script and then opening one or more consoles, and
|
||||
pushing the 'Counter++' and 'Namespace' buttons.
|
||||
|
||||
Upon exit, it should automatically close all consoles opened from the GUI.
|
||||
|
||||
Consoles attached separately from a terminal will not be terminated, though
|
||||
they will notice that their kernel died.
|
||||
|
||||
Ref: Modified from wxPython source code wxPython/samples/simple/simple.py
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
import sys
|
||||
|
||||
import wx
|
||||
|
||||
from internal_yapkernel import InternalYAPKernel
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Functions and classes
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class MyFrame(wx.Frame, InternalYAPKernel):
    """Demo frame: a few controls on a wxPanel plus an embedded kernel.

    Buttons open Qt consoles attached to the kernel, print the shared user
    namespace, bump a counter, or close the application.
    """

    def __init__(self, parent, title):
        wx.Frame.__init__(self, parent, -1, title,
                          pos=(150, 150), size=(350, 285))

        # File menu with a single Exit entry; the \tAlt-X suffix in the
        # label automatically installs a keyboard accelerator, and the third
        # argument is help text shown in the statusbar.
        file_menu = wx.Menu()
        file_menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample")
        self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT)

        menu_bar = wx.MenuBar()
        menu_bar.Append(file_menu, "&File")
        self.SetMenuBar(menu_bar)

        self.CreateStatusBar()

        # Panel holding a greeting label and the four action buttons.
        panel = wx.Panel(self)

        label = wx.StaticText(panel, -1, "Hello World!")
        label.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))
        label.SetSize(label.GetBestSize())

        qtconsole_btn = wx.Button(panel, -1, "Qt Console")
        ns_btn = wx.Button(panel, -1, "Namespace")
        count_btn = wx.Button(panel, -1, "Count++")
        close_btn = wx.Button(panel, -1, "Quit")

        # Wire each button to its handler.
        self.Bind(wx.EVT_BUTTON, self.new_qt_console, qtconsole_btn)
        self.Bind(wx.EVT_BUTTON, self.print_namespace, ns_btn)
        self.Bind(wx.EVT_BUTTON, self.count, count_btn)
        self.Bind(wx.EVT_BUTTON, self.OnTimeToClose, close_btn)

        # Stack the controls vertically with a 10 pixel border around each.
        sizer = wx.BoxSizer(wx.VERTICAL)
        for ctrl in (label, qtconsole_btn, ns_btn, count_btn, close_btn):
            sizer.Add(ctrl, 0, wx.ALL, 10)
        panel.SetSizer(sizer)
        panel.Layout()

        # Start the IPython kernel with gui support
        self.init_yapkernel('wx')

    def OnTimeToClose(self, evt):
        """Event handler for the button click."""
        print("See ya later!")
        sys.stdout.flush()
        self.cleanup_consoles(evt)
        self.Close()
        # Not sure why, but our IPython kernel seems to prevent normal WX
        # shutdown, so an explicit exit() call is needed.
        sys.exit()
|
||||
|
||||
|
||||
class MyApp(wx.App):
    """wx application wrapper that shows MyFrame and exposes its kernel."""

    def OnInit(self):
        # Build the main window and make it the application's top window.
        main_frame = MyFrame(None, "Simple wxPython App")
        self.SetTopWindow(main_frame)
        main_frame.Show(True)
        # Surface the frame's kernel so the __main__ block can start it.
        self.yapkernel = main_frame.yapkernel
        return True
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Main script
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
if __name__ == '__main__':
    # NOTE(review): redirect=False keeps output in the terminal; the effect
    # of clearSigInt=False on Ctrl-C handling should be confirmed against
    # the wx.App documentation.
    app = MyApp(redirect=False, clearSigInt=False)

    # Very important, IPython-specific step: this gets GUI event loop
    # integration going, and it replaces calling app.MainLoop()
    app.yapkernel.start()
|
3
packages/python/yap_kernel/readthedocs.yml
Normal file
3
packages/python/yap_kernel/readthedocs.yml
Normal file
@ -0,0 +1,3 @@
|
||||
python:
|
||||
version: 3.5
|
||||
pip_install: true
|
13
packages/python/yap_kernel/setup.cfg
Normal file
13
packages/python/yap_kernel/setup.cfg
Normal file
@ -0,0 +1,13 @@
|
||||
[bdist_wheel]
|
||||
universal=0
|
||||
|
||||
[nosetests]
|
||||
warningfilters= default |.* |DeprecationWarning |ipykernel.*
|
||||
default |.* |DeprecationWarning |IPython.*
|
||||
ignore |.*assert.* |DeprecationWarning |.*
|
||||
ignore |.*observe.* |DeprecationWarning |IPython.*
|
||||
ignore |.*default.* |DeprecationWarning |IPython.*
|
||||
ignore |.*default.* |DeprecationWarning |jupyter_client.*
|
||||
ignore |.*Metada.* |DeprecationWarning |IPython.*
|
||||
|
||||
|
@ -43,7 +43,7 @@ for d, _, _ in os.walk(pjoin(here, name)):
|
||||
packages.append(d[len(here)+1:].replace(os.path.sep, '.'))
|
||||
|
||||
package_data = {
|
||||
'ipykernel': ['resources/*.*'],
|
||||
'yap_kernel': ['resources/*.*'],
|
||||
}
|
||||
|
||||
version_ns = {}
|
||||
@ -56,9 +56,10 @@ setup_args = dict(
|
||||
version = version_ns['__version__'],
|
||||
scripts = glob(pjoin('scripts', '*')),
|
||||
packages = packages,
|
||||
py_modules = ['yapkernel_launcher'],
|
||||
package_data = package_data,
|
||||
description = "IPython Kernel for Jupyter",
|
||||
author = 'IPython Development Team',
|
||||
description = "YAP Kernel for Jupyter",
|
||||
author = 'YAP Development Team',
|
||||
author_email = 'ipython-dev@scipy.org',
|
||||
url = 'http://ipython.org',
|
||||
license = 'BSD',
|
||||
@ -79,12 +80,13 @@ if 'develop' in sys.argv or any(a.startswith('bdist') for a in sys.argv):
|
||||
import setuptools
|
||||
|
||||
setuptools_args = {}
|
||||
# install_requires = setuptools_args['install_requires'] = [
|
||||
# 'ipython>=4.0.0',
|
||||
# 'traitlets>=4.1.0',
|
||||
# 'jupyter_client',
|
||||
# 'tornado>=4.0',
|
||||
# ]
|
||||
install_requires = setuptools_args['install_requires'] = [
|
||||
'ipython>=4.0.0',
|
||||
'traitlets>=4.1.0',
|
||||
'jupyter_client',
|
||||
'tornado>=4.0',
|
||||
'yap4py'
|
||||
]
|
||||
|
||||
if any(a.startswith(('bdist', 'build', 'install')) for a in sys.argv):
|
||||
from ipykernel.kernelspec import write_kernel_spec, make_ipkernel_cmd, KERNEL_NAME
|
||||
|
@ -34,7 +34,7 @@ import shutil
|
||||
from distutils.core import setup
|
||||
|
||||
pjoin = os.path.join
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
here = os.path.relpath(os.path.dirname(__file__))
|
||||
pkg_root = pjoin(here, name)
|
||||
|
||||
packages = []
|
||||
@ -56,12 +56,11 @@ setup_args = dict(
|
||||
version = version_ns['__version__'],
|
||||
scripts = glob(pjoin('scripts', '*')),
|
||||
packages = packages,
|
||||
package_dir = {'':'${CMAKE_CURRENT_SOURCE_DIR}'},
|
||||
py_modules = ['ipykernel_launcher'],
|
||||
py_modules = ['yap_kernel_launcher'],
|
||||
package_data = package_data,
|
||||
description = "IPython Kernel for Jupyter",
|
||||
author = 'IPython Development Team',
|
||||
author_email = 'ipython-dev@scipy.org',
|
||||
description = "YAP Kernel for Jupyter",
|
||||
author = 'IPython Development Team and Vitor Santos Costa',
|
||||
author_email = 'vsc@dcc.fc.up.ot',
|
||||
url = 'http://ipython.org',
|
||||
license = 'BSD',
|
||||
platforms = "Linux, Mac OS X, Windows",
|
||||
@ -86,12 +85,13 @@ install_requires = setuptools_args['install_requires'] = [
|
||||
'traitlets>=4.1.0',
|
||||
'jupyter_client',
|
||||
'tornado>=4.0',
|
||||
'yap4py'
|
||||
]
|
||||
|
||||
if any(a.startswith(('bdist', 'build', 'install')) for a in sys.argv):
|
||||
from ipykernel.kernelspec import write_kernel_spec, make_ipkernel_cmd, KERNEL_NAME
|
||||
from yap_kernel.kernelspec import write_kernel_spec, make_yap_kernel_cmd, KERNEL_NAME
|
||||
|
||||
argv = make_ipkernel_cmd(executable='python')
|
||||
argv = make_yap_kernel_cmd(executable='python')
|
||||
dest = os.path.join(here, 'data_kernelspec')
|
||||
if os.path.exists(dest):
|
||||
shutil.rmtree(dest)
|
||||
@ -101,6 +101,10 @@ if any(a.startswith(('bdist', 'build', 'install')) for a in sys.argv):
|
||||
(pjoin('share', 'jupyter', 'kernels', KERNEL_NAME), glob(pjoin(dest, '*'))),
|
||||
]
|
||||
|
||||
setuptools_args['zip_safe']=False
|
||||
setuptools_args['eager_resources'] = ['yap_kernel']
|
||||
setuptools_args['include_package_data']=True
|
||||
|
||||
extras_require = setuptools_args['extras_require'] = {
|
||||
'test:python_version=="2.7"': ['mock'],
|
||||
'test': ['nose_warnings_filters', 'nose-timer'],
|
||||
|
20
packages/python/yap_kernel/yap_kernel.egg-info/PKG-INFO
Normal file
20
packages/python/yap_kernel/yap_kernel.egg-info/PKG-INFO
Normal file
@ -0,0 +1,20 @@
|
||||
Metadata-Version: 1.1
|
||||
Name: yap-kernel
|
||||
Version: 4.7.0.dev0
|
||||
Summary: YAP Kernel for Jupyter
|
||||
Home-page: http://ipython.org
|
||||
Author: YAP Development Team
|
||||
Author-email: ipython-dev@scipy.org
|
||||
License: BSD
|
||||
Description: UNKNOWN
|
||||
Keywords: Interactive,Interpreter,Shell,Web
|
||||
Platform: Linux
|
||||
Platform: Mac OS X
|
||||
Platform: Windows
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Intended Audience :: System Administrators
|
||||
Classifier: Intended Audience :: Science/Research
|
||||
Classifier: License :: OSI Approved :: BSD License
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
81
packages/python/yap_kernel/yap_kernel.egg-info/SOURCES.txt
Normal file
81
packages/python/yap_kernel/yap_kernel.egg-info/SOURCES.txt
Normal file
@ -0,0 +1,81 @@
|
||||
CONTRIBUTING.md
|
||||
COPYING.md
|
||||
MANIFEST.in
|
||||
README.md
|
||||
setup.cfg
|
||||
setup.py
|
||||
/Users/vsc/github/yap-6.3/yap_kernel/data_kernelspec/kernel.json
|
||||
/Users/vsc/github/yap-6.3/yap_kernel/data_kernelspec/logo-32x32.png
|
||||
/Users/vsc/github/yap-6.3/yap_kernel/data_kernelspec/logo-64x64.png
|
||||
docs/Makefile
|
||||
docs/changelog.rst
|
||||
docs/conf.py
|
||||
docs/index.rst
|
||||
docs/make.bat
|
||||
examples/embedding/inprocess_qtconsole.py
|
||||
examples/embedding/inprocess_terminal.py
|
||||
examples/embedding/internal_ipkernel.py
|
||||
examples/embedding/ipkernel_qtapp.py
|
||||
examples/embedding/ipkernel_wxapp.py
|
||||
yap_kernel/__init__.py
|
||||
yap_kernel/__main__.py
|
||||
yap_kernel/_version.py
|
||||
yap_kernel/codeutil.py
|
||||
yap_kernel/connect.py
|
||||
yap_kernel/datapub.py
|
||||
yap_kernel/displayhook.py
|
||||
yap_kernel/embed.py
|
||||
yap_kernel/eventloops.py
|
||||
yap_kernel/heartbeat.py
|
||||
yap_kernel/interactiveshell.py
|
||||
yap_kernel/iostream.py
|
||||
yap_kernel/jsonutil.py
|
||||
yap_kernel/kernelapp.py
|
||||
yap_kernel/kernelbase.py
|
||||
yap_kernel/kernelspec.py
|
||||
yap_kernel/log.py
|
||||
yap_kernel/parentpoller.py
|
||||
yap_kernel/pickleutil.py
|
||||
yap_kernel/serialize.py
|
||||
yap_kernel/yapkernel.py
|
||||
yap_kernel/zmqshell.py
|
||||
yap_kernel.egg-info/PKG-INFO
|
||||
yap_kernel.egg-info/SOURCES.txt
|
||||
yap_kernel.egg-info/dependency_links.txt
|
||||
yap_kernel.egg-info/requires.txt
|
||||
yap_kernel.egg-info/top_level.txt
|
||||
yap_kernel/comm/__init__.py
|
||||
yap_kernel/comm/comm.py
|
||||
yap_kernel/comm/manager.py
|
||||
yap_kernel/gui/__init__.py
|
||||
yap_kernel/gui/gtk3embed.py
|
||||
yap_kernel/gui/gtkembed.py
|
||||
yap_kernel/inprocess/__init__.py
|
||||
yap_kernel/inprocess/blocking.py
|
||||
yap_kernel/inprocess/channels.py
|
||||
yap_kernel/inprocess/client.py
|
||||
yap_kernel/inprocess/constants.py
|
||||
yap_kernel/inprocess/ipkernel.py
|
||||
yap_kernel/inprocess/manager.py
|
||||
yap_kernel/inprocess/socket.py
|
||||
yap_kernel/inprocess/tests/__init__.py
|
||||
yap_kernel/inprocess/tests/test_kernel.py
|
||||
yap_kernel/inprocess/tests/test_kernelmanager.py
|
||||
yap_kernel/pylab/__init__.py
|
||||
yap_kernel/pylab/backend_inline.py
|
||||
yap_kernel/pylab/config.py
|
||||
yap_kernel/resources/logo-32x32.png
|
||||
yap_kernel/resources/logo-64x64.png
|
||||
yap_kernel/tests/__init__.py
|
||||
yap_kernel/tests/test_connect.py
|
||||
yap_kernel/tests/test_embed_kernel.py
|
||||
yap_kernel/tests/test_io.py
|
||||
yap_kernel/tests/test_jsonutil.py
|
||||
yap_kernel/tests/test_kernel.py
|
||||
yap_kernel/tests/test_kernelspec.py
|
||||
yap_kernel/tests/test_message_spec.py
|
||||
yap_kernel/tests/test_pickleutil.py
|
||||
yap_kernel/tests/test_serialize.py
|
||||
yap_kernel/tests/test_start_kernel.py
|
||||
yap_kernel/tests/test_zmq_shell.py
|
||||
yap_kernel/tests/utils.py
|
@ -0,0 +1 @@
|
||||
|
12
packages/python/yap_kernel/yap_kernel.egg-info/requires.txt
Normal file
12
packages/python/yap_kernel/yap_kernel.egg-info/requires.txt
Normal file
@ -0,0 +1,12 @@
|
||||
ipython>=4.0.0
|
||||
traitlets>=4.1.0
|
||||
jupyter_client
|
||||
tornado>=4.0
|
||||
yap4py
|
||||
|
||||
[test]
|
||||
nose_warnings_filters
|
||||
nose-timer
|
||||
|
||||
[test:python_version=="2.7"]
|
||||
mock
|
@ -0,0 +1,2 @@
|
||||
yap_kernel
|
||||
yapkernel_launcher
|
5
packages/python/yap_kernel/yap_kernel/#__main__.py#
Normal file
5
packages/python/yap_kernel/yap_kernel/#__main__.py#
Normal file
@ -0,0 +1,5 @@
|
||||
if __name__ == '__main__':
|
||||
from ipykernel import kernelapp as app
|
||||
app.launch_new_instance()
|
||||
|
||||
|
492
packages/python/yap_kernel/yap_kernel/#kernelapp.py#
Normal file
492
packages/python/yap_kernel/yap_kernel/#kernelapp.py#
Normal file
@ -0,0 +1,492 @@
|
||||
"""An Application for launching a kernel"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import atexit
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import traceback
|
||||
import logging
|
||||
|
||||
from tornado import ioloop
|
||||
import zmq
|
||||
from zmq.eventloop import ioloop as zmq_ioloop
|
||||
from zmq.eventloop.zmqstream import ZMQStream
|
||||
|
||||
from IPython.core.application import (
|
||||
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
|
||||
)
|
||||
from IPython.core.profiledir import ProfileDir
|
||||
from IPython.core.shellapp import (
|
||||
InteractiveShellApp, shell_flags, shell_aliases
|
||||
)
|
||||
from IPython.utils import io
|
||||
from ipython_genutils.path import filefind, ensure_dir_exists
|
||||
from traitlets import (
|
||||
Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default
|
||||
)
|
||||
from ipython_genutils.importstring import import_item
|
||||
from jupyter_core.paths import jupyter_runtime_dir
|
||||
from jupyter_client import write_connection_file
|
||||
from jupyter_client.connect import ConnectionFileMixin
|
||||
|
||||
# local imports
|
||||
from .iostream import IOPubThread
|
||||
from .heartbeat import Heartbeat
|
||||
from .yapkernel import YAPKernel
|
||||
from .parentpoller import ParentPollerUnix, ParentPollerWindows
|
||||
from jupyter_client.session import (
|
||||
Session, session_flags, session_aliases,
|
||||
)
|
||||
from .zmqshell import ZMQInteractiveShell
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Flags and Aliases
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# Command-line aliases: map short option names onto YAP_KernelApp traits.
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
    'ip' : 'YAP_KernelApp.ip',
    'hb' : 'YAP_KernelApp.hb_port',
    'shell' : 'YAP_KernelApp.shell_port',
    'iopub' : 'YAP_KernelApp.iopub_port',
    'stdin' : 'YAP_KernelApp.stdin_port',
    'control' : 'YAP_KernelApp.control_port',
    'f' : 'YAP_KernelApp.connection_file',
    'transport': 'YAP_KernelApp.transport',
})

# Boolean command-line flags: each maps a flag name to (config dict, help).
kernel_flags = dict(base_flags)
kernel_flags.update({
    'no-stdout' : (
            {'YAP_KernelApp' : {'no_stdout' : True}},
            "redirect stdout to the null device"),
    'no-stderr' : (
            {'YAP_KernelApp' : {'no_stderr' : True}},
            "redirect stderr to the null device"),
    'pylab' : (
        {'YAP_KernelApp' : {'pylab' : 'auto'}},
        """Pre-load matplotlib and numpy for interactive use with
        the default matplotlib backend."""),
})

# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)

# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)

# Printed on startup when running from a terminal (see log_connection_info).
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.

To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.

To read more about this, see https://github.com/ipython/ipython/issues/2049

"""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Application class for starting an IPython Kernel
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class YAP_KernelApp(BaseIPythonApplication, InteractiveShellApp,
|
||||
ConnectionFileMixin):
|
||||
name='YAP Kernel'
|
||||
aliases = Dict(kernel_aliases)
|
||||
flags = Dict(kernel_flags)
|
||||
classes = [YAPKernel, ZMQInteractiveShell, ProfileDir, Session]
|
||||
# the kernel class, as an importstring
|
||||
kernel_class = Type('yap_kernel.kernelbase.YAP_Kernel',
|
||||
klass='yap_kernel.kernelbase.YAP_Kernel',
|
||||
help="""The Kernel subclass to be used.
|
||||
|
||||
This should allow easy re-use of the YAP_KernelApp entry point
|
||||
to configure and launch kernels other than IPython's own.
|
||||
""").tag(config=True)
|
||||
kernel = Any()
|
||||
poller = Any() # don't restrict this even though current pollers are all Threads
|
||||
heartbeat = Instance(Heartbeat, allow_none=True)
|
||||
ports = Dict()
|
||||
|
||||
|
||||
subcommands = {
|
||||
'install': (
|
||||
'yap_kernel.kernelspec.InstallYAPKernelSpecApp',
|
||||
'Install the YAP kernel'
|
||||
),
|
||||
}
|
||||
|
||||
# connection info:
|
||||
connection_dir = Unicode()
|
||||
|
||||
@default('connection_dir')
|
||||
def _default_connection_dir(self):
|
||||
return jupyter_runtime_dir()
|
||||
|
||||
@property
|
||||
def abs_connection_file(self):
|
||||
if os.path.basename(self.connection_file) == self.connection_file:
|
||||
return os.path.join(self.connection_dir, self.connection_file)
|
||||
else:
|
||||
return self.connection_file
|
||||
|
||||
# streams, etc.
|
||||
no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
|
||||
no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
|
||||
outstream_class = DottedObjectName('yap_kernel.iostream.OutStream',
|
||||
help="The importstring for the OutStream factory").tag(config=True)
|
||||
displayhook_class = DottedObjectName('yap_kernel.displayhook.ZMQDisplayHook',
|
||||
help="The importstring for the DisplayHook factory").tag(config=True)
|
||||
|
||||
# polling
|
||||
parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0),
|
||||
help="""kill this process if its parent dies. On Windows, the argument
|
||||
specifies the HANDLE of the parent process, otherwise it is simply boolean.
|
||||
""").tag(config=True)
|
||||
interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0),
|
||||
help="""ONLY USED ON WINDOWS
|
||||
Interrupt this process when the parent is signaled.
|
||||
""").tag(config=True)
|
||||
|
||||
def init_crash_handler(self):
|
||||
sys.excepthook = self.excepthook
|
||||
|
||||
def excepthook(self, etype, evalue, tb):
|
||||
# write uncaught traceback to 'real' stderr, not zmq-forwarder
|
||||
traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)
|
||||
|
||||
def init_poller(self):
|
||||
if sys.platform == 'win32':
|
||||
if self.interrupt or self.parent_handle:
|
||||
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
|
||||
elif self.parent_handle and self.parent_handle != 1:
|
||||
# PID 1 (init) is special and will never go away,
|
||||
# only be reassigned.
|
||||
# Parent polling doesn't work if ppid == 1 to start with.
|
||||
self.poller = ParentPollerUnix()
|
||||
|
||||
def _bind_socket(self, s, port):
|
||||
iface = '%s://%s' % (self.transport, self.ip)
|
||||
if self.transport == 'tcp':
|
||||
if port <= 0:
|
||||
port = s.bind_to_random_port(iface)
|
||||
else:
|
||||
s.bind("tcp://%s:%i" % (self.ip, port))
|
||||
elif self.transport == 'ipc':
|
||||
if port <= 0:
|
||||
port = 1
|
||||
path = "%s-%i" % (self.ip, port)
|
||||
while os.path.exists(path):
|
||||
port = port + 1
|
||||
path = "%s-%i" % (self.ip, port)
|
||||
else:
|
||||
path = "%s-%i" % (self.ip, port)
|
||||
s.bind("ipc://%s" % path)
|
||||
return port
|
||||
|
||||
def write_connection_file(self):
|
||||
"""write connection info to JSON file"""
|
||||
cf = self.abs_connection_file
|
||||
self.log.debug("Writing connection file: %s", cf)
|
||||
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
|
||||
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
|
||||
iopub_port=self.iopub_port, control_port=self.control_port)
|
||||
|
||||
def cleanup_connection_file(self):
|
||||
cf = self.abs_connection_file
|
||||
self.log.debug("Cleaning up connection file: %s", cf)
|
||||
try:
|
||||
os.remove(cf)
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
self.cleanup_ipc_files()
|
||||
|
||||
def init_connection_file(self):
|
||||
if not self.connection_file:
|
||||
self.connection_file = "kernel-%s.json"%os.getpid()
|
||||
try:
|
||||
self.connection_file = filefind(self.connection_file, ['.', self.connection_dir])
|
||||
except IOError:
|
||||
self.log.debug("Connection file not found: %s", self.connection_file)
|
||||
# This means I own it, and I'll create it in this directory:
|
||||
ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700)
|
||||
# Also, I will clean it up:
|
||||
atexit.register(self.cleanup_connection_file)
|
||||
return
|
||||
try:
|
||||
self.load_connection_file()
|
||||
except Exception:
|
||||
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
|
||||
self.exit(1)
|
||||
|
||||
def init_sockets(self):
|
||||
# Create a context, a session, and the kernel sockets.
|
||||
self.log.info("Starting the kernel at pid: %i", os.getpid())
|
||||
context = zmq.Context.instance()
|
||||
# Uncomment this to try closing the context.
|
||||
# atexit.register(context.term)
|
||||
|
||||
self.shell_socket = context.socket(zmq.ROUTER)
|
||||
self.shell_socket.linger = 1000
|
||||
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
|
||||
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
|
||||
|
||||
self.stdin_socket = context.socket(zmq.ROUTER)
|
||||
self.stdin_socket.linger = 1000
|
||||
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
|
||||
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
|
||||
|
||||
self.control_socket = context.socket(zmq.ROUTER)
|
||||
self.control_socket.linger = 1000
|
||||
self.control_port = self._bind_socket(self.control_socket, self.control_port)
|
||||
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
|
||||
|
||||
self.init_iopub(context)
|
||||
|
||||
def init_iopub(self, context):
    """Create the iopub PUB socket and start the IOPubThread.

    After this returns, ``self.iopub_socket`` refers to the thread's
    background-socket wrapper rather than the raw zmq socket.
    """
    self.iopub_socket = context.socket(zmq.PUB)
    self.iopub_socket.linger = 1000
    self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
    self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
    # tornado must be configured before streams are forwarded through iopub
    self.configure_tornado_logger()
    self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
    self.iopub_thread.start()
    # backward-compat: wrap iopub socket API in background thread
    self.iopub_socket = self.iopub_thread.background_socket
|
||||
|
||||
def init_heartbeat(self):
    """start the heart beating"""
    # heartbeat doesn't share context, because it mustn't be blocked
    # by the GIL, which is accessed by libzmq when freeing zero-copy messages
    hb_ctx = zmq.Context()
    self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
    # record the port the Heartbeat actually bound
    self.hb_port = self.heartbeat.port
    self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
    self.heartbeat.start()
|
||||
|
||||
def log_connection_info(self):
    """display connection info, and store ports

    Logs (and, when running interactively, prints) the ``--existing``
    hint, and records all bound ports in ``self.ports``.
    """
    basename = os.path.basename(self.connection_file)
    if basename == self.connection_file or \
            os.path.dirname(self.connection_file) == self.connection_dir:
        # use shortname
        tail = basename
    else:
        tail = self.connection_file
    lines = [
        "To connect another client to this kernel, use:",
        " --existing %s" % tail,
    ]
    # log connection info
    # info-level, so often not shown.
    # frontends should use the %connect_info magic
    # to see the connection info
    for line in lines:
        self.log.info(line)
    # also raw print to the terminal if no parent_handle (`ipython kernel`)
    # unless log-level is CRITICAL (--quiet)
    if not self.parent_handle and self.log_level < logging.CRITICAL:
        io.rprint(_ctrl_c_message)
        for line in lines:
            io.rprint(line)

    # stash the final port assignments for later consumers (init_kernel)
    self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
                      stdin=self.stdin_port, hb=self.hb_port,
                      control=self.control_port)
|
||||
|
||||
def init_blackhole(self):
    """Silence stdout and/or stderr by pointing them at ``os.devnull``."""
    if not (self.no_stdout or self.no_stderr):
        return
    devnull = open(os.devnull, 'w')
    if self.no_stdout:
        sys.stdout = sys.__stdout__ = devnull
    if self.no_stderr:
        sys.stderr = sys.__stderr__ = devnull
|
||||
|
||||
def init_io(self):
    """Redirect input streams and set a display hook.

    Replaces ``sys.stdout``/``sys.stderr`` with instances of the
    configured ``outstream_class`` (which forward through the iopub
    thread), and installs the configured ``displayhook_class``.
    """
    if self.outstream_class:
        outstream_factory = import_item(str(self.outstream_class))
        sys.stdout = outstream_factory(self.session, self.iopub_thread, u'stdout')
        sys.stderr = outstream_factory(self.session, self.iopub_thread, u'stderr')
    if self.displayhook_class:
        displayhook_factory = import_item(str(self.displayhook_class))
        self.displayhook = displayhook_factory(self.session, self.iopub_socket)
        sys.displayhook = self.displayhook

    self.patch_io()
|
||||
|
||||
def patch_io(self):
    """Patch important libraries that can't handle sys.stdout forwarding"""
    try:
        import faulthandler
    except ImportError:
        pass
    else:
        # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
        # updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
        # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable

        # change default file to __stderr__ from forwarded stderr
        # (the forwarded sys.stderr is presumably not a real file object,
        # which faulthandler cannot use — hence defaulting to __stderr__)
        faulthandler_enable = faulthandler.enable
        def enable(file=sys.__stderr__, all_threads=True, **kwargs):
            return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)

        faulthandler.enable = enable

        # faulthandler.register is only present on some platforms, hence the guard
        if hasattr(faulthandler, 'register'):
            faulthandler_register = faulthandler.register
            def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
                return faulthandler_register(signum, file=file, all_threads=all_threads,
                                             chain=chain, **kwargs)
            faulthandler.register = register
|
||||
|
||||
def init_signal(self):
    """Ignore SIGINT in the kernel process.

    NOTE(review): presumably interrupts reach the kernel through another
    mechanism (e.g. interrupt messages) — confirm before changing.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
|
||||
def init_kernel(self):
    """Create the Kernel object itself

    Wraps the shell/control sockets in ZMQStreams, instantiates the
    configured ``kernel_class`` singleton with the app's sockets and
    config, records the bound ports on it, and hooks the displayhook's
    execution counter to the kernel's.
    """
    shell_stream = ZMQStream(self.shell_socket)
    control_stream = ZMQStream(self.control_socket)

    # .instance is the singleton constructor of the configured kernel class
    kernel_factory = self.kernel_class.instance

    kernel = kernel_factory(parent=self, session=self.session,
                            shell_streams=[shell_stream, control_stream],
                            iopub_thread=self.iopub_thread,
                            iopub_socket=self.iopub_socket,
                            stdin_socket=self.stdin_socket,
                            log=self.log,
                            profile_dir=self.profile_dir,
                            user_ns=self.user_ns,
    )
    kernel.record_ports({
        name + '_port': port for name, port in self.ports.items()
    })
    self.kernel = kernel

    # Allow the displayhook to get the execution count
    self.displayhook.get_execution_count = lambda: kernel.execution_count
|
||||
|
||||
def init_gui_pylab(self):
    """Enable GUI event loop integration, taking pylab into account."""

    # Register inline backend as default
    # this is higher priority than matplotlibrc,
    # but lower priority than anything else (mpl.use() for instance).
    # This only affects matplotlib >= 1.5
    if not os.environ.get('MPLBACKEND'):
        os.environ['MPLBACKEND'] = 'module://yap_kernel.pylab.backend_inline'

    # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
    # to ensure that any exception is printed straight to stderr.
    # Normally _showtraceback associates the reply with an execution,
    # which means frontends will never draw it, as this exception
    # is not associated with any execute request.

    shell = self.shell
    _showtraceback = shell._showtraceback
    try:
        # replace error-sending traceback with stderr
        def print_tb(etype, evalue, stb):
            print ("GUI event loop or pylab initialization failed",
                   file=sys.stderr)
            print (shell.InteractiveTB.stb2text(stb), file=sys.stderr)
        shell._showtraceback = print_tb
        InteractiveShellApp.init_gui_pylab(self)
    finally:
        # always restore the original traceback handler
        shell._showtraceback = _showtraceback
|
||||
|
||||
def init_shell(self):
    """Expose the kernel's shell (if any) and register this app as one
    of its configurables."""
    shell = getattr(self.kernel, 'shell', None)
    self.shell = shell
    if shell:
        shell.configurables.append(self)
|
||||
|
||||
def init_extensions(self):
    """Load IPython extensions, then force-load ipywidgets if available.

    The hardcoded-widgets hack keeps ipywidgets working without users
    having to load the extension themselves; absence of the package is
    logged at debug level only.
    """
    super(YAP_KernelApp, self).init_extensions()
    # BEGIN HARDCODED WIDGETS HACK
    # Ensure ipywidgets extension is loaded if available
    extension_man = self.shell.extension_manager
    if 'ipywidgets' not in extension_man.loaded:
        try:
            extension_man.load_extension('ipywidgets')
        except ImportError:
            # fix: the exception was bound to an unused name `e`
            self.log.debug('ipywidgets package not installed. Widgets will not be available.')
    # END HARDCODED WIDGETS HACK
|
||||
|
||||
def configure_tornado_logger(self):
    """Configure the tornado ``logging.Logger``.

    Must set up the tornado logger or else tornado will call
    basicConfig for the root logger, which would make the root logger
    go to the real sys.stderr instead of the capture streams.
    This mimics the setup performed by ``logging.basicConfig``.
    """
    tornado_logger = logging.getLogger('tornado')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    tornado_logger.addHandler(stream_handler)
|
||||
|
||||
@catch_config_error
def initialize(self, argv=None):
    """Full kernel-app initialization sequence.

    Order matters: connection info is written and displayed only after
    the sockets and heartbeat exist, and stdout/stderr are flushed last
    so startup output is not attributed to the first execute request.
    """
    super(YAP_KernelApp, self).initialize(argv)
    if self.subapp is not None:
        # a subcommand is handling this invocation; nothing more to do
        return
    # register zmq IOLoop with tornado
    zmq_ioloop.install()
    self.init_blackhole()
    self.init_connection_file()
    self.init_poller()
    self.init_sockets()
    self.init_heartbeat()
    # writing/displaying connection info must be *after* init_sockets/heartbeat
    self.write_connection_file()
    # Log connection info after writing connection file, so that the connection
    # file is definitely available at the time someone reads the log.
    self.log_connection_info()
    self.init_io()
    self.init_signal()
    self.init_kernel()
    # shell init steps
    self.init_path()
    self.init_shell()
    if self.shell:
        self.init_gui_pylab()
        self.init_extensions()
    self.init_code()
    # flush stdout/stderr, so that anything written to these streams during
    # initialization do not get associated with the first execution request
    sys.stdout.flush()
    sys.stderr.flush()
|
||||
|
||||
def start(self):
    """Start the kernel and its event loop.

    Delegates to a subapp when one handled the invocation; otherwise
    starts the poller (if configured), the kernel, and the IOLoop,
    swallowing KeyboardInterrupt on shutdown.
    """
    if self.subapp is not None:
        return self.subapp.start()
    if self.poller is not None:
        self.poller.start()
    self.kernel.start()
    try:
        ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        pass
|
||||
|
||||
# Module-level entry point alias (invoked from __main__.py).
launch_new_instance = YAP_KernelApp.launch_instance
|
||||
|
||||
def main():
    """Run a YAPKernel as an application."""
    kernel_app = YAP_KernelApp.instance()
    kernel_app.initialize()
    kernel_app.start()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
3
packages/python/yap_kernel/yap_kernel/__init__.py
Normal file
3
packages/python/yap_kernel/yap_kernel/__init__.py
Normal file
@ -0,0 +1,3 @@
|
||||
from ._version import version_info, __version__, kernel_protocol_version_info, kernel_protocol_version
|
||||
from .connect import *
|
||||
|
5
packages/python/yap_kernel/yap_kernel/__main__.py
Normal file
5
packages/python/yap_kernel/yap_kernel/__main__.py
Normal file
@ -0,0 +1,5 @@
|
||||
if __name__ == '__main__':
    # Support running the kernel via ``python -m yap_kernel``.
    from yap_kernel import kernelapp as app
    app.launch_new_instance()
|
||||
|
||||
|
5
packages/python/yap_kernel/yap_kernel/_version.py
Normal file
5
packages/python/yap_kernel/yap_kernel/_version.py
Normal file
@ -0,0 +1,5 @@
|
||||
# yap_kernel package version, as a tuple and as a dotted string.
version_info = (6, 3, 5)
__version__ = '.'.join(map(str, version_info))

# Version of the kernel messaging protocol this kernel implements.
kernel_protocol_version_info = (5, 1)
kernel_protocol_version = '%s.%s' % kernel_protocol_version_info
|
38
packages/python/yap_kernel/yap_kernel/codeutil.py
Normal file
38
packages/python/yap_kernel/yap_kernel/codeutil.py
Normal file
@ -0,0 +1,38 @@
|
||||
# encoding: utf-8
|
||||
|
||||
"""Utilities to enable code objects to be pickled.
|
||||
|
||||
Any process that import this module will be able to pickle code objects. This
|
||||
includes the func_code attribute of any function. Once unpickled, new
|
||||
functions can be built using new.function(code, globals()). Eventually
|
||||
we need to automate all of this so that functions themselves can be pickled.
|
||||
|
||||
Reference: A. Tremols, P Cogolo, "Python Cookbook," p 302-305
|
||||
"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import warnings
|
||||
warnings.warn("yap_kernel.codeutil is deprecated since IPykernel 4.3.1. It has moved to ipyparallel.serialize", DeprecationWarning)
|
||||
|
||||
import sys
|
||||
import types
|
||||
try:
|
||||
import copyreg # Py 3
|
||||
except ImportError:
|
||||
import copy_reg as copyreg # Py 2
|
||||
|
||||
def code_ctor(*args):
    """Rebuild a code object from positional constructor arguments.

    Used as the reconstruction callable when code objects are pickled,
    so the name must stay importable from this module.
    """
    return types.CodeType(*args)
|
||||
|
||||
def _unmarshal_code(data):
    """Reconstruct a code object from its ``marshal`` serialization."""
    import marshal
    return marshal.loads(data)


def reduce_code(co):
    """``copyreg`` reducer for code objects.

    Returns a ``(callable, args)`` pair as required by ``copyreg.pickle``.

    The previous implementation rebuilt the code object positionally via
    ``types.CodeType(*args)``, which breaks whenever the constructor
    signature changes (it gained ``posonlyargcount`` in Python 3.8 and
    ``qualname``/``exceptiontable`` in 3.11).  Serializing through
    :mod:`marshal` always matches the running interpreter's code format.
    Note: like marshal itself, the payload is only readable by the same
    Python version that produced it.
    """
    import marshal
    return _unmarshal_code, (marshal.dumps(co),)
|
||||
|
||||
# Register the reducer so pickling any code object goes through reduce_code.
copyreg.pickle(types.CodeType, reduce_code)
|
2
packages/python/yap_kernel/yap_kernel/comm/__init__.py
Normal file
2
packages/python/yap_kernel/yap_kernel/comm/__init__.py
Normal file
@ -0,0 +1,2 @@
|
||||
from .manager import *
|
||||
from .comm import *
|
164
packages/python/yap_kernel/yap_kernel/comm/comm.py
Normal file
164
packages/python/yap_kernel/yap_kernel/comm/comm.py
Normal file
@ -0,0 +1,164 @@
|
||||
"""Base class for a Comm"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import uuid
|
||||
|
||||
from traitlets.config import LoggingConfigurable
|
||||
from yap_kernel.kernelbase import Kernel
|
||||
|
||||
from yap_kernel.jsonutil import json_clean
|
||||
from traitlets import Instance, Unicode, Bytes, Bool, Dict, Any, default
|
||||
|
||||
|
||||
class Comm(LoggingConfigurable):
    """Class for communicating between a Frontend and a Kernel

    The ``primary`` side opens its frontend peer on construction; the
    secondary side is created in response to a ``comm_open`` message.
    """
    # kernel to send through; defaults to the running Kernel singleton, if any
    kernel = Instance('yap_kernel.kernelbase.Kernel', allow_none=True)

    @default('kernel')
    def _default_kernel(self):
        # falls through to None when no Kernel singleton exists
        if Kernel.initialized():
            return Kernel.instance()

    # unique id routing messages between the two sides
    comm_id = Unicode()

    @default('comm_id')
    def _default_comm_id(self):
        return uuid.uuid4().hex

    primary = Bool(True, help="Am I the primary or secondary Comm?")

    target_name = Unicode('comm')
    target_module = Unicode(None, allow_none=True, help="""requirejs module from
        which to load comm target.""")

    # IOPub ident/topic; derived from comm_id by default
    topic = Bytes()

    @default('topic')
    def _default_topic(self):
        return ('comm-%s' % self.comm_id).encode('ascii')

    _open_data = Dict(help="data dict, if any, to be included in comm_open")
    _close_data = Dict(help="data dict, if any, to be included in comm_close")

    _msg_callback = Any()
    _close_callback = Any()

    _closed = Bool(True)

    def __init__(self, target_name='', data=None, metadata=None, buffers=None, **kwargs):
        """Create a Comm; the primary side immediately opens its peer."""
        if target_name:
            kwargs['target_name'] = target_name
        super(Comm, self).__init__(**kwargs)
        if self.kernel:
            if self.primary:
                # I am primary, open my peer.
                self.open(data=data, metadata=metadata, buffers=buffers)
            else:
                self._closed = False

    def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
        """Helper for sending a comm message on IOPub"""
        data = {} if data is None else data
        metadata = {} if metadata is None else metadata
        content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))
        self.kernel.session.send(self.kernel.iopub_socket, msg_type,
                                 content,
                                 metadata=json_clean(metadata),
                                 parent=self.kernel._parent_header,
                                 ident=self.topic,
                                 buffers=buffers,
                                 )

    def __del__(self):
        """trigger close on gc"""
        self.close()

    # publishing messages

    def open(self, data=None, metadata=None, buffers=None):
        """Open the frontend-side version of this comm"""
        if data is None:
            data = self._open_data
        comm_manager = getattr(self.kernel, 'comm_manager', None)
        if comm_manager is None:
            raise RuntimeError("Comms cannot be opened without a kernel "
                               "and a comm_manager attached to that kernel.")

        comm_manager.register_comm(self)
        try:
            self._publish_msg('comm_open',
                              data=data, metadata=metadata, buffers=buffers,
                              target_name=self.target_name,
                              target_module=self.target_module,
                              )
            self._closed = False
        except:
            # if sending comm_open failed, roll back the registration
            comm_manager.unregister_comm(self)
            raise

    def close(self, data=None, metadata=None, buffers=None):
        """Close the frontend-side version of this comm"""
        if self._closed:
            # only close once
            return
        self._closed = True
        # nothing to send if we have no kernel
        # can be None during interpreter cleanup
        if not self.kernel:
            return
        if data is None:
            data = self._close_data
        self._publish_msg('comm_close',
                          data=data, metadata=metadata, buffers=buffers,
                          )
        self.kernel.comm_manager.unregister_comm(self)

    def send(self, data=None, metadata=None, buffers=None):
        """Send a message to the frontend-side version of this comm"""
        self._publish_msg('comm_msg',
                          data=data, metadata=metadata, buffers=buffers,
                          )

    # registering callbacks

    def on_close(self, callback):
        """Register a callback for comm_close

        Will be called with the `data` of the close message.

        Call `on_close(None)` to disable an existing callback.
        """
        self._close_callback = callback

    def on_msg(self, callback):
        """Register a callback for comm_msg

        Will be called with the `data` of any comm_msg messages.

        Call `on_msg(None)` to disable an existing callback.
        """
        self._msg_callback = callback

    # handling of incoming messages

    def handle_close(self, msg):
        """Handle a comm_close message"""
        self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
        if self._close_callback:
            self._close_callback(msg)

    def handle_msg(self, msg):
        """Handle a comm_msg message"""
        self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
        if self._msg_callback:
            shell = self.kernel.shell
            if shell:
                # fire shell events around the callback so it behaves
                # like user execution
                shell.events.trigger('pre_execute')
            self._msg_callback(msg)
            if shell:
                shell.events.trigger('post_execute')
||||
|
||||
|
||||
__all__ = ['Comm']
|
130
packages/python/yap_kernel/yap_kernel/comm/manager.py
Normal file
130
packages/python/yap_kernel/yap_kernel/comm/manager.py
Normal file
@ -0,0 +1,130 @@
|
||||
"""Base class to manage comms"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import sys
|
||||
import logging
|
||||
|
||||
from traitlets.config import LoggingConfigurable
|
||||
|
||||
from ipython_genutils.importstring import import_item
|
||||
from ipython_genutils.py3compat import string_types
|
||||
from traitlets import Instance, Unicode, Dict, Any, default
|
||||
|
||||
from .comm import Comm
|
||||
|
||||
|
||||
class CommManager(LoggingConfigurable):
    """Manager for Comms in the Kernel

    Tracks open comms and registered target callables, and handles the
    ``comm_open`` / ``comm_msg`` / ``comm_close`` messages.
    """

    kernel = Instance('yap_kernel.kernelbase.Kernel')
    comms = Dict()    # comm_id -> Comm instance
    targets = Dict()  # target_name -> callable invoked on comm_open

    # Public APIs

    def register_target(self, target_name, f):
        """Register a callable f for a given target name

        f will be called with two arguments when a comm_open message is received with `target`:

        - the Comm instance
        - the `comm_open` message itself.

        f can be a Python callable or an import string for one.
        """
        if isinstance(f, string_types):
            f = import_item(f)

        self.targets[target_name] = f

    def unregister_target(self, target_name, f):
        """Unregister a callable registered with register_target"""
        return self.targets.pop(target_name)

    def register_comm(self, comm):
        """Register a new comm, attach it to the kernel, and return its id."""
        comm_id = comm.comm_id
        comm.kernel = self.kernel
        self.comms[comm_id] = comm
        return comm_id

    def unregister_comm(self, comm):
        """Unregister a comm, and close its counterpart"""
        # unlike get_comm, this should raise a KeyError
        comm = self.comms.pop(comm.comm_id)

    def get_comm(self, comm_id):
        """Get a comm with a particular id

        Returns the comm if found, otherwise None.

        This will not raise an error,
        it will log messages if the comm cannot be found.
        """
        try:
            return self.comms[comm_id]
        except KeyError:
            # fix: Logger.warn is a deprecated alias of Logger.warning
            self.log.warning("No such comm: %s", comm_id)
            if self.log.isEnabledFor(logging.DEBUG):
                # don't create the list of keys if debug messages aren't enabled
                self.log.debug("Current comms: %s", list(self.comms.keys()))

    # Message handlers
    def comm_open(self, stream, ident, msg):
        """Handler for comm_open messages"""
        content = msg['content']
        comm_id = content['comm_id']
        target_name = content['target_name']
        f = self.targets.get(target_name, None)
        comm = Comm(comm_id=comm_id,
                    primary=False,
                    target_name=target_name,
                    )
        self.register_comm(comm)
        if f is None:
            self.log.error("No such comm target registered: %s", target_name)
        else:
            try:
                f(comm, msg)
                return
            except Exception:
                self.log.error("Exception opening comm with target: %s", target_name, exc_info=True)

        # Failure.
        try:
            comm.close()
        except Exception:
            # fix: narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are not swallowed during cleanup
            self.log.error("""Could not close comm during `comm_open` failure
            clean-up. The comm may not have been opened yet.""", exc_info=True)

    def comm_msg(self, stream, ident, msg):
        """Handler for comm_msg messages"""
        content = msg['content']
        comm_id = content['comm_id']
        comm = self.get_comm(comm_id)
        if comm is None:
            return

        try:
            comm.handle_msg(msg)
        except Exception:
            self.log.error('Exception in comm_msg for %s', comm_id, exc_info=True)

    def comm_close(self, stream, ident, msg):
        """Handler for comm_close messages"""
        content = msg['content']
        comm_id = content['comm_id']
        comm = self.get_comm(comm_id)
        if comm is None:
            return

        # remove first so callbacks cannot re-enter via this comm
        del self.comms[comm_id]

        try:
            comm.handle_close(msg)
        except Exception:
            self.log.error('Exception in comm_close for %s', comm_id, exc_info=True)
|
||||
|
||||
__all__ = ['CommManager']
|
183
packages/python/yap_kernel/yap_kernel/connect.py
Normal file
183
packages/python/yap_kernel/yap_kernel/connect.py
Normal file
@ -0,0 +1,183 @@
|
||||
"""Connection file-related utilities for the kernel
|
||||
"""
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import json
|
||||
import sys
|
||||
from subprocess import Popen, PIPE
|
||||
import warnings
|
||||
|
||||
from IPython.core.profiledir import ProfileDir
|
||||
from IPython.paths import get_ipython_dir
|
||||
from ipython_genutils.path import filefind
|
||||
from ipython_genutils.py3compat import str_to_bytes
|
||||
|
||||
import jupyter_client
|
||||
from jupyter_client import write_connection_file
|
||||
|
||||
|
||||
|
||||
def get_connection_file(app=None):
    """Return the path to the connection file of an app

    Parameters
    ----------
    app : YAPKernelApp instance [optional]
        If unspecified, the currently running app will be used

    Returns
    -------
    str : path of the app's connection file, resolved against '.' and
        the app's connection directory.

    Raises
    ------
    RuntimeError
        If no app is given and no YAPKernelApp singleton is running.
    """
    if app is None:
        from yap_kernel.kernelapp import YAPKernelApp
        if not YAPKernelApp.initialized():
            raise RuntimeError("app not specified, and not in a running Kernel")

        app = YAPKernelApp.instance()
    return filefind(app.connection_file, ['.', app.connection_dir])
|
||||
|
||||
|
||||
def find_connection_file(filename='kernel-*.json', profile=None):
    """DEPRECATED: find a connection file, and return its absolute path.

    THIS FUNCTION IS DEPRECATED. Use jupyter_client.find_connection_file instead.

    Parameters
    ----------
    filename : str
        The connection file or fileglob to search for.
    profile : str [optional]
        The name of the profile to use when searching for the connection file,
        if different from the current IPython session or 'default'.

    Returns
    -------
    str : The absolute path of the connection file.
    """

    import warnings
    warnings.warn("""yap_kernel.find_connection_file is deprecated, use jupyter_client.find_connection_file""",
                  DeprecationWarning, stacklevel=2)
    from IPython.core.application import BaseIPythonApplication as IPApp
    try:
        # quick check for absolute path, before going through logic
        return filefind(filename)
    except IOError:
        pass

    if profile is None:
        # profile unspecified, check if running from an IPython app
        if IPApp.initialized():
            app = IPApp.instance()
            profile_dir = app.profile_dir
        else:
            # not running in IPython, use default profile
            profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), 'default')
    else:
        # find profiledir by profile name:
        profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
    security_dir = profile_dir.security_dir

    # search '.' and the profile's security dir for the (glob) filename
    return jupyter_client.find_connection_file(filename, path=['.', security_dir])
|
||||
|
||||
|
||||
def _find_connection_file(connection_file, profile=None):
    """Return the absolute path for a connection file

    - If nothing specified, return current Kernel's connection file
    - If profile specified, show deprecation warning about finding connection files in profiles
    - Otherwise, call jupyter_client.find_connection_file
    """
    # nothing specified: ask the running kernel
    if connection_file is None:
        return get_connection_file()
    # explicit file, no profile: regular jupyter_client lookup
    if profile is None:
        return jupyter_client.find_connection_file(connection_file)
    # legacy profile-based lookup, kept for backwards compatibility
    warnings.warn(
        "Finding connection file by profile is deprecated.",
        DeprecationWarning, stacklevel=3,
    )
    return find_connection_file(connection_file, profile=profile)
|
||||
|
||||
|
||||
def get_connection_info(connection_file=None, unpack=False, profile=None):
    """Return the connection information for the current Kernel.

    Parameters
    ----------
    connection_file : str [optional]
        The connection file to be used. Can be given by absolute path, or
        IPython will search in the security directory of a given profile.

        If unspecified, the connection file for the currently running
        IPython Kernel will be used, which is only allowed from inside a kernel.
    unpack : bool [default: False]
        if True, return the unpacked dict, otherwise just the string contents
        of the file.
    profile : DEPRECATED

    Returns
    -------
    The connection dictionary of the current kernel, as string or dict,
    depending on `unpack`.
    """
    cf = _find_connection_file(connection_file, profile)

    with open(cf) as f:
        info = f.read()

    if unpack:
        info = json.loads(info)
        # ensure key is bytes:
        info['key'] = str_to_bytes(info.get('key', ''))
    return info
|
||||
|
||||
|
||||
def connect_qtconsole(connection_file=None, argv=None, profile=None):
    """Connect a qtconsole to the current kernel.

    This is useful for connecting a second qtconsole to a kernel, or to a
    local notebook.

    Parameters
    ----------
    connection_file : str [optional]
        The connection file to be used. Can be given by absolute path, or
        IPython will search in the security directory of a given profile.

        If unspecified, the connection file for the currently running
        IPython Kernel will be used, which is only allowed from inside a kernel.
    argv : list [optional]
        Any extra args to be passed to the console.
    profile : DEPRECATED

    Returns
    -------
    :class:`subprocess.Popen` instance running the qtconsole frontend
    """
    argv = [] if argv is None else argv

    cf = _find_connection_file(connection_file, profile)

    # launch the console in a fresh interpreter pointed at this kernel
    cmd = ';'.join([
        "from IPython.qt.console import qtconsoleapp",
        "qtconsoleapp.main()"
    ])

    # NOTE(review): close_fds is disabled on win32, presumably because it
    # conflicts with piped stdout/stderr there — confirm before changing
    return Popen([sys.executable, '-c', cmd, '--existing', cf] + argv,
                 stdout=PIPE, stderr=PIPE, close_fds=(sys.platform != 'win32'),
                 )
|
||||
|
||||
|
||||
__all__ = [
|
||||
'write_connection_file',
|
||||
'get_connection_file',
|
||||
'find_connection_file',
|
||||
'get_connection_info',
|
||||
'connect_qtconsole',
|
||||
]
|
62
packages/python/yap_kernel/yap_kernel/datapub.py
Normal file
62
packages/python/yap_kernel/yap_kernel/datapub.py
Normal file
@ -0,0 +1,62 @@
|
||||
"""Publishing native (typically pickled) objects.
|
||||
"""
|
||||
|
||||
import warnings
|
||||
warnings.warn("yap_kernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning)
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from traitlets.config import Configurable
|
||||
from traitlets import Instance, Dict, CBytes, Any
|
||||
from yap_kernel.jsonutil import json_clean
|
||||
from yap_kernel.serialize import serialize_object
|
||||
from jupyter_client.session import Session, extract_header
|
||||
|
||||
|
||||
class ZMQDataPublisher(Configurable):
    """Publish ``data_message`` frames over an IOPub PUB socket."""

    # fix: was a duplicated assignment (``topic = topic = CBytes(...)``)
    topic = CBytes(b'datapub')
    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def publish_data(self, data):
        """publish a data_message on the IOPub channel

        Parameters
        ----------

        data : dict
            The data to be published. Think of it as a namespace.
        """
        session = self.session
        # serialize with the session's thresholds so large items become buffers
        buffers = serialize_object(data,
                                   buffer_threshold=session.buffer_threshold,
                                   item_threshold=session.item_threshold,
                                   )
        # content carries only the key names; values travel in buffers
        content = json_clean(dict(keys=list(data.keys())))
        session.send(self.pub_socket, 'data_message', content=content,
                     parent=self.parent_header,
                     buffers=buffers,
                     ident=self.topic,
                     )
|
||||
|
||||
|
||||
def publish_data(data):
    """publish a data_message on the IOPub channel

    Parameters
    ----------

    data : dict
        The data to be published. Think of it as a namespace.
    """
    warnings.warn("yap_kernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning)

    # delegate to the data publisher of the shell singleton
    from yap_kernel.zmqshell import ZMQInteractiveShell
    ZMQInteractiveShell.instance().data_pub.publish_data(data)
|
80
packages/python/yap_kernel/yap_kernel/displayhook.py
Normal file
80
packages/python/yap_kernel/yap_kernel/displayhook.py
Normal file
@ -0,0 +1,80 @@
|
||||
"""Replacements for sys.displayhook that publish over ZMQ."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import sys
|
||||
|
||||
from IPython.core.displayhook import DisplayHook
|
||||
from yap_kernel.jsonutil import encode_images
|
||||
from ipython_genutils.py3compat import builtin_mod
|
||||
from traitlets import Instance, Dict, Any
|
||||
from jupyter_client.session import extract_header, Session
|
||||
|
||||
|
||||
class ZMQDisplayHook(object):
    """A simple displayhook that publishes the object's repr over a ZeroMQ
    socket as an ``execute_result`` message."""

    # IOPub ident/topic under which results are published
    topic = b'execute_result'

    def __init__(self, session, pub_socket):
        self.session = session
        self.pub_socket = pub_socket
        self.parent_header = {}

    def get_execution_count(self):
        """This method is replaced in kernelapp"""
        return 0

    def __call__(self, obj):
        # mirror the builtin displayhook: None produces no output
        if obj is None:
            return

        # expose the result as the conventional ``_`` name
        builtin_mod._ = obj
        # flush streams so output ordering matches execution order
        for stream in (sys.stdout, sys.stderr):
            stream.flush()
        payload = {
            u'execution_count': self.get_execution_count(),
            u'data': {'text/plain': repr(obj)},
            u'metadata': {},
        }
        self.session.send(
            self.pub_socket, u'execute_result', payload,
            parent=self.parent_header, ident=self.topic,
        )

    def set_parent(self, parent):
        """Adopt *parent* as the parent header for outbound messages."""
        self.parent_header = extract_header(parent)
|
||||
|
||||
|
||||
class ZMQShellDisplayHook(DisplayHook):
    """A displayhook subclass that publishes data using ZeroMQ. This is intended
    to work with an InteractiveShell instance. It sends a dict of different
    representations of the object."""

    topic = None

    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def start_displayhook(self):
        """Create the skeleton execute_result message for this display cycle."""
        content = {'data': {}, 'metadata': {}}
        self.msg = self.session.msg(u'execute_result', content,
                                    parent=self.parent_header)

    def write_output_prompt(self):
        """Write the output prompt."""
        self.msg['content']['execution_count'] = self.prompt_count

    def write_format_data(self, format_dict, md_dict=None):
        """Record the formatted representations on the pending message."""
        content = self.msg['content']
        content['data'] = encode_images(format_dict)
        content['metadata'] = md_dict

    def finish_displayhook(self):
        """Finish up all displayhook activities."""
        for stream in (sys.stdout, sys.stderr):
            stream.flush()
        # Only publish when at least one representation was produced.
        if self.msg['content']['data']:
            self.session.send(self.pub_socket, self.msg, ident=self.topic)
        self.msg = None
|
57
packages/python/yap_kernel/yap_kernel/embed.py
Normal file
57
packages/python/yap_kernel/yap_kernel/embed.py
Normal file
@ -0,0 +1,57 @@
|
||||
"""Simple function for embedding an IPython kernel
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
import sys
|
||||
|
||||
from IPython.utils.frame import extract_module_locals
|
||||
|
||||
from .kernelapp import YAPKernelApp
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Code
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
def embed_kernel(module=None, local_ns=None, **kwargs):
    """Embed and start an IPython kernel in a given scope.

    Parameters
    ----------
    module : ModuleType, optional
        The module to load into IPython globals (default: caller)
    local_ns : dict, optional
        The namespace to load into IPython user namespace (default: caller)

    kwargs : various, optional
        Further keyword args are relayed to the YAPKernelApp constructor,
        allowing configuration of the Kernel.  Will only have an effect
        on the first embed_kernel call for a given process.

    """
    # get the app if it exists, or set it up if it doesn't
    if YAPKernelApp.initialized():
        app = YAPKernelApp.instance()
    else:
        app = YAPKernelApp.instance(**kwargs)
        app.initialize([])
        # Undo unnecessary sys module mangling from init_sys_modules.
        # This would not be necessary if we could prevent it
        # in the first place by using a different InteractiveShell
        # subclass, as in the regular embed case.
        main = app.kernel.shell._orig_sys_modules_main_mod
        if main is not None:
            sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main

    # load the calling scope if not given
    # NOTE: extract_module_locals(1) is frame-depth sensitive -- it inspects
    # the frame of embed_kernel's caller, so this call must remain at this
    # exact stack depth inside embed_kernel.
    (caller_module, caller_locals) = extract_module_locals(1)
    if module is None:
        module = caller_module
    if local_ns is None:
        local_ns = caller_locals

    # Point the kernel at the caller's globals/locals, then block in the
    # kernel event loop; app.start() does not return until shutdown.
    app.kernel.user_module = module
    app.kernel.user_ns = local_ns
    app.shell.set_completer_frame()
    app.start()
|
309
packages/python/yap_kernel/yap_kernel/eventloops.py
Normal file
309
packages/python/yap_kernel/yap_kernel/eventloops.py
Normal file
@ -0,0 +1,309 @@
|
||||
# encoding: utf-8
|
||||
"""Event loop integration for the ZeroMQ-based kernels."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import platform
|
||||
|
||||
import zmq
|
||||
|
||||
from distutils.version import LooseVersion as V
|
||||
from traitlets.config.application import Application
|
||||
from IPython.utils import io
|
||||
|
||||
def _use_appnope():
    """Should we use appnope for dealing with OS X app nap?

    Checks if we are on OS X 10.9 or greater.

    Returns
    -------
    bool
        True only on Darwin with a release of at least 10.9.
    """
    if sys.platform != 'darwin':
        return False
    # platform.mac_ver() yields e.g. ('10.9.5', ...). Compare the numeric
    # (major, minor) pair directly instead of distutils' LooseVersion,
    # which was removed from the standard library in Python 3.12 (PEP 632).
    release = platform.mac_ver()[0]
    try:
        parts = tuple(int(p) for p in release.split('.')[:2])
    except ValueError:
        # Unparseable / empty version string: be conservative.
        return False
    return parts >= (10, 9)
|
||||
|
||||
def _notify_stream_qt(kernel, stream):
    """Wire a zmq *stream* into the Qt event loop of *kernel*'s app."""

    from IPython.external.qt_for_kernel import QtCore

    # On OS X >= 10.9, keep the process awake while we handle messages;
    # otherwise use a no-op context manager.
    if _use_appnope() and kernel._darwin_app_nap:
        from appnope import nope_scope as context
    else:
        from contextlib import contextmanager

        @contextmanager
        def context():
            yield

    def process_stream_events():
        # Drain every message currently pending on the socket.
        while stream.getsockopt(zmq.EVENTS) & zmq.POLLIN:
            with context():
                kernel.do_one_iteration()

    # Qt fires the notifier whenever the socket's fd becomes readable.
    notifier = QtCore.QSocketNotifier(stream.getsockopt(zmq.FD),
                                      QtCore.QSocketNotifier.Read,
                                      kernel.app)
    notifier.activated.connect(process_stream_events)
|
||||
|
||||
# mapping of keys to loop functions
# NOTE: entries are looked up by enable_gui(); inline-style matplotlib
# backends need no GUI event loop, so they map to None. Toolkit loops are
# added to this dict by the @register_integration decorators below.
loop_map = {
    'inline': None,
    'nbagg': None,
    'notebook': None,
    'ipympl': None,
    None : None,
}
|
||||
|
||||
def register_integration(*toolkitnames):
    """Decorator to register an event loop to integrate with the IPython kernel

    The decorator takes names to register the event loop as for the %gui magic.
    You can provide alternative names for the same toolkit.

    The decorated function should take a single argument, the IPython kernel
    instance, arrange for the event loop to call ``kernel.do_one_iteration()``
    at least every ``kernel._poll_interval`` seconds, and start the event loop.

    :mod:`yap_kernel.eventloops` provides and registers such functions
    for a few common event loops.
    """
    def decorator(func):
        # Expose the loop function under every requested alias.
        loop_map.update((alias, func) for alias in toolkitnames)
        return func

    return decorator
|
||||
|
||||
|
||||
def _loop_qt(app):
    """Inner-loop for running the Qt eventloop

    Pulled from guisupport.start_event_loop in IPython < 5.2,
    since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
    rather than if the eventloop is actually running.

    Parameters
    ----------
    app : QApplication-like
        Any object exposing ``exec_()``; gains an ``_in_event_loop`` flag
        that is True only while the loop runs.
    """
    app._in_event_loop = True
    try:
        app.exec_()
    finally:
        # Reset the flag even if exec_() raises (e.g. KeyboardInterrupt),
        # so a later restart does not see a stale "running" marker.
        app._in_event_loop = False
|
||||
|
||||
|
||||
@register_integration('qt', 'qt4')
def loop_qt4(kernel):
    """Start a kernel with PyQt4 event loop integration."""

    from IPython.lib.guisupport import get_app_qt4

    kernel.app = get_app_qt4([" "])
    # Keep the app alive when user code closes its last window.
    kernel.app.setQuitOnLastWindowClosed(False)

    # Let Qt call back into the kernel whenever a shell socket is readable.
    for shell_stream in kernel.shell_streams:
        _notify_stream_qt(kernel, shell_stream)

    _loop_qt(kernel.app)
|
||||
|
||||
|
||||
@register_integration('qt5')
def loop_qt5(kernel):
    """Start a kernel with PyQt5 event loop integration."""
    # Force the Qt binding selection, then reuse the qt4 code path,
    # which is binding-agnostic once QT_API is set.
    os.environ['QT_API'] = 'pyqt5'
    return loop_qt4(kernel)
|
||||
|
||||
|
||||
def _loop_wx(app):
|
||||
"""Inner-loop for running the Wx eventloop
|
||||
|
||||
Pulled from guisupport.start_event_loop in IPython < 5.2,
|
||||
since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
|
||||
rather than if the eventloop is actually running.
|
||||
"""
|
||||
app._in_event_loop = True
|
||||
app.MainLoop()
|
||||
app._in_event_loop = False
|
||||
|
||||
|
||||
@register_integration('wx')
def loop_wx(kernel):
    """Start a kernel with wx event loop support.

    Drives ``kernel.do_one_iteration`` from a hidden wx.Frame's wx.Timer,
    then blocks in the wx main loop via ``_loop_wx``.
    """

    import wx

    if _use_appnope() and kernel._darwin_app_nap:
        # we don't hook up App Nap contexts for Wx,
        # just disable it outright.
        from appnope import nope
        nope()

    doi = kernel.do_one_iteration
    # Wx uses milliseconds
    poll_interval = int(1000*kernel._poll_interval)

    # We have to put the wx.Timer in a wx.Frame for it to fire properly.
    # We make the Frame hidden when we create it in the main app below.
    class TimerFrame(wx.Frame):
        def __init__(self, func):
            wx.Frame.__init__(self, None, -1)
            self.timer = wx.Timer(self)
            # Units for the timer are in milliseconds
            self.timer.Start(poll_interval)
            self.Bind(wx.EVT_TIMER, self.on_timer)
            self.func = func

        def on_timer(self, event):
            # Timer callback: run one kernel iteration per tick.
            self.func()

    # We need a custom wx.App to create our Frame subclass that has the
    # wx.Timer to drive the ZMQ event loop.
    class IPWxApp(wx.App):
        def OnInit(self):
            self.frame = TimerFrame(doi)
            self.frame.Show(False)
            return True

    # The redirect=False here makes sure that wx doesn't replace
    # sys.stdout/stderr with its own classes.
    kernel.app = IPWxApp(redirect=False)

    # The import of wx on Linux sets the handler for signal.SIGINT
    # to 0.  This is a bug in wx or gtk.  We fix by just setting it
    # back to the Python default.
    import signal
    if not callable(signal.getsignal(signal.SIGINT)):
        signal.signal(signal.SIGINT, signal.default_int_handler)

    # Blocks until the wx main loop exits.
    _loop_wx(kernel.app)
|
||||
|
||||
|
||||
@register_integration('tk')
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""

    try:
        from tkinter import Tk  # Py 3
    except ImportError:
        from Tkinter import Tk  # Py 2

    # Tk uses milliseconds
    poll_interval = int(1000 * kernel._poll_interval)

    # For Tkinter, we create a Tk object and call its withdraw method.
    class Timer(object):
        def __init__(self, func):
            self.app = Tk()
            self.app.withdraw()
            self.func = func

        def on_timer(self):
            self.func()
            # Re-arm ourselves for the next tick.
            self.app.after(poll_interval, self.on_timer)

        def start(self):
            self.on_timer()  # Call it once to get things going.
            self.app.mainloop()

    kernel.timer = Timer(kernel.do_one_iteration)
    kernel.timer.start()
|
||||
|
||||
|
||||
@register_integration('gtk')
def loop_gtk(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtkembed import GTKEmbed

    # GTKEmbed hooks the kernel's iteration into gtk idle/timeout callbacks.
    GTKEmbed(kernel).start()
|
||||
|
||||
|
||||
@register_integration('gtk3')
def loop_gtk3(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtk3embed import GTKEmbed

    # GTKEmbed hooks the kernel's iteration into GTK3 idle/timeout callbacks.
    GTKEmbed(kernel).start()
|
||||
|
||||
|
||||
@register_integration('osx')
def loop_cocoa(kernel):
    """Start the kernel, coordinating with the Cocoa CFRunLoop event loop
    via the matplotlib MacOSX backend.
    """
    import matplotlib
    if matplotlib.__version__ < '1.1.0':
        # Older matplotlib has no Timer in the MacOSX backend; degrade to Tk.
        kernel.log.warn(
            "MacOSX backend in matplotlib %s doesn't have a Timer, "
            "falling back on Tk for CFRunLoop integration. Note that "
            "even this won't work if Tk is linked against X11 instead of "
            "Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
            "you must use matplotlib >= 1.1.0, or a native libtk."
        )
        return loop_tk(kernel)

    from matplotlib.backends.backend_macosx import TimerMac, show

    # scale interval for sec->ms
    poll_interval = int(1000*kernel._poll_interval)

    real_excepthook = sys.excepthook
    def handle_int(etype, value, tb):
        """don't let KeyboardInterrupts look like crashes"""
        if etype is KeyboardInterrupt:
            io.raw_print("KeyboardInterrupt caught in CFRunLoop")
        else:
            real_excepthook(etype, value, tb)

    # add doi() as a Timer to the CFRunLoop
    def doi():
        # restore excepthook during IPython code
        sys.excepthook = real_excepthook
        kernel.do_one_iteration()
        # and back:
        sys.excepthook = handle_int

    t = TimerMac(poll_interval)
    t.add_callback(doi)
    t.start()

    # but still need a Poller for when there are no active windows,
    # during which time mainloop() returns immediately
    poller = zmq.Poller()
    if kernel.control_stream:
        poller.register(kernel.control_stream.socket, zmq.POLLIN)
    for stream in kernel.shell_streams:
        poller.register(stream.socket, zmq.POLLIN)

    while True:
        try:
            # double nested try/except, to properly catch KeyboardInterrupt
            # due to pyzmq Issue #130
            try:
                # don't let interrupts during mainloop invoke crash_handler:
                sys.excepthook = handle_int
                show.mainloop()
                sys.excepthook = real_excepthook
                # use poller if mainloop returned (no windows)
                # scale by extra factor of 10, since it's a real poll
                poller.poll(10*poll_interval)
                kernel.do_one_iteration()
            except:
                raise
        except KeyboardInterrupt:
            # Ctrl-C shouldn't crash the kernel
            io.raw_print("KeyboardInterrupt caught in kernel")
        finally:
            # ensure excepthook is restored
            sys.excepthook = real_excepthook
|
||||
|
||||
|
||||
|
||||
def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        raise ValueError(
            "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys()))
    if kernel is None:
        # No kernel given: try to find one on a running IPython Application.
        if Application.initialized():
            kernel = getattr(Application.instance(), 'kernel', None)
        if kernel is None:
            raise RuntimeError(
                "You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
    loop = loop_map[gui]
    # Refuse to switch once a *different* loop is already active.
    if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
        raise RuntimeError("Cannot activate multiple GUI eventloops")
    kernel.eventloop = loop
|
15
packages/python/yap_kernel/yap_kernel/gui/__init__.py
Normal file
15
packages/python/yap_kernel/yap_kernel/gui/__init__.py
Normal file
@ -0,0 +1,15 @@
|
||||
"""GUI support for the IPython ZeroMQ kernel.
|
||||
|
||||
This package contains the various toolkit-dependent utilities we use to enable
|
||||
coordination between the IPython kernel and the event loops of the various GUI
|
||||
toolkits.
|
||||
"""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2010-2011 The IPython Development Team.
|
||||
#
|
||||
# Distributed under the terms of the BSD License.
|
||||
#
|
||||
# The full license is in the file COPYING.txt, distributed as part of this
|
||||
# software.
|
||||
#-----------------------------------------------------------------------------
|
88
packages/python/yap_kernel/yap_kernel/gui/gtk3embed.py
Normal file
88
packages/python/yap_kernel/yap_kernel/gui/gtk3embed.py
Normal file
@ -0,0 +1,88 @@
|
||||
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2010-2011 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING.txt, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
# stdlib
|
||||
import sys
|
||||
|
||||
# Third-party
|
||||
import gi
|
||||
gi.require_version ('Gdk', '3.0')
|
||||
gi.require_version ('Gtk', '3.0')
|
||||
from gi.repository import GObject, Gtk
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Classes and functions
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class GTKEmbed(object):
    """A class to embed a kernel into the GTK main event loop.
    """
    def __init__(self, kernel):
        self.kernel = kernel
        # The genuine Gtk entry points are stashed here once hijacked.
        self.gtk_main = None
        self.gtk_main_quit = None

    def start(self):
        """Starts the GTK main event loop and sets our kernel startup routine.
        """
        # Run the kernel wiring as soon as GTK goes idle, then block in main().
        GObject.idle_add(self._wire_kernel)
        Gtk.main()

    def _wire_kernel(self):
        """Initializes the kernel inside GTK.

        This is meant to run only once at startup, so it does its job and
        returns False to ensure it doesn't get run again by GTK.
        """
        self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
        # GObject timeouts are in milliseconds.
        GObject.timeout_add(int(1000*self.kernel._poll_interval),
                            self.iterate_kernel)
        return False

    def iterate_kernel(self):
        """Run one iteration of the kernel and return True.

        GTK timer functions must return True to be called again, so we make the
        call to :meth:`do_one_iteration` and then return True for GTK.
        """
        self.kernel.do_one_iteration()
        return True

    def stop(self):
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown.  We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
        self.gtk_main_quit()
        sys.exit()

    def _hijack_gtk(self):
        """Hijack a few key functions in GTK for IPython integration.

        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython.  This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop

        Returns
        -------
        The original functions that have been hijacked:
        - Gtk.main
        - Gtk.main_quit
        """
        def dummy(*args, **kw):
            pass
        # save and trap main and main_quit from gtk
        originals = (Gtk.main, Gtk.main_quit)
        Gtk.main, Gtk.main_quit = dummy, dummy
        return originals
|
86
packages/python/yap_kernel/yap_kernel/gui/gtkembed.py
Normal file
86
packages/python/yap_kernel/yap_kernel/gui/gtkembed.py
Normal file
@ -0,0 +1,86 @@
|
||||
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2010-2011 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING.txt, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
# stdlib
|
||||
import sys
|
||||
|
||||
# Third-party
|
||||
import gobject
|
||||
import gtk
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Classes and functions
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class GTKEmbed(object):
    """A class to embed a kernel into the GTK main event loop.
    """
    def __init__(self, kernel):
        self.kernel = kernel
        # These two will later store the real gtk functions when we hijack them
        self.gtk_main = None
        self.gtk_main_quit = None

    def start(self):
        """Starts the GTK main event loop and sets our kernel startup routine.
        """
        # Register our function to initiate the kernel and start gtk
        gobject.idle_add(self._wire_kernel)
        gtk.main()

    def _wire_kernel(self):
        """Initializes the kernel inside GTK.

        This is meant to run only once at startup, so it does its job and
        returns False to ensure it doesn't get run again by GTK.
        """
        self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
        # gobject timeouts are in milliseconds; poll the kernel periodically.
        gobject.timeout_add(int(1000*self.kernel._poll_interval),
                            self.iterate_kernel)
        return False

    def iterate_kernel(self):
        """Run one iteration of the kernel and return True.

        GTK timer functions must return True to be called again, so we make the
        call to :meth:`do_one_iteration` and then return True for GTK.
        """
        self.kernel.do_one_iteration()
        return True

    def stop(self):
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown.  We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
        self.gtk_main_quit()
        sys.exit()

    def _hijack_gtk(self):
        """Hijack a few key functions in GTK for IPython integration.

        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython.  This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop

        Returns
        -------
        The original functions that have been hijacked:
        - gtk.main
        - gtk.main_quit
        """
        def dummy(*args, **kw):
            pass
        # save and trap main and main_quit from gtk
        orig_main, gtk.main = gtk.main, dummy
        orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
        return orig_main, orig_main_quit
|
68
packages/python/yap_kernel/yap_kernel/heartbeat.py
Normal file
68
packages/python/yap_kernel/yap_kernel/heartbeat.py
Normal file
@ -0,0 +1,68 @@
|
||||
"""The client and server for a basic ping-pong style heartbeat.
|
||||
"""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2008-2011 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
import errno
|
||||
import os
|
||||
import socket
|
||||
from threading import Thread
|
||||
|
||||
import zmq
|
||||
|
||||
from jupyter_client.localinterfaces import localhost
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Code
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
class Heartbeat(Thread):
    """A simple ping-pong style heartbeat that runs in a thread."""

    def __init__(self, context, addr=None):
        if addr is None:
            addr = ('tcp', localhost(), 0)
        Thread.__init__(self)
        self.context = context
        self.transport, self.ip, self.port = addr
        # Port 0 means "pick one for me".
        if self.port == 0:
            self.port = self._pick_port(addr[0])
        self.addr = (self.ip, self.port)
        self.daemon = True

    def _pick_port(self, transport):
        """Choose a concrete port/endpoint number for the given transport."""
        if transport == 'tcp':
            s = socket.socket()
            # '*' means all interfaces to 0MQ, which is '' to socket.socket
            s.bind(('' if self.ip == '*' else self.ip, 0))
            port = s.getsockname()[1]
            s.close()
            return port
        if transport == 'ipc':
            # Probe for the first unused path of the form "<ip>-<n>".
            port = 1
            while os.path.exists("%s-%s" % (self.ip, port)):
                port += 1
            return port
        raise ValueError("Unrecognized zmq transport: %s" % transport)

    def run(self):
        self.socket = self.context.socket(zmq.ROUTER)
        self.socket.linger = 1000
        sep = ':' if self.transport == 'tcp' else '-'
        endpoint = '%s://%s' % (self.transport, self.ip) + sep + str(self.port)
        self.socket.bind(endpoint)
        # Echo every incoming message back to its sender until the device
        # terminates; retry on interrupted system calls.
        while True:
            try:
                zmq.device(zmq.QUEUE, self.socket, self.socket)
            except zmq.ZMQError as e:
                if e.errno != errno.EINTR:
                    raise
            else:
                break
|
@ -0,0 +1,8 @@
|
||||
from .channels import (
|
||||
InProcessChannel,
|
||||
InProcessHBChannel,
|
||||
)
|
||||
|
||||
from .client import InProcessKernelClient
|
||||
from .manager import InProcessKernelManager
|
||||
from .blocking import BlockingInProcessKernelClient
|
93
packages/python/yap_kernel/yap_kernel/inprocess/blocking.py
Normal file
93
packages/python/yap_kernel/yap_kernel/inprocess/blocking.py
Normal file
@ -0,0 +1,93 @@
|
||||
""" Implements a fully blocking kernel client.
|
||||
|
||||
Useful for test suites and blocking terminal interfaces.
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2012 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING.txt, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
try:
|
||||
from queue import Queue, Empty # Py 3
|
||||
except ImportError:
|
||||
from Queue import Queue, Empty # Py 2
|
||||
|
||||
# IPython imports
|
||||
from IPython.utils.io import raw_print
|
||||
from traitlets import Type
|
||||
|
||||
# Local imports
|
||||
from .channels import (
|
||||
InProcessChannel,
|
||||
)
|
||||
from .client import InProcessKernelClient
|
||||
|
||||
class BlockingInProcessChannel(InProcessChannel):
    """An in-process channel whose messages can be retrieved synchronously."""

    def __init__(self, *args, **kwds):
        super(BlockingInProcessChannel, self).__init__(*args, **kwds)
        self._in_queue = Queue()

    def call_handlers(self, msg):
        # Stash the message; consumers pull it via get_msg/get_msgs.
        self._in_queue.put(msg)

    def get_msg(self, block=True, timeout=None):
        """ Gets a message if there is one that is ready. """
        if timeout is None:
            # Queue.get(timeout=None) has stupid uninteruptible
            # behavior, so wait for a week instead
            timeout = 604800
        return self._in_queue.get(block, timeout)

    def get_msgs(self):
        """ Get all messages that are currently ready. """
        ready = []
        try:
            while True:
                ready.append(self.get_msg(block=False))
        except Empty:
            pass
        return ready

    def msg_ready(self):
        """ Is there a message that has been received? """
        return not self._in_queue.empty()
|
||||
|
||||
|
||||
class BlockingInProcessStdInChannel(BlockingInProcessChannel):
    """Stdin channel that answers input requests synchronously."""

    def call_handlers(self, msg):
        """ Overridden for the in-process channel.

        This methods simply calls raw_input directly.
        """
        if msg['header']['msg_type'] == 'input_request':
            # Prompt on the real terminal and feed the reply straight back.
            reader = self.client.kernel._sys_raw_input
            raw_print(msg['content']['prompt'], end='')
            self.client.input(reader())
|
||||
|
||||
class BlockingInProcessKernelClient(InProcessKernelClient):
    """An in-process KernelClient whose channels block until messages arrive."""

    # The classes to use for the various channels.
    shell_channel_class = Type(BlockingInProcessChannel)
    iopub_channel_class = Type(BlockingInProcessChannel)
    stdin_channel_class = Type(BlockingInProcessStdInChannel)

    def wait_for_ready(self):
        """Block until the kernel replies to kernel_info, then drain IOPub.

        Discards any IOPub messages queued during startup so callers begin
        with a clean output stream.
        """
        # Wait for kernel info reply on shell channel
        while True:
            msg = self.shell_channel.get_msg(block=True)
            if msg['msg_type'] == 'kernel_info_reply':
                self._handle_kernel_info_reply(msg)
                break

        # Flush IOPub channel
        # (the stray debug print of each msg_type was removed -- it wrote
        # startup noise directly to stdout)
        while True:
            try:
                self.iopub_channel.get_msg(block=True, timeout=0.2)
            except Empty:
                break
|
97
packages/python/yap_kernel/yap_kernel/inprocess/channels.py
Normal file
97
packages/python/yap_kernel/yap_kernel/inprocess/channels.py
Normal file
@ -0,0 +1,97 @@
|
||||
"""A kernel client for in-process kernels."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from jupyter_client.channelsabc import HBChannelABC
|
||||
|
||||
from .socket import DummySocket
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Channel classes
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class InProcessChannel(object):
    """Base class for in-process channels."""

    proxy_methods = []

    def __init__(self, client=None):
        super(InProcessChannel, self).__init__()
        self.client = client
        self._is_alive = False

    def is_alive(self):
        """Return whether the channel has been started."""
        return self._is_alive

    def start(self):
        """Mark the channel as running."""
        self._is_alive = True

    def stop(self):
        """Mark the channel as stopped."""
        self._is_alive = False

    def call_handlers(self, msg):
        """ This method is called in the main thread when a message arrives.

        Subclasses should override this method to handle incoming messages.
        """
        raise NotImplementedError('call_handlers must be defined in a subclass.')

    def flush(self, timeout=1.0):
        # Nothing is buffered in-process, so there is nothing to flush.
        pass

    def call_handlers_later(self, *args, **kwds):
        """ Call the message handlers later.

        The default implementation just calls the handlers immediately, but this
        method exists so that GUI toolkits can defer calling the handlers until
        after the event loop has run, as expected by GUI frontends.
        """
        self.call_handlers(*args, **kwds)

    def process_events(self):
        """ Process any pending GUI events.

        This method will be never be called from a frontend without an event
        loop (e.g., a terminal frontend).
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
|
||||
class InProcessHBChannel(object):
    """A dummy heartbeat channel interface for in-process kernels.

    Normally we use the heartbeat to check that the kernel process is alive.
    When the kernel is in-process, that doesn't make sense, but clients still
    expect this interface.
    """

    # Mirrors the real HBChannel's timeout attribute for API compatibility.
    time_to_dead = 3.0

    def __init__(self, client=None):
        super(InProcessHBChannel, self).__init__()
        self.client = client
        self._is_alive = False
        self._pause = True

    def is_alive(self):
        """Return whether the channel has been started."""
        return self._is_alive

    def start(self):
        """Mark the channel as running."""
        self._is_alive = True

    def stop(self):
        """Mark the channel as stopped."""
        self._is_alive = False

    def pause(self):
        """Suspend the (notional) heartbeat."""
        self._pause = True

    def unpause(self):
        """Resume the (notional) heartbeat."""
        self._pause = False

    def is_beating(self):
        """Return True while not paused."""
        return not self._pause
|
||||
|
||||
|
||||
# Register as a virtual subclass so isinstance checks against the
# jupyter_client HBChannelABC interface accept the in-process dummy channel.
HBChannelABC.register(InProcessHBChannel)
|
180
packages/python/yap_kernel/yap_kernel/inprocess/client.py
Normal file
180
packages/python/yap_kernel/yap_kernel/inprocess/client.py
Normal file
@ -0,0 +1,180 @@
|
||||
"""A client for in-process kernels."""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2012 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# IPython imports
|
||||
from yap_kernel.inprocess.socket import DummySocket
|
||||
from traitlets import Type, Instance, default
|
||||
from jupyter_client.clientabc import KernelClientABC
|
||||
from jupyter_client.client import KernelClient
|
||||
|
||||
# Local imports
|
||||
from .channels import (
|
||||
InProcessChannel,
|
||||
InProcessHBChannel,
|
||||
)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Main kernel Client class
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class InProcessKernelClient(KernelClient):
    """A client for an in-process kernel.

    This class implements the interface of
    `jupyter_client.clientabc.KernelClientABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.client.KernelClient` for docstrings.
    """

    # The classes to use for the various channels.
    shell_channel_class = Type(InProcessChannel)
    iopub_channel_class = Type(InProcessChannel)
    stdin_channel_class = Type(InProcessChannel)
    hb_channel_class = Type(InProcessHBChannel)

    # The in-process kernel instance this client is attached to.
    kernel = Instance('yap_kernel.inprocess.yapkernel.InProcessKernel',
                      allow_none=True)

    #--------------------------------------------------------------------------
    # Channel management methods
    #--------------------------------------------------------------------------

    @default('blocking_class')
    def _default_blocking_class(self):
        # Imported lazily to avoid a circular import with .blocking.
        from .blocking import BlockingInProcessKernelClient
        return BlockingInProcessKernelClient

    def get_connection_info(self):
        """Return the connection info dict, augmented with the kernel object."""
        d = super(InProcessKernelClient, self).get_connection_info()
        d['kernel'] = self.kernel
        return d

    def start_channels(self, *args, **kwargs):
        """Start the channels and register this client as a kernel frontend."""
        # NOTE(review): *args/**kwargs are accepted but deliberately not
        # forwarded to the base implementation -- confirm this is intended.
        super(InProcessKernelClient, self).start_channels()
        self.kernel.frontends.append(self)

    @property
    def shell_channel(self):
        # Channels are constructed lazily on first access.
        if self._shell_channel is None:
            self._shell_channel = self.shell_channel_class(self)
        return self._shell_channel

    @property
    def iopub_channel(self):
        if self._iopub_channel is None:
            self._iopub_channel = self.iopub_channel_class(self)
        return self._iopub_channel

    @property
    def stdin_channel(self):
        if self._stdin_channel is None:
            self._stdin_channel = self.stdin_channel_class(self)
        return self._stdin_channel

    @property
    def hb_channel(self):
        if self._hb_channel is None:
            self._hb_channel = self.hb_channel_class(self)
        return self._hb_channel

    # Methods for sending specific messages
    # -------------------------------------

    def execute(self, code, silent=False, store_history=True,
                user_expressions=None, allow_stdin=None):
        """Send an execute_request to the kernel; return the message id.

        ``user_expressions`` defaults to None (treated as an empty dict) to
        avoid the shared-mutable-default-argument pitfall; callers that
        previously relied on the implicit ``{}`` are unaffected.
        """
        if user_expressions is None:
            user_expressions = {}
        if allow_stdin is None:
            allow_stdin = self.allow_stdin
        content = dict(code=code, silent=silent, store_history=store_history,
                       user_expressions=user_expressions,
                       allow_stdin=allow_stdin)
        msg = self.session.msg('execute_request', content)
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def complete(self, code, cursor_pos=None):
        """Send a complete_request; the cursor defaults to the end of ``code``."""
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos)
        msg = self.session.msg('complete_request', content)
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def inspect(self, code, cursor_pos=None, detail_level=0):
        """Send an inspect_request; the cursor defaults to the end of ``code``."""
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos,
                       detail_level=detail_level,
                       )
        msg = self.session.msg('inspect_request', content)
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def history(self, raw=True, output=False, hist_access_type='range', **kwds):
        """Send a history_request built from the given access parameters."""
        content = dict(raw=raw, output=output,
                       hist_access_type=hist_access_type, **kwds)
        msg = self.session.msg('history_request', content)
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def shutdown(self, restart=False):
        """Not supported: an in-process kernel cannot be shut down."""
        # FIXME: What to do here?
        raise NotImplementedError('Cannot shutdown in-process kernel')

    def kernel_info(self):
        """Request kernel info."""
        msg = self.session.msg('kernel_info_request')
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def comm_info(self, target_name=None):
        """Request a dictionary of valid comms and their targets."""
        if target_name is None:
            content = {}
        else:
            content = dict(target_name=target_name)
        msg = self.session.msg('comm_info_request', content)
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def input(self, string):
        """Deliver a raw_input reply directly to the attached kernel."""
        if self.kernel is None:
            raise RuntimeError('Cannot send input reply. No kernel exists.')
        self.kernel.raw_input_str = string

    def is_complete(self, code):
        """Send an is_complete_request for ``code``."""
        msg = self.session.msg('is_complete_request', {'code': code})
        self._dispatch_to_kernel(msg)
        return msg['header']['msg_id']

    def _dispatch_to_kernel(self, msg):
        """Send ``msg`` to the kernel over a dummy socket and hand the reply
        to the shell channel's handlers."""
        kernel = self.kernel
        if kernel is None:
            raise RuntimeError('Cannot send request. No kernel exists.')

        # Serialize through a DummySocket so the kernel sees the same wire
        # format it would on a real zmq stream.
        stream = DummySocket()
        self.session.send(stream, msg)
        msg_parts = stream.recv_multipart()
        kernel.dispatch_shell(stream, msg_parts)

        idents, reply_msg = self.session.recv(stream, copy=False)
        self.shell_channel.call_handlers_later(reply_msg)
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# ABC Registration
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# Register as a conforming KernelClient implementation.
KernelClientABC.register(InProcessKernelClient)
|
@ -0,0 +1,8 @@
|
||||
"""Shared constants.
|
||||
"""
|
||||
|
||||
# Because inprocess communication is not networked, we can use a common Session
# key everywhere. This is not just the empty bytestring, to avoid tripping
# certain security checks in the rest of Jupyter that assume that empty keys
# are insecure.
INPROCESS_KEY = b'inprocess'
|
315
packages/python/yap_kernel/yap_kernel/inprocess/ipkernel.py
Normal file
315
packages/python/yap_kernel/yap_kernel/inprocess/ipkernel.py
Normal file
@ -0,0 +1,315 @@
|
||||
"""An in-process kernel"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from contextlib import contextmanager
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from IPython.core.interactiveshell import InteractiveShellABC
|
||||
from yap_kernel.jsonutil import json_clean
|
||||
from traitlets import Any, Enum, Instance, List, Type, default
|
||||
from yap_kernel.yapkernel import YAPKernel
|
||||
from yap_kernel.zmqshell import ZMQInteractiveShell
|
||||
|
||||
from .constants import INPROCESS_KEY
|
||||
from .socket import DummySocket
|
||||
from ..iostream import OutStream, BackgroundSocket, IOPubThread
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Main kernel class
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class InProcessKernel(YAPKernel):
    """A YAP kernel that runs in the same process as its frontends,
    communicating through DummySocket queues instead of real zmq sockets."""

    #-------------------------------------------------------------------------
    # InProcessKernel interface
    #-------------------------------------------------------------------------

    # The frontends connected to this kernel.
    frontends = List(
        Instance('yap_kernel.inprocess.client.InProcessKernelClient',
                 allow_none=True)
    )

    # The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation for the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
               default_value='inline')

    # Holding slot for a raw_input reply pushed in by a frontend.
    raw_input_str = Any()
    # Replacement stdout/stderr streams installed during execute_request.
    stdout = Any()
    stderr = Any()

    #-------------------------------------------------------------------------
    # Kernel interface
    #-------------------------------------------------------------------------

    shell_class = Type(allow_none=True)
    shell_streams = List()
    control_stream = Any()
    # The raw in-process "socket" that iopub messages are written to.
    _underlying_iopub_socket = Instance(DummySocket, ())
    iopub_thread = Instance(IOPubThread)

    @default('iopub_thread')
    def _default_iopub_thread(self):
        # Start the IOPub thread eagerly so background publishing works.
        thread = IOPubThread(self._underlying_iopub_socket)
        thread.start()
        return thread

    iopub_socket = Instance(BackgroundSocket)

    @default('iopub_socket')
    def _default_iopub_socket(self):
        return self.iopub_thread.background_socket

    stdin_socket = Instance(DummySocket, ())

    def __init__(self, **traits):
        """Initialize the kernel and boot an embedded YAP engine."""
        super(InProcessKernel, self).__init__(**traits)

        # Re-dispatch every iopub message to the connected frontends.
        self._underlying_iopub_socket.observe(self._io_dispatch, names=['message_sent'])
        # NOTE(review): `os` and `yap` are not imported in this module, and
        # `use_module`/`library` are undefined names here -- as written this
        # constructor would raise NameError. Verify against the yap4py API.
        pjoin = os.path.join
        here = os.path.abspath(os.path.dirname(__file__))
        yap_lib_path = pjoin(here, "../yap4py/prolog" )
        yap_dll_path = pjoin(here, "../yap4py" )
        args = yap.YAPEngineArgs()
        args.setYapLibDir(yap_dll_path)
        args.setYapShareDir(yap_lib_path)
        #args.setYapPrologBootFile(os.path.join(yap_lib_path."startup.yss"))
        self.yapeng = yap.YAPEngine( args )
        self.q = None
        self.yapeng.goal( use_module( library('yapi') ) )
        # NOTE(review): `run_cell` is not defined on this class; presumably it
        # is meant to come from the shell subclass -- confirm.
        self.shell.run_cell = self.run_cell
        self.shell.kernel = self

    def execute_request(self, stream, ident, parent):
        """ Override for temporary IO redirection. """
        with self._redirected_io():
            super(InProcessKernel, self).execute_request(stream, ident, parent)

    def start(self):
        """ Override registration of dispatchers for streams. """
        self.shell.exit_now = False

    def _abort_queue(self, stream):
        """ The in-process kernel doesn't abort requests. """
        pass

    def _input_request(self, prompt, ident, parent, password=False):
        """Ask the requesting frontend for input and block until it replies."""
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request to the frontend that owns the session.
        content = json_clean(dict(prompt=prompt, password=password))
        msg = self.session.msg(u'input_request', content, parent)
        for frontend in self.frontends:
            if frontend.session.session == parent['header']['session']:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error('No frontend found for raw_input request')
            return str()

        # Await a response by pumping the frontend's event loop.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str

    #-------------------------------------------------------------------------
    # Protected interface
    #-------------------------------------------------------------------------

    @contextmanager
    def _redirected_io(self):
        """ Temporarily redirect IO to the kernel.
        """
        # NOTE(review): not exception-safe -- if the wrapped body raises, the
        # original streams are never restored (no try/finally around yield).
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = self.stdout, self.stderr
        yield
        sys.stdout, sys.stderr = sys_stdout, sys_stderr

    #------ Trait change handlers --------------------------------------------

    def _io_dispatch(self, change):
        """ Called when a message is sent to the IO socket.
        """
        # Pull the freshly-sent message back off the dummy socket and fan it
        # out to every connected frontend's iopub channel.
        ident, msg = self.session.recv(self.iopub_socket, copy=False)
        for frontend in self.frontends:
            frontend.iopub_channel.call_handlers(msg)

    #------ Trait initializers -----------------------------------------------

    @default('log')
    def _default_log(self):
        return logging.getLogger(__name__)

    @default('session')
    def _default_session(self):
        # Use the shared in-process key so messages need no real signing.
        from jupyter_client.session import Session
        return Session(parent=self, key=INPROCESS_KEY)

    @default('shell_class')
    def _default_shell_class(self):
        return InProcessInteractiveShell

    @default('stdout')
    def _default_stdout(self):
        return OutStream(self.session, self.iopub_thread, u'stdout')

    @default('stderr')
    def _default_stderr(self):
        return OutStream(self.session, self.iopub_thread, u'stderr')
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Interactive shell subclass
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class InProcessInteractiveShell(ZMQInteractiveShell):
    """Interactive shell variant used by the in-process YAP kernel."""

    # The kernel that owns this shell (assigned by InProcessKernel.__init__).
    kernel = Instance('yap_kernel.inprocess.yapkernel.InProcessKernel',
                      allow_none=True)

    #-------------------------------------------------------------------------
    # InteractiveShell interface
    #-------------------------------------------------------------------------

    def enable_gui(self, gui=None):
        """Enable GUI integration for the kernel."""
        from yap_kernel.eventloops import enable_gui
        if not gui:
            gui = self.kernel.gui
        enable_gui(gui, kernel=self.kernel)
        self.active_eventloop = gui

    def enable_matplotlib(self, gui=None):
        """Enable matplotlib integration for the kernel."""
        if not gui:
            gui = self.kernel.gui
        return super(InProcessInteractiveShell, self).enable_matplotlib(gui)

    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime."""
        if not gui:
            gui = self.kernel.gui
        return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all,
                                                                   welcome_message)

    def closeq(self):
        """Close and discard the current YAP query, if any."""
        # NOTE(review): `self.q` is never initialized on this class (it is set
        # on InProcessKernel); this and run_cell look copied from a kernel
        # class -- confirm which object they are meant to live on.
        if self.q:
            self.q.close()
            self.q = None

    def run_cell(self, s, store_history=True, silent=False, shell_futures=True):

        """Run a complete IPython cell.

        Parameters
        ----------
        raw_cell : str
          The code (including IPython code such as %magic functions) to run.
        store_history : bool
          If True, the raw and translated cell will be stored in IPython's
          history. For user code calling back into IPython's machinery, this
          should be set to False.
        silent : bool
          If True, avoid side-effects, such as implicit displayhooks and
          and logging. silent=True forces store_history=False.
        shell_futures : bool
          If True, the code will share future statements with the interactive
          shell. It will both be affected by previous __future__ imports, and
          any __future__ imports in the code will affect the shell. If False,
          __future__ imports are not shared in either direction.

        Returns
        -------
        result : :class:`ExecutionResult`
        """

        # NOTE(review): this nested function is unreachable through
        # `self.numbervars(...)` below -- a nested def does not become a
        # method. `bindvars` and `ExecutionResult` are also undefined in this
        # module as written; confirm the intended imports/placement.
        def numbervars(self, l):
            return self.yapeng.fun(bindvars(l))

        result = ExecutionResult()

        # Blank cell: nothing to run, report success.
        if (not s) or s.isspace():
            self.shell.last_execution_succeeded = True
            return result

        if store_history:
            result.execution_count = self.shell.execution_count

        def error_before_exec(value):
            # Record a pre-execution failure on the result object.
            result.error_before_exec = value
            self.shell.last_execution_succeeded = False
            return result

        # Open a new query only when no previous one is still pending.
        if not self.q:
            try:
                self.q = self.yapeng.query(s)
            except SyntaxError:
                return error_before_exec( sys.exc_info()[1])

        cell = s  # cell has to exist so it can be stored/logged

        # Store raw and processed history
        # if not silent:
        #     self.shell..logger.log(cell, s)

        has_raised = False
        try:
            #f = io.StringIO()
            # with redirect_stdout(f):
            run = self.q.next()
            # print('{0}'.format(f.getvalue()))
            # Execute the user code
            if run:
                # Print each named variable binding of this solution.
                myvs = self.numbervars(self.q.namedVars())
                if myvs:
                    for eq in myvs:
                        name = eq[0]
                        binding = eq[1]
                        if name != binding:
                            print(name + " = " + str(binding))
                else:
                    print("yes")
                # A deterministic query has no further answers; release it.
                if self.q.deterministic():
                    self.closeq()
            else:
                print("No (more) answers")
                self.closeq()
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # SystemExit; consider `except Exception`.
            result.error_in_exec = sys.exc_info()[1]
            # self.showtraceback()
            has_raised = True
            self.closeq()

        self.shell.last_execution_succeeded = not has_raised
        result.result = self.shell.last_execution_succeeded
        # NOTE(review): after closeq() this prints None -- debug leftover?
        print( self.q )
        # Reset this so later displayed values do not modify the
        # ExecutionResult
        # self.displayhook.exec_result = None

        #self.events.trigger('post_execute')
        #if not silent:
        #    self.events.trigger('post_run_cell')

        if store_history:
            # Write output to the database. Does nothing unless
            # history output logging is enabled.
            # self.history_manager.store_output(self.execution_count)
            # Each cell is a *single* input, regardless of how many lines it has
            self.shell.execution_count += 1

        return result
|
||||
|
||||
# Register as a conforming InteractiveShell implementation.
InteractiveShellABC.register(InProcessInteractiveShell)
|
81
packages/python/yap_kernel/yap_kernel/inprocess/manager.py
Normal file
81
packages/python/yap_kernel/yap_kernel/inprocess/manager.py
Normal file
@ -0,0 +1,81 @@
|
||||
"""A kernel manager for in-process kernels."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from traitlets import Instance, DottedObjectName, default
|
||||
from jupyter_client.managerabc import KernelManagerABC
|
||||
from jupyter_client.manager import KernelManager
|
||||
from jupyter_client.session import Session
|
||||
|
||||
from .constants import INPROCESS_KEY
|
||||
|
||||
|
||||
class InProcessKernelManager(KernelManager):
    """A manager for an in-process kernel.

    This class implements the interface of
    `jupyter_client.kernelmanagerabc.KernelManagerABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.kernelmanager.KernelManager` for docstrings.
    """

    # The kernel process with which the KernelManager is communicating.
    kernel = Instance('yap_kernel.inprocess.yapkernel.InProcessKernel',
                      allow_none=True)
    # the client class for KM.client() shortcut
    client_class = DottedObjectName('yap_kernel.inprocess.BlockingInProcessKernelClient')

    @default('blocking_class')
    def _default_blocking_class(self):
        # Imported lazily to avoid a circular import with .blocking.
        from .blocking import BlockingInProcessKernelClient
        return BlockingInProcessKernelClient

    @default('session')
    def _default_session(self):
        # don't sign in-process messages
        return Session(key=INPROCESS_KEY, parent=self)

    #--------------------------------------------------------------------------
    # Kernel management methods
    #--------------------------------------------------------------------------

    def start_kernel(self, **kwds):
        """Create the in-process kernel (extra keyword args are ignored)."""
        # Imported here to avoid a circular import at module load time.
        from yap_kernel.inprocess.yapkernel import InProcessKernel
        self.kernel = InProcessKernel(parent=self, session=self.session)

    def shutdown_kernel(self):
        """Stop the kernel's IOPub thread and drop the kernel reference."""
        self.kernel.iopub_thread.stop()
        self._kill_kernel()

    def restart_kernel(self, now=False, **kwds):
        """Shut down and start a fresh kernel (``now`` is ignored)."""
        self.shutdown_kernel()
        self.start_kernel(**kwds)

    @property
    def has_kernel(self):
        # True between start_kernel() and shutdown_kernel().
        return self.kernel is not None

    def _kill_kernel(self):
        # Just drop the reference; there is no separate process to kill.
        self.kernel = None

    def interrupt_kernel(self):
        raise NotImplementedError("Cannot interrupt in-process kernel.")

    def signal_kernel(self, signum):
        raise NotImplementedError("Cannot signal in-process kernel.")

    def is_alive(self):
        """Alive simply means a kernel object currently exists."""
        return self.kernel is not None

    def client(self, **kwargs):
        """Create a client attached to this manager's kernel."""
        kwargs['kernel'] = self.kernel
        return super(InProcessKernelManager, self).client(**kwargs)


#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------

KernelManagerABC.register(InProcessKernelManager)
|
64
packages/python/yap_kernel/yap_kernel/inprocess/socket.py
Normal file
64
packages/python/yap_kernel/yap_kernel/inprocess/socket.py
Normal file
@ -0,0 +1,64 @@
|
||||
""" Defines a dummy socket implementing (part of) the zmq.Socket interface. """
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import abc
|
||||
import warnings
|
||||
try:
|
||||
from queue import Queue # Py 3
|
||||
except ImportError:
|
||||
from Queue import Queue # Py 2
|
||||
|
||||
import zmq
|
||||
|
||||
from traitlets import HasTraits, Instance, Int
|
||||
from ipython_genutils.py3compat import with_metaclass
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Generic socket interface
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class SocketABC(with_metaclass(abc.ABCMeta, object)):
    """Abstract base class describing the minimal zmq.Socket interface
    needed by the in-process machinery."""

    @abc.abstractmethod
    def recv_multipart(self, flags=0, copy=True, track=False):
        raise NotImplementedError

    @abc.abstractmethod
    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        raise NotImplementedError

    @classmethod
    def register(cls, other_cls):
        """Register ``other_cls`` as a virtual subclass.

        Warns on anything except DummySocket: this ABC is kept only for
        backward compatibility.
        """
        if other_cls is not DummySocket:
            warnings.warn("SocketABC is deprecated since yap_kernel version 4.5.0.",
                          DeprecationWarning, stacklevel=2)
        abc.ABCMeta.register(cls, other_cls)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Dummy socket class
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class DummySocket(HasTraits):
    """ A dummy socket implementing (part of) the zmq.Socket interface. """

    # In-memory message queue standing in for the zmq transport.
    queue = Instance(Queue, ())
    message_sent = Int(0) # Should be an Event
    context = Instance(zmq.Context)

    def _context_default(self):
        return zmq.Context.instance()

    #-------------------------------------------------------------------------
    # Socket interface
    #-------------------------------------------------------------------------

    def recv_multipart(self, flags=0, copy=True, track=False):
        # Raises queue.Empty when nothing has been sent yet.
        return self.queue.get_nowait()

    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        msg_parts = list(map(zmq.Message, msg_parts))
        self.queue.put_nowait(msg_parts)
        # The increment notifies trait observers that a message was sent.
        self.message_sent += 1
|
||||
|
||||
# Register DummySocket as the sanctioned SocketABC implementation.
SocketABC.register(DummySocket)
|
@ -0,0 +1,76 @@
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
|
||||
from ipykernel.inprocess.manager import InProcessKernelManager
|
||||
from ipykernel.inprocess.ipkernel import InProcessKernel
|
||||
from ipykernel.tests.utils import assemble_output
|
||||
from IPython.testing.decorators import skipif_not_matplotlib
|
||||
from IPython.utils.io import capture_output
|
||||
from ipython_genutils import py3compat
|
||||
|
||||
if py3compat.PY3:
|
||||
from io import StringIO
|
||||
else:
|
||||
from StringIO import StringIO
|
||||
|
||||
|
||||
class InProcessKernelTestCase(unittest.TestCase):
    """End-to-end tests of the in-process kernel through a blocking client."""

    def setUp(self):
        # Build a full manager/kernel/client stack for each test.
        self.km = InProcessKernelManager()
        self.km.start_kernel()
        self.kc = self.km.client()
        self.kc.start_channels()
        self.kc.wait_for_ready()

    @skipif_not_matplotlib
    def test_pylab(self):
        """Does %pylab work in the in-process kernel?"""
        kc = self.kc
        kc.execute('%pylab')
        out, err = assemble_output(kc.iopub_channel)
        self.assertIn('matplotlib', out)

    def test_raw_input(self):
        """ Does the in-process kernel handle raw_input correctly?
        """
        io = StringIO('foobar\n')
        sys_stdin = sys.stdin
        sys.stdin = io
        try:
            if py3compat.PY3:
                self.kc.execute('x = input()')
            else:
                self.kc.execute('x = raw_input()')
        finally:
            # Always restore the real stdin, even if execute raises.
            sys.stdin = sys_stdin
        self.assertEqual(self.km.kernel.shell.user_ns.get('x'), 'foobar')

    def test_stdout(self):
        """ Does the in-process kernel correctly capture IO?
        """
        kernel = InProcessKernel()

        with capture_output() as io:
            kernel.shell.run_cell('print("foo")')
        self.assertEqual(io.stdout, 'foo\n')

        kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
        kernel.frontends.append(kc)
        kc.execute('print("bar")')
        out, err = assemble_output(kc.iopub_channel)
        self.assertEqual(out, 'bar\n')

    def test_getpass_stream(self):
        "Tests that kernel getpass accept the stream parameter"
        kernel = InProcessKernel()
        kernel._allow_stdin = True
        # Stub out the request so getpass doesn't block waiting for input.
        kernel._input_request = lambda *args, **kwargs : None

        kernel.getpass(stream='non empty')
|
@ -0,0 +1,115 @@
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
|
||||
from ipykernel.inprocess.manager import InProcessKernelManager
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Test case
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class InProcessKernelManagerTestCase(unittest.TestCase):
    """Checks the in-process kernel manager against the KernelManager API.

    The deprecated unittest aliases (``assert_``, ``assertEquals``,
    ``assertNotEquals``) were replaced by their modern spellings; the aliases
    are removed entirely in Python 3.12.
    """

    def setUp(self):
        self.km = InProcessKernelManager()

    def tearDown(self):
        # Only shut down if a test actually started a kernel.
        if self.km.has_kernel:
            self.km.shutdown_kernel()

    def test_interface(self):
        """ Does the in-process kernel manager implement the basic KM interface?
        """
        km = self.km
        self.assertTrue(not km.has_kernel)

        km.start_kernel()
        self.assertTrue(km.has_kernel)
        self.assertIsNotNone(km.kernel)

        kc = km.client()
        self.assertTrue(not kc.channels_running)

        kc.start_channels()
        self.assertTrue(kc.channels_running)

        # Restarting must produce a *new* kernel object.
        old_kernel = km.kernel
        km.restart_kernel()
        self.assertIsNotNone(km.kernel)
        self.assertNotEqual(km.kernel, old_kernel)

        km.shutdown_kernel()
        self.assertTrue(not km.has_kernel)

        self.assertRaises(NotImplementedError, km.interrupt_kernel)
        self.assertRaises(NotImplementedError, km.signal_kernel, 9)

        kc.stop_channels()
        self.assertTrue(not kc.channels_running)

    def test_execute(self):
        """ Does executing code in an in-process kernel work?
        """
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        kc.execute('foo = 1')
        self.assertEqual(km.kernel.shell.user_ns['foo'], 1)

    def test_complete(self):
        """ Does requesting completion from an in-process kernel work?
        """
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        km.kernel.shell.push({'my_bar': 0, 'my_baz': 1})
        kc.complete('my_ba', 5)
        msg = kc.get_shell_msg()
        self.assertEqual(msg['header']['msg_type'], 'complete_reply')
        self.assertEqual(sorted(msg['content']['matches']),
                         ['my_bar', 'my_baz'])

    def test_inspect(self):
        """ Does requesting object information from an in-process kernel work?
        """
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        km.kernel.shell.user_ns['foo'] = 1
        kc.inspect('foo')
        msg = kc.get_shell_msg()
        self.assertEqual(msg['header']['msg_type'], 'inspect_reply')
        content = msg['content']
        assert content['found']
        text = content['data']['text/plain']
        self.assertIn('int', text)

    def test_history(self):
        """ Does requesting history from an in-process kernel work?
        """
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        kc.execute('1')
        kc.history(hist_access_type='tail', n=1)
        msg = kc.shell_channel.get_msgs()[-1]
        self.assertEqual(msg['header']['msg_type'], 'history_reply')
        history = msg['content']['history']
        self.assertEqual(len(history), 1)
        self.assertEqual(history[0][2], '1')
||||
|
||||
|
||||
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
251
packages/python/yap_kernel/yap_kernel/interactiveshell.py
Normal file
251
packages/python/yap_kernel/yap_kernel/interactiveshell.py
Normal file
@ -0,0 +1,251 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""YAP Stuff for Main IPython class."""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
|
||||
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
|
||||
# Copyright (C) 2008-2011 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import __future__
|
||||
import abc
|
||||
import ast
|
||||
import atexit
|
||||
import functools
|
||||
import os
|
||||
import re
|
||||
import runpy
|
||||
import signal
|
||||
|
||||
import sys
|
||||
import tempfile
|
||||
import traceback
|
||||
import types
|
||||
import subprocess
|
||||
import warnings
|
||||
import yap4py.yapi
|
||||
import yap
|
||||
from io import open as io_open
|
||||
|
||||
from pickleshare import PickleShareDB
|
||||
|
||||
from traitlets.config.configurable import SingletonConfigurable
|
||||
from IPython.core import oinspect
|
||||
from IPython.core import magic
|
||||
from IPython.core import page
|
||||
from IPython.core import prefilter
|
||||
from IPython.core import shadowns
|
||||
from IPython.core import ultratb
|
||||
from IPython.core import interactiveshell
|
||||
from IPython.core.alias import Alias, AliasManager
|
||||
from IPython.core.autocall import ExitAutocall
|
||||
from IPython.core.builtin_trap import BuiltinTrap
|
||||
from IPython.core.events import EventManager, available_events
|
||||
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
|
||||
from IPython.core.debugger import Pdb
|
||||
from IPython.core.display_trap import DisplayTrap
|
||||
from IPython.core.displayhook import DisplayHook
|
||||
from IPython.core.displaypub import DisplayPublisher
|
||||
from IPython.core.error import InputRejected, UsageError
|
||||
from IPython.core.extensions import ExtensionManager
|
||||
from IPython.core.formatters import DisplayFormatter
|
||||
from IPython.core.history import HistoryManager
|
||||
from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
|
||||
from IPython.core.logger import Logger
|
||||
from IPython.core.macro import Macro
|
||||
from IPython.core.payload import PayloadManager
|
||||
from IPython.core.prefilter import PrefilterManager
|
||||
from IPython.core.profiledir import ProfileDir
|
||||
from IPython.core.usage import default_banner
|
||||
from IPython.core.interactiveshell import InteractiveShellABC, InteractiveShell, ExecutionResult
|
||||
from IPython.testing.skipdoctest import skip_doctest
|
||||
from IPython.utils import PyColorize
|
||||
from IPython.utils import io
|
||||
from IPython.utils import py3compat
|
||||
from IPython.utils import openpy
|
||||
from IPython.utils.decorators import undoc
|
||||
from IPython.utils.io import ask_yes_no
|
||||
from IPython.utils.ipstruct import Struct
|
||||
from IPython.paths import get_ipython_dir
|
||||
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
|
||||
from IPython.utils.process import system, getoutput
|
||||
from IPython.utils.py3compat import (builtin_mod, unicode_type, string_types,
|
||||
with_metaclass, iteritems)
|
||||
from IPython.utils.strdispatch import StrDispatch
|
||||
from IPython.utils.syspathcontext import prepended_to_syspath
|
||||
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
|
||||
from IPython.utils.tempdir import TemporaryDirectory
|
||||
from traitlets import (
|
||||
Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
|
||||
observe, default,
|
||||
)
|
||||
from warnings import warn
|
||||
from logging import error
|
||||
from collections import namedtuple
|
||||
|
||||
# Prolog term constructors: each namedtuple instance is handed to the YAP
# engine (see YAPInteraction.__init__ / numbervars below), presumably being
# converted into the corresponding compound term by yap4py.
use_module = namedtuple('use_module', 'file')
bindvars = namedtuple('bindvars', 'list')
library = namedtuple('library', 'list')
v = namedtuple('_', 'slot')
# NOTE(review): the variable name 'load_fieos' looks like a typo for
# 'load_files' (the term name it constructs) — confirm no external module
# imports it under this name before renaming.
load_fieos = namedtuple('load_files', 'file ofile args')
|
||||
|
||||
|
||||
class YAPInteraction:
    """An enhanced, interactive shell for YAP.

    Wraps a ``yap.YAPEngine`` on behalf of an enclosing IPython shell:
    ``run_cell`` parses the cell text, opens (or resumes) a Prolog query,
    iterates over solutions, and prints variable bindings.
    """

    def __init__(self, shell, **kwargs):
        """Create (at most once) the YAP engine backing this shell.

        Parameters
        ----------
        shell : InteractiveShell
            The IPython shell this Prolog engine is attached to.
        """
        # Re-entry guard: if an engine was already built on this instance,
        # keep it instead of constructing a second one.
        try:
            if self.yapeng:
                return
        except AttributeError:
            # first construction: no engine attribute yet
            pass
        here = os.path.abspath(os.path.dirname(__file__))
        # Prolog library and engine DLL directories live in the sibling
        # yap4py package.
        yap_lib_path = os.path.join(here, "../yap4py/prolog")
        yap_dll_path = os.path.join(here, "../yap4py")
        self.args = yap.YAPEngineArgs()
        self.args.setYapLibDir(yap_dll_path)
        self.args.setYapShareDir(yap_lib_path)
        # args.setYapPrologBootFile(os.path.join(yap_lib_path, "startup.yss"))
        self.yapeng = yap.YAPEngine(self.args)
        self.q = None  # currently open query, if any
        self.yapeng.goal(use_module(library('yapi')))
        self.shell = shell
        self.run = False

    def eng(self):
        """Return the underlying yap.YAPEngine."""
        return self.yapeng

    def closeq(self):
        """Close and forget the currently open query, if any."""
        if self.q:
            self.q.close()
            self.q = None

    def numbervars(self, l):
        """Ask the engine to bind/number the variables in *l*."""
        return self.yapeng.fun(bindvars(l))

    def run_cell(self, s, store_history=True, silent=False,
                 shell_futures=True):
        """Run a complete cell of Prolog code.

        Parameters
        ----------
        s : str
            The goal text to run.  A trailing ``??`` asks for more
            solutions: ``goal??3`` retries up to 3 times, ``goal?? `` once.
        store_history : bool
            If True, record the execution count on the result.
        silent : bool
            If True, skip post-run event triggers.
        shell_futures : bool
            Accepted for interface compatibility; unused for Prolog cells.

        Returns
        -------
        result : :class:`ExecutionResult`
        """
        result = ExecutionResult()

        if store_history:
            result.execution_count = self.shell.execution_count

        def error_before_exec(value):
            # Record a failure that happened before any goal ran.
            result.error_before_exec = value
            self.shell.last_execution_succeeded = False
            return result

        # inspect for ?? in the text
        # BUG FIX: the original stripped '\n\j\r\t ' — '\j' is a typo; the
        # intent is to trim surrounding whitespace.
        st = s.strip('\n\r\t ')
        if st:
            (p0, pm, pf) = st.rpartition('??')
            if pm == '??':
                # BUG FIX: str.isdigit()/str.isspace() take no argument; the
                # original passed an undefined name 'p' (TypeError at runtime).
                if pf.isdigit():
                    maxits = int(pf) * 2
                    s = p0
                elif pf.isspace():
                    maxits = 1
                    s = p0
                else:
                    s = st
                    maxits = 2
            else:
                # business as usual: run the goal, show up to one extra answer
                s = st
                maxits = 2
        else:
            # empty cell: "next, please"
            # BUG FIX: 'maxis' was a typo for 'maxits' (NameError later), and
            # 'self.qclose()' for the method actually named 'closeq'.
            maxits = 2
            self.closeq()

        if not self.q:
            try:
                if s:
                    self.q = self.yapeng.query(s)
                else:
                    # Nothing to run and no pending query.
                    # BUG FIX: return the ExecutionResult (documented contract)
                    # instead of a bare None.
                    return result
            except SyntaxError:
                return error_before_exec(sys.exc_info()[1])

        cell = s  # cell has to exist so it can be stored/logged
        has_raised = False
        self.run = True
        try:
            while self.run and maxits != 0:
                # Ask the engine for the next solution.
                self.run = self.q.next()
                if self.run:
                    myvs = self.numbervars(self.q.namedVars())
                    if myvs:
                        # Print each non-trivial variable binding.
                        for name, binding in myvs:
                            if name != binding:
                                print(name + " = " + str(binding))
                    else:
                        print("yes")
                    if self.q.deterministic():
                        # No choice points left: the query is exhausted.
                        # (closeq() already resets self.q to None.)
                        self.closeq()
                        self.run = False
                    else:
                        maxits -= 2
                else:
                    print("No (more) answers")
                    self.closeq()
                    self.run = False
        except Exception:
            result.error_in_exec = sys.exc_info()[1]
            has_raised = True
            self.closeq()

        self.shell.last_execution_succeeded = not has_raised
        result.result = self.shell.last_execution_succeeded

        # Fire post-execution hooks on the owning shell.
        # BUG FIX: the original referenced self.events (nonexistent here) and
        # the garbled event name 'post_self.run_cell'; the shell owns the
        # EventManager and the standard event is 'post_run_cell'.
        self.shell.events.trigger('post_execute')
        if not silent:
            self.shell.events.trigger('post_run_cell')

        return result
|
383
packages/python/yap_kernel/yap_kernel/iostream.py
Normal file
383
packages/python/yap_kernel/yap_kernel/iostream.py
Normal file
@ -0,0 +1,383 @@
|
||||
# coding: utf-8
|
||||
"""Wrappers for forwarding stdout/stderr over zmq"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
import atexit
|
||||
from binascii import b2a_hex
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import warnings
|
||||
from io import StringIO, UnsupportedOperation, TextIOBase
|
||||
|
||||
import zmq
|
||||
from zmq.eventloop.ioloop import IOLoop
|
||||
from zmq.eventloop.zmqstream import ZMQStream
|
||||
|
||||
from jupyter_client.session import extract_header
|
||||
|
||||
from ipython_genutils import py3compat
|
||||
from ipython_genutils.py3compat import unicode_type
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Globals
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# Return values of IOPubThread._check_mp_mode(): MASTER means this process
# created the IOPub thread; CHILD means we are a forked subprocess and must
# pipe output back to the master over zmq.
MASTER = 0
CHILD = 1
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# IO classes
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------

        socket: zmq.PUB Socket
            the socket on which messages will be sent.
        pipe: bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # Remember which pid owns this thread so forked children can detect
        # the fork (see _is_master_process / _check_mp_mode).
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop()
        if pipe:
            self._setup_pipe_in()
        # Thread-local storage: each thread gets its own PUSH pipe into the
        # IO loop (see the _event_pipe property).
        self._local = threading.local()
        # Pending callbacks keyed by a random event id, consumed in the IO
        # thread by _handle_event.
        self._events = {}
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.start()
        # start() only returns once the loop is stopped; release its fds.
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        # inproc transport: in-process socket pair scoped to this context.
        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe"""
        # msg[0] is the event id sent by schedule(); run and discard the
        # associated callback.
        event_id = msg[0]
        event_f = self._events.pop(event_id)
        event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            # Binding failed: degrade gracefully rather than crash — we just
            # lose subprocess output forwarding.
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                "\nsubprocess output will be unavailable."
            )
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # First frame must match the uuid handed to children; anything else
        # is noise on the port.
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        # True in the process that created this thread; False after a fork.
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        # Stop the loop from inside its own thread, then wait for it to exit.
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # Close this thread's event pipe, if one was ever created.
        if hasattr(self._local, 'event_pipe'):
            self._local.event_pipe.close()

    def close(self):
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            event_id = os.urandom(16)
            # Guard against the (vanishingly unlikely) random-id collision.
            while event_id in self._events:
                event_id = os.urandom(16)
            self._events[event_id] = f
            self._event_pipe.send(event_id)
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda : self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
|
||||
|
||||
|
||||
class BackgroundSocket(object):
    """Wrapper around IOPub thread that provides zmq send[_multipart].

    Attribute reads/writes other than ``io_thread`` are forwarded (with a
    DeprecationWarning) to the wrapped thread's zmq socket.
    """
    io_thread = None

    def __init__(self, io_thread):
        self.io_thread = io_thread

    def __getattr__(self, attr):
        """Wrap socket attr access for backward-compatibility"""
        if attr.startswith('__') and attr.endswith('__'):
            # don't wrap magic methods
            # BUG FIX: the original called super().__getattr__, which does not
            # exist on object and produced a confusing error; raise the
            # conventional AttributeError instead.
            raise AttributeError("%s has no attribute %r" %
                                 (self.__class__.__name__, attr))
        if hasattr(self.io_thread.socket, attr):
            warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
                DeprecationWarning, stacklevel=2)
            return getattr(self.io_thread.socket, attr)
        raise AttributeError("%s has no attribute %r" %
                             (self.__class__.__name__, attr))

    def __setattr__(self, attr, value):
        # BUG FIX: the original condition was mis-parenthesized as
        # `attr.startswith('__' and attr.endswith('__'))`, which calls
        # startswith with a bool and raises TypeError for every attribute
        # other than 'io_thread'.
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
            super(BackgroundSocket, self).__setattr__(attr, value)
        else:
            warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
                DeprecationWarning, stacklevel=2)
            # Forward the assignment to the real zmq socket.
            setattr(self.io_thread.socket, attr, value)

    def send(self, msg, *args, **kwargs):
        return self.send_multipart([msg], *args, **kwargs)

    def send_multipart(self, *args, **kwargs):
        """Schedule send in IO thread"""
        return self.io_thread.send_multipart(*args, **kwargs)
|
||||
|
||||
|
||||
class OutStream(TextIOBase):
    """A file like object that publishes the stream to a 0MQ PUB socket.

    Output is handed off to an IO Thread
    """

    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.2
    topic = None  # IOPub topic (bytes); set per-stream in __init__
    encoding = 'UTF-8'

    def __init__(self, session, pub_thread, name, pipe=None):
        """Create the stream.

        Parameters
        ----------
        session : jupyter_client Session
            Used to build and send 'stream' messages.
        pub_thread : IOPubThread
            The background thread that performs the actual zmq sends.
        name : str
            Stream name ('stdout' or 'stderr' by convention).
        pipe : deprecated, ignored.
        """
        if pipe is not None:
            warnings.warn("pipe argument to OutStream is deprecated and ignored",
                DeprecationWarning)
        # This is necessary for compatibility with Python built-in streams
        self.session = session
        if not isinstance(pub_thread, IOPubThread):
            # Backward-compat: given socket, not thread. Wrap in a thread.
            warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
                DeprecationWarning, stacklevel=2)
            pub_thread = IOPubThread(pub_thread)
            pub_thread.start()
        self.pub_thread = pub_thread
        self.name = name
        self.topic = b'stream.' + py3compat.cast_bytes(name)
        self.parent_header = {}
        # pid that created this stream; forked children flush differently.
        self._master_pid = os.getpid()
        # True while a delayed flush is already queued on the IO loop.
        self._flush_pending = False
        self._io_loop = pub_thread.io_loop
        self._new_buffer()

    def _is_master_process(self):
        # True in the process that created this stream; False after a fork.
        return os.getpid() == self._master_pid

    def set_parent(self, parent):
        # Adopt the header of the request currently being handled, so output
        # is routed to the right frontend.
        self.parent_header = extract_header(parent)

    def close(self):
        self.pub_thread = None

    @property
    def closed(self):
        return self.pub_thread is None

    def _schedule_flush(self):
        """schedule a flush in the IO thread

        call this on write, to indicate that flush should be called soon.
        """
        if self._flush_pending:
            return
        self._flush_pending = True

        # add_timeout has to be handed to the io thread via event pipe
        def _schedule_in_thread():
            self._io_loop.call_later(self.flush_interval, self._flush)
        self.pub_thread.schedule(_schedule_in_thread)

    def flush(self):
        """trigger actual zmq send

        send will happen in the background thread
        """
        if self.pub_thread.thread.is_alive():
            # wait for flush to actually get through:
            self.pub_thread.schedule(self._flush)
            # Queue an Event.set after the flush and wait on it, so this call
            # only returns once the IO thread has processed the flush.
            evt = threading.Event()
            self.pub_thread.schedule(evt.set)
            evt.wait()
        else:
            self._flush()

    def _flush(self):
        """This is where the actual send happens.

        _flush should generally be called in the IO thread,
        unless the thread has been destroyed (e.g. forked subprocess).
        """
        self._flush_pending = False
        data = self._flush_buffer()
        if data:
            # FIXME: this disables Session's fork-safe check,
            # since pub_thread is itself fork-safe.
            # There should be a better way to do this.
            self.session.pid = os.getpid()
            content = {u'name':self.name, u'text':data}
            self.session.send(self.pub_thread, u'stream', content=content,
                parent=self.parent_header, ident=self.topic)

    def write(self, string):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            # Make sure that we're handling unicode
            if not isinstance(string, unicode_type):
                string = string.decode(self.encoding, 'replace')

            is_child = (not self._is_master_process())
            # only touch the buffer in the IO thread to avoid races
            self.pub_thread.schedule(lambda : self._buffer.write(string))
            if is_child:
                # newlines imply flush in subprocesses
                # mp.Pool cannot be trusted to flush promptly (or ever),
                # and this helps.
                if '\n' in string:
                    self.flush()
            else:
                self._schedule_flush()

    def writelines(self, sequence):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            for string in sequence:
                self.write(string)

    def _flush_buffer(self):
        """clear the current buffer and return the current buffer data.

        This should only be called in the IO thread.
        """
        data = u''
        if self._buffer is not None:
            # Swap in a fresh buffer before draining the old one, so
            # concurrent writes land in the new buffer.
            buf = self._buffer
            self._new_buffer()
            data = buf.getvalue()
            buf.close()
        return data

    def _new_buffer(self):
        self._buffer = StringIO()
|
173
packages/python/yap_kernel/yap_kernel/jsonutil.py
Normal file
173
packages/python/yap_kernel/yap_kernel/jsonutil.py
Normal file
@ -0,0 +1,173 @@
|
||||
"""Utilities to manipulate JSON objects."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import math
|
||||
import re
|
||||
import types
|
||||
from datetime import datetime
|
||||
import numbers
|
||||
|
||||
try:
|
||||
# base64.encodestring is deprecated in Python 3.x
|
||||
from base64 import encodebytes
|
||||
except ImportError:
|
||||
# Python 2.x
|
||||
from base64 import encodestring as encodebytes
|
||||
|
||||
from ipython_genutils import py3compat
|
||||
from ipython_genutils.py3compat import unicode_type, iteritems
|
||||
from ipython_genutils.encoding import DEFAULT_ENCODING
|
||||
# Name of the iterator-advance method: '__next__' on Python 3, 'next' on 2.
next_attr_name = '__next__' if py3compat.PY3 else 'next'

#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------

# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT=re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$")

# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")

#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------


# constants for identifying png/jpeg data
PNG = b'\x89PNG\r\n\x1a\n'
# front of PNG base64-encoded
PNG64 = b'iVBORw0KG'
JPEG = b'\xff\xd8'
# front of JPEG base64-encoded
JPEG64 = b'/9'
# front of PDF base64-encoded
PDF64 = b'JVBER'
|
||||
|
||||
def encode_images(format_dict):
    """b64-encodes images in a displaypub format dict

    Perhaps this should be handled in json_clean itself?

    Parameters
    ----------

    format_dict : dict
        A dictionary of display data keyed by mime-type

    Returns
    -------

    format_dict : dict
        A copy of the same dictionary,
        but binary image data ('image/png', 'image/jpeg' or 'application/pdf')
        is base64-encoded.

    """
    encoded = format_dict.copy()

    # Each binary mime-type is paired with the prefix its base64-encoded form
    # starts with, so already-encoded payloads are not encoded twice.
    # (Refactored from three copy-pasted stanzas into one loop.)
    b64_prefixes = (
        ('image/png', PNG64),
        ('image/jpeg', JPEG64),
        ('application/pdf', PDF64),
    )
    for mime, b64_prefix in b64_prefixes:
        data = format_dict.get(mime)
        if isinstance(data, bytes):
            # make sure we don't double-encode
            if not data.startswith(b64_prefix):
                data = encodebytes(data)
            encoded[mime] = data.decode('ascii')

    return encoded
|
||||
|
||||
|
||||
def json_clean(obj):
    """Clean an object to ensure it's safe to encode in JSON.

    Atomic, immutable objects are returned unmodified. Sets and tuples are
    converted to lists, lists are copied and dicts are also copied.

    Note: dicts whose keys could cause collisions upon encoding (such as a dict
    with both the number 1 and the string '1' as keys) will cause a ValueError
    to be raised.

    Parameters
    ----------
    obj : any python object

    Returns
    -------
    out : object

        A version of the input which will not cause an encoding error when
        encoded as JSON. Note that this function does not *encode* its inputs,
        it simply sanitizes it so that there will be no encoding errors later.

    """
    # types that are 'atomic' and ok in json as-is.
    atomic_ok = (unicode_type, type(None))

    # containers that we need to convert into lists
    container_to_list = (tuple, set, types.GeneratorType)

    # Since bools are a subtype of Integrals, which are a subtype of Reals,
    # we have to check them in that order.
    # NOTE: the order of the isinstance checks below is load-bearing; do not
    # reorder them.

    if isinstance(obj, bool):
        return obj

    if isinstance(obj, numbers.Integral):
        # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
        return int(obj)

    if isinstance(obj, numbers.Real):
        # cast out-of-range floats to their reprs
        if math.isnan(obj) or math.isinf(obj):
            return repr(obj)
        return float(obj)

    if isinstance(obj, atomic_ok):
        return obj

    if isinstance(obj, bytes):
        # bytes are not JSON-serializable; decode with replacement so bad
        # bytes cannot raise.
        return obj.decode(DEFAULT_ENCODING, 'replace')

    # Convert tuples/sets/generators (and any other iterator) to a list,
    # then fall through to the list branch below.
    if isinstance(obj, container_to_list) or (
        hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
        obj = list(obj)

    if isinstance(obj, list):
        return [json_clean(x) for x in obj]

    if isinstance(obj, dict):
        # First, validate that the dict won't lose data in conversion due to
        # key collisions after stringification. This can happen with keys like
        # True and 'true' or 1 and '1', which collide in JSON.
        nkeys = len(obj)
        nkeys_collapsed = len(set(map(unicode_type, obj)))
        if nkeys != nkeys_collapsed:
            raise ValueError('dict cannot be safely converted to JSON: '
                'key collision would lead to dropped values')
        # If all OK, proceed by making the new dict that will be json-safe
        out = {}
        for k,v in iteritems(obj):
            out[unicode_type(k)] = json_clean(v)
        return out
    if isinstance(obj, datetime):
        return obj.strftime(ISO8601)

    # we don't understand it, it's probably an unserializable object
    raise ValueError("Can't clean for JSON: %r" % obj)
|
491
packages/python/yap_kernel/yap_kernel/kernelapp.py
Normal file
491
packages/python/yap_kernel/yap_kernel/kernelapp.py
Normal file
@ -0,0 +1,491 @@
|
||||
"""An Application for launching a kernel"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import atexit
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import traceback
|
||||
import logging
|
||||
|
||||
from tornado import ioloop
|
||||
import zmq
|
||||
from zmq.eventloop import ioloop as zmq_ioloop
|
||||
from zmq.eventloop.zmqstream import ZMQStream
|
||||
|
||||
from IPython.core.application import (
|
||||
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
|
||||
)
|
||||
from IPython.core.profiledir import ProfileDir
|
||||
from IPython.core.shellapp import (
|
||||
InteractiveShellApp, shell_flags, shell_aliases
|
||||
)
|
||||
from IPython.utils import io
|
||||
from ipython_genutils.path import filefind, ensure_dir_exists
|
||||
from traitlets import (
|
||||
Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default
|
||||
)
|
||||
from ipython_genutils.importstring import import_item
|
||||
from jupyter_core.paths import jupyter_runtime_dir
|
||||
from jupyter_client import write_connection_file
|
||||
from jupyter_client.connect import ConnectionFileMixin
|
||||
|
||||
# local imports
|
||||
from .iostream import IOPubThread
|
||||
from .heartbeat import Heartbeat
|
||||
from .yapkernel import YAPKernel
|
||||
from .parentpoller import ParentPollerUnix, ParentPollerWindows
|
||||
from jupyter_client.session import (
|
||||
Session, session_flags, session_aliases,
|
||||
)
|
||||
from .zmqshell import ZMQInteractiveShell
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Flags and Aliases
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# Command-line aliases: short option name -> traitlets config target.
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
    'ip' : 'YAPKernelApp.ip',
    'hb' : 'YAPKernelApp.hb_port',
    'shell' : 'YAPKernelApp.shell_port',
    'iopub' : 'YAPKernelApp.iopub_port',
    'stdin' : 'YAPKernelApp.stdin_port',
    'control' : 'YAPKernelApp.control_port',
    'f' : 'YAPKernelApp.connection_file',
    'transport': 'YAPKernelApp.transport',
})

# Command-line flags: flag name -> (config overrides, help text).
kernel_flags = dict(base_flags)
kernel_flags.update({
    'no-stdout' : (
            {'YAPKernelApp' : {'no_stdout' : True}},
            "redirect stdout to the null device"),
    'no-stderr' : (
            {'YAPKernelApp' : {'no_stderr' : True}},
            "redirect stderr to the null device"),
    'pylab' : (
        {'YAPKernelApp' : {'pylab' : 'auto'}},
        """Pre-load matplotlib and numpy for interactive use with
    the default matplotlib backend."""),
})

# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)

# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)

# Message printed at startup when run via the `ipython kernel` entry point.
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.

To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.

To read more about this, see https://github.com/ipython/ipython/issues/2049

"""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Application class for starting an IPython Kernel
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class YAPKernelApp(BaseIPythonApplication, InteractiveShellApp,
|
||||
ConnectionFileMixin):
|
||||
name='YAP Kernel'
|
||||
aliases = Dict(kernel_aliases)
|
||||
flags = Dict(kernel_flags)
|
||||
classes = [YAPKernel, ZMQInteractiveShell, ProfileDir, Session]
|
||||
# the kernel class, as an importstring
|
||||
kernel_class = Type('yap_kernel.yapkernel.YAPKernel',
|
||||
klass='yap_kernel.yapkernel.YAPKernel',
|
||||
help="""The Kernel subclass to be used.
|
||||
|
||||
This should allow easy re-use of the YAPKernelApp entry point
|
||||
to configure and launch kernels other than IPython's own.
|
||||
""").tag(config=True)
|
||||
kernel = Any()
|
||||
poller = Any() # don't restrict this even though current pollers are all Threads
|
||||
heartbeat = Instance(Heartbeat, allow_none=True)
|
||||
ports = Dict()
|
||||
|
||||
subcommands = {
|
||||
'install': (
|
||||
'yap_kernel.kernelspec.InstallYAPKernelSpecApp',
|
||||
'Install the YAP kernel'
|
||||
),
|
||||
}
|
||||
|
||||
# connection info:
|
||||
connection_dir = Unicode()
|
||||
|
||||
@default('connection_dir')
def _default_connection_dir(self):
    """Default the connection-file directory to Jupyter's runtime dir."""
    runtime_dir = jupyter_runtime_dir()
    return runtime_dir
@property
def abs_connection_file(self):
    """Absolute path of the connection file.

    A bare filename is resolved against ``connection_dir``; a path with
    any directory component is returned unchanged.
    """
    cf = self.connection_file
    if os.path.basename(cf) != cf:
        # already contains a directory component
        return cf
    return os.path.join(self.connection_dir, cf)
# streams, etc.
no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
# factories are import strings so alternative implementations can be configured
outstream_class = DottedObjectName('yap_kernel.iostream.OutStream',
    help="The importstring for the OutStream factory").tag(config=True)
displayhook_class = DottedObjectName('yap_kernel.displayhook.ZMQDisplayHook',
    help="The importstring for the DisplayHook factory").tag(config=True)

# polling
# JPY_PARENT_PID / JPY_INTERRUPT_EVENT are set by the launching frontend
# (e.g. the notebook server); 0 means "not provided".
parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0),
    help="""kill this process if its parent dies. On Windows, the argument
    specifies the HANDLE of the parent process, otherwise it is simply boolean.
    """).tag(config=True)
interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0),
    help="""ONLY USED ON WINDOWS
    Interrupt this process when the parent is signaled.
    """).tag(config=True)
def init_crash_handler(self):
    """Route uncaught exceptions to self.excepthook (the real stderr)."""
    sys.excepthook = self.excepthook
def excepthook(self, etype, evalue, tb):
    """Print an uncaught traceback to the real stderr.

    The zmq-forwarding stream may already be broken when we crash, so
    bypass it and use the original ``sys.__stderr__``.
    """
    real_stderr = sys.__stderr__
    traceback.print_exception(etype, evalue, tb, file=real_stderr)
def init_poller(self):
    """Start a parent-process poller so this kernel exits when its parent dies.

    On Windows the poller uses the interrupt event / parent HANDLE from the
    JPY_* environment variables; elsewhere a Unix ppid-watcher is used.
    """
    if sys.platform == 'win32':
        if self.interrupt or self.parent_handle:
            self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
    elif self.parent_handle and self.parent_handle != 1:
        # PID 1 (init) is special and will never go away,
        # only be reassigned.
        # Parent polling doesn't work if ppid == 1 to start with.
        self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
    """Bind zmq socket ``s`` on the configured transport and return the port.

    For tcp, ``port <= 0`` picks a random free port.  For ipc, ``port`` is a
    numeric suffix on the ip/path; ``port <= 0`` probes for the first unused
    suffix on disk.
    """
    iface = '%s://%s' % (self.transport, self.ip)
    if self.transport == 'tcp':
        if port <= 0:
            port = s.bind_to_random_port(iface)
        else:
            s.bind("tcp://%s:%i" % (self.ip, port))
    elif self.transport == 'ipc':
        if port <= 0:
            # probe for the first path suffix not already in use
            port = 1
            path = "%s-%i" % (self.ip, port)
            while os.path.exists(path):
                port = port + 1
                path = "%s-%i" % (self.ip, port)
        else:
            path = "%s-%i" % (self.ip, port)
        s.bind("ipc://%s" % path)
    return port
def write_connection_file(self):
    """write connection info to JSON file"""
    cf = self.abs_connection_file
    self.log.debug("Writing connection file: %s", cf)
    # delegate the actual JSON serialization to jupyter_client
    write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
        shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
        iopub_port=self.iopub_port, control_port=self.control_port)
def cleanup_connection_file(self):
    """Remove the connection file (best effort), then any IPC sockets."""
    cf = self.abs_connection_file
    self.log.debug("Cleaning up connection file: %s", cf)
    try:
        os.remove(cf)
    except (IOError, OSError):
        # already gone or never written -- nothing to clean up
        pass
    self.cleanup_ipc_files()
def init_connection_file(self):
    """Locate or create the connection file.

    Without an explicit --connection-file, a kernel-<pid>.json name is
    used.  If the file cannot be found, this process owns it: the
    directory is created and cleanup is registered at exit.  Otherwise
    the existing file is loaded (ports, key, transport).
    """
    if not self.connection_file:
        self.connection_file = "kernel-%s.json"%os.getpid()
    try:
        self.connection_file = filefind(self.connection_file, ['.', self.connection_dir])
    except IOError:
        self.log.debug("Connection file not found: %s", self.connection_file)
        # This means I own it, and I'll create it in this directory:
        ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700)
        # Also, I will clean it up:
        atexit.register(self.cleanup_connection_file)
        return
    try:
        self.load_connection_file()
    except Exception:
        self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
        self.exit(1)
def init_sockets(self):
    """Create and bind the shell/stdin/control ROUTER sockets, then iopub."""
    # Create a context, a session, and the kernel sockets.
    self.log.info("Starting the kernel at pid: %i", os.getpid())
    context = zmq.Context.instance()
    # Uncomment this to try closing the context.
    # atexit.register(context.term)

    self.shell_socket = context.socket(zmq.ROUTER)
    # linger=1000: allow up to 1s for pending messages to flush on close
    self.shell_socket.linger = 1000
    self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
    self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)

    self.stdin_socket = context.socket(zmq.ROUTER)
    self.stdin_socket.linger = 1000
    self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
    self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)

    self.control_socket = context.socket(zmq.ROUTER)
    self.control_socket.linger = 1000
    self.control_port = self._bind_socket(self.control_socket, self.control_port)
    self.log.debug("control ROUTER Channel on port: %i" % self.control_port)

    self.init_iopub(context)
def init_iopub(self, context):
    """Create the iopub PUB socket and hand it to a background IO thread."""
    self.iopub_socket = context.socket(zmq.PUB)
    self.iopub_socket.linger = 1000
    self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
    self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
    # tornado must be configured before the IOPub thread starts using it
    self.configure_tornado_logger()
    self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
    self.iopub_thread.start()
    # backward-compat: wrap iopub socket API in background thread
    self.iopub_socket = self.iopub_thread.background_socket
def init_heartbeat(self):
    """start the heart beating"""
    # heartbeat doesn't share context, because it mustn't be blocked
    # by the GIL, which is accessed by libzmq when freeing zero-copy messages
    hb_ctx = zmq.Context()
    hb_addr = (self.transport, self.ip, self.hb_port)
    self.heartbeat = Heartbeat(hb_ctx, hb_addr)
    # the Heartbeat may have chosen a port itself; record it
    self.hb_port = self.heartbeat.port
    self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
    self.heartbeat.start()
def log_connection_info(self):
    """display connection info, and store ports"""
    basename = os.path.basename(self.connection_file)
    if basename == self.connection_file or \
        os.path.dirname(self.connection_file) == self.connection_dir:
        # use shortname
        tail = basename
    else:
        tail = self.connection_file
    lines = [
        "To connect another client to this kernel, use:",
        "    --existing %s" % tail,
    ]
    # log connection info
    # info-level, so often not shown.
    # frontends should use the %connect_info magic
    # to see the connection info
    for line in lines:
        self.log.info(line)
    # also raw print to the terminal if no parent_handle (`ipython kernel`)
    # unless log-level is CRITICAL (--quiet)
    if not self.parent_handle and self.log_level < logging.CRITICAL:
        io.rprint(_ctrl_c_message)
        for line in lines:
            io.rprint(line)

    # keep the full port map for connect_request replies
    self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
                      stdin=self.stdin_port, hb=self.hb_port,
                      control=self.control_port)
def init_blackhole(self):
    """redirects stdout/stderr to devnull if necessary"""
    if not (self.no_stdout or self.no_stderr):
        return
    # one shared devnull handle is enough for both streams;
    # it stays open for the life of the process
    devnull = open(os.devnull, 'w')
    if self.no_stdout:
        sys.stdout = sys.__stdout__ = devnull
    if self.no_stderr:
        sys.stderr = sys.__stderr__ = devnull
def init_io(self):
    """Redirect input streams and set a display hook."""
    if self.outstream_class:
        # replace stdout/stderr with zmq-forwarding streams on the iopub thread
        outstream_factory = import_item(str(self.outstream_class))
        sys.stdout = outstream_factory(self.session, self.iopub_thread, u'stdout')
        sys.stderr = outstream_factory(self.session, self.iopub_thread, u'stderr')
    if self.displayhook_class:
        # route sys.displayhook results over iopub as execute_result messages
        displayhook_factory = import_item(str(self.displayhook_class))
        self.displayhook = displayhook_factory(self.session, self.iopub_socket)
        sys.displayhook = self.displayhook

    self.patch_io()
def patch_io(self):
    """Patch important libraries that can't handle sys.stdout forwarding"""
    try:
        import faulthandler
    except ImportError:
        # Python 2 without the faulthandler backport -- nothing to patch
        pass
    else:
        # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
        # updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
        # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable

        # change default file to __stderr__ from forwarded stderr
        # (faulthandler needs a real fd; the zmq OutStream has none)
        faulthandler_enable = faulthandler.enable
        def enable(file=sys.__stderr__, all_threads=True, **kwargs):
            return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)

        faulthandler.enable = enable

        if hasattr(faulthandler, 'register'):
            faulthandler_register = faulthandler.register
            def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
                return faulthandler_register(signum, file=file, all_threads=all_threads,
                                             chain=chain, **kwargs)
            faulthandler.register = register
def init_signal(self):
    """Ignore SIGINT: interrupts reach the kernel as messages, not signals."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
    """Create the Kernel object itself"""
    shell_stream = ZMQStream(self.shell_socket)
    control_stream = ZMQStream(self.control_socket)

    # kernel_class is a singleton; instance() creates or returns it
    kernel_factory = self.kernel_class.instance

    kernel = kernel_factory(parent=self, session=self.session,
                            shell_streams=[shell_stream, control_stream],
                            iopub_thread=self.iopub_thread,
                            iopub_socket=self.iopub_socket,
                            stdin_socket=self.stdin_socket,
                            log=self.log,
                            profile_dir=self.profile_dir,
                            user_ns=self.user_ns,
    )
    kernel.record_ports({
        name + '_port': port for name, port in self.ports.items()
    })
    self.kernel = kernel

    # Allow the displayhook to get the execution count
    self.displayhook.get_execution_count = lambda: kernel.execution_count
def init_gui_pylab(self):
    """Enable GUI event loop integration, taking pylab into account."""

    # Register inline backend as default
    # this is higher priority than matplotlibrc,
    # but lower priority than anything else (mpl.use() for instance).
    # This only affects matplotlib >= 1.5
    if not os.environ.get('MPLBACKEND'):
        os.environ['MPLBACKEND'] = 'module://yap_kernel.pylab.backend_inline'

    # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
    # to ensure that any exception is printed straight to stderr.
    # Normally _showtraceback associates the reply with an execution,
    # which means frontends will never draw it, as this exception
    # is not associated with any execute request.

    shell = self.shell
    _showtraceback = shell._showtraceback
    try:
        # replace error-sending traceback with stderr
        def print_tb(etype, evalue, stb):
            print ("GUI event loop or pylab initialization failed",
                   file=sys.stderr)
            print (shell.InteractiveTB.stb2text(stb), file=sys.stderr)
        shell._showtraceback = print_tb
        InteractiveShellApp.init_gui_pylab(self)
    finally:
        # always restore the original traceback handler
        shell._showtraceback = _showtraceback
def init_shell(self):
    """Grab the kernel's interactive shell (if any) and register self with it."""
    shell = getattr(self.kernel, 'shell', None)
    self.shell = shell
    if shell:
        shell.configurables.append(self)
def init_extensions(self):
    """Load configured IPython extensions, plus ipywidgets if installed."""
    super(YAPKernelApp, self).init_extensions()
    # BEGIN HARDCODED WIDGETS HACK
    # Ensure ipywidgets extension is loaded if available
    extension_man = self.shell.extension_manager
    if 'ipywidgets' not in extension_man.loaded:
        try:
            extension_man.load_extension('ipywidgets')
        except ImportError:
            # fix: exception was bound `as e` but never used
            # widgets are optional, so a missing package is only debug-worthy
            self.log.debug('ipywidgets package not installed.  Widgets will not be available.')
    # END HARDCODED WIDGETS HACK
def configure_tornado_logger(self):
    """ Configure the tornado logging.Logger.

        Must set up the tornado logger or else tornado will call
        basicConfig for the root logger which makes the root logger
        go to the real sys.stderr instead of the capture streams.
        This function mimics the setup of logging.basicConfig.
    """
    tornado_logger = logging.getLogger('tornado')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    tornado_logger.addHandler(stream_handler)
@catch_config_error
def initialize(self, argv=None):
    """Full kernel startup sequence; ordering of the init_* calls matters."""
    super(YAPKernelApp, self).initialize(argv)
    if self.subapp is not None:
        # a subcommand (e.g. `install`) handles this invocation instead
        return
    # register zmq IOLoop with tornado
    zmq_ioloop.install()
    self.init_blackhole()
    self.init_connection_file()
    self.init_poller()
    self.init_sockets()
    self.init_heartbeat()
    # writing/displaying connection info must be *after* init_sockets/heartbeat
    self.write_connection_file()
    # Log connection info after writing connection file, so that the connection
    # file is definitely available at the time someone reads the log.
    self.log_connection_info()
    self.init_io()
    self.init_signal()
    self.init_kernel()
    # shell init steps
    self.init_path()
    self.init_shell()
    if self.shell:
        self.init_gui_pylab()
        self.init_extensions()
    self.init_code()
    # flush stdout/stderr, so that anything written to these streams during
    # initialization do not get associated with the first execution request
    sys.stdout.flush()
    sys.stderr.flush()
def start(self):
    """Run the kernel (or delegate to a subcommand) until interrupted."""
    if self.subapp is not None:
        return self.subapp.start()
    if self.poller is not None:
        self.poller.start()
    self.kernel.start()
    try:
        # blocks until the loop is stopped
        ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        # Ctrl-C at this level just means "shut down"
        pass
# module-level entry point used by console_scripts / `python -m`
launch_new_instance = YAPKernelApp.launch_instance
def main():
    """Run an YAPKernel as an application"""
    kernel_app = YAPKernelApp.instance()
    kernel_app.initialize()
    kernel_app.start()
# script entry point
if __name__ == '__main__':
    main()
756
packages/python/yap_kernel/yap_kernel/kernelbase.py
Normal file
756
packages/python/yap_kernel/yap_kernel/kernelbase.py
Normal file
@ -0,0 +1,756 @@
|
||||
"""Base class for a kernel that talks to frontends over 0MQ."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
from datetime import datetime
|
||||
try:
|
||||
# jupyter_client >= 5, use tz-aware now
|
||||
from jupyter_client.session import utcnow as now
|
||||
except ImportError:
|
||||
# jupyter_client < 5, use local now()
|
||||
now = datetime.now
|
||||
|
||||
from signal import signal, default_int_handler, SIGINT
|
||||
|
||||
import zmq
|
||||
from tornado import ioloop
|
||||
from zmq.eventloop.zmqstream import ZMQStream
|
||||
|
||||
from traitlets.config.configurable import SingletonConfigurable
|
||||
from IPython.core.error import StdinNotImplementedError
|
||||
from ipython_genutils import py3compat
|
||||
from ipython_genutils.py3compat import unicode_type, string_types
|
||||
from yap_kernel.jsonutil import json_clean
|
||||
from traitlets import (
|
||||
Any, Instance, Float, Dict, List, Set, Integer, Unicode, Bool, observe, default
|
||||
)
|
||||
|
||||
from jupyter_client.session import Session
|
||||
|
||||
from ._version import kernel_protocol_version
|
||||
|
||||
class Kernel(SingletonConfigurable):
    """Base class for a kernel that talks to frontends over 0MQ."""

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # attribute to override with a GUI
    eventloop = Any(None)
@observe('eventloop')
def _update_eventloop(self, change):
    """schedule call to eventloop from IOLoop"""
    # enter_eventloop must run on the IOLoop thread, not the caller's
    loop = ioloop.IOLoop.instance()
    loop.add_callback(self.enter_eventloop)
# messaging session (signing/serialization) and assorted wiring
session = Instance(Session, allow_none=True)
profile_dir = Instance('IPython.core.profiledir.ProfileDir', allow_none=True)
shell_streams = List()
control_stream = Instance(ZMQStream, allow_none=True)
iopub_socket = Any()
iopub_thread = Any()
stdin_socket = Any()
log = Instance(logging.Logger, allow_none=True)

# identities:
int_id = Integer(-1)
ident = Unicode()
@default('ident')
def _default_ident(self):
    """Default identity: a fresh random UUID string."""
    new_id = uuid.uuid4()
    return unicode_type(new_id)
# This should be overridden by wrapper kernels that implement any real
# language.
language_info = {}

# any links that should go in the help menu
help_links = List()

# Private interface

_darwin_app_nap = Bool(True,
    help="""Whether to use appnope for compatiblity with OS X App Nap.

    Only affects OS X >= 10.9.
    """
).tag(config=True)

# track associations with current request
_allow_stdin = Bool(False)
_parent_header = Dict()
_parent_ident = Any(b'')
# Time to sleep after flushing the stdout/err buffers in each execute
# cycle.  While this introduces a hard limit on the minimal latency of the
# execute cycle, it helps prevent output synchronization problems for
# clients.
# Units are in seconds.  The minimum zmq latency on local host is probably
# ~150 microseconds, set this to 500us for now.  We may need to increase it
# a little if it's not enough after more interactive testing.
_execute_sleep = Float(0.0005).tag(config=True)

# Frequency of the kernel's event loop.
# Units are in seconds, kernel subclasses for GUI toolkits may need to
# adapt to milliseconds.
_poll_interval = Float(0.05).tag(config=True)

# If the shutdown was requested over the network, we leave here the
# necessary reply message so it can be sent by our registered atexit
# handler.  This ensures that the reply is only sent to clients truly at
# the end of our shutdown process (which happens after the underlying
# IPython shell's own shutdown).
_shutdown_message = None

# This is a dict of port number that the kernel is listening on. It is set
# by record_ports and used by connect_request.
_recorded_ports = Dict()

# set of aborted msg_ids
aborted = Set()

# Track execution count here. For IPython, we override this to use the
# execution count we store in the shell.
execution_count = 0

# message types handled on the shell channel; each name is also the
# name of the handler method on this class (see __init__)
msg_types = [
    'execute_request', 'complete_request',
    'inspect_request', 'history_request',
    'comm_info_request', 'kernel_info_request',
    'connect_request', 'shutdown_request',
    'is_complete_request',
    # deprecated:
    'apply_request',
]
# add deprecated ipyparallel control messages
control_msg_types = msg_types + ['clear_request', 'abort_request']
def __init__(self, **kwargs):
    """Build the msg_type -> bound-handler dispatch tables."""
    super(Kernel, self).__init__(**kwargs)

    # Build dict of handlers for message types; each msg_type names a
    # method on this class.
    self.shell_handlers = {
        msg_type: getattr(self, msg_type)
        for msg_type in self.msg_types
    }
    self.control_handlers = {
        msg_type: getattr(self, msg_type)
        for msg_type in self.control_msg_types
    }
def dispatch_control(self, msg):
    """dispatch control requests

    Deserializes the raw message, publishes busy/idle status around the
    handler call, and routes it to the handler registered for its msg_type.
    """
    idents,msg = self.session.feed_identities(msg, copy=False)
    try:
        msg = self.session.deserialize(msg, content=True, copy=False)
    except Exception:
        # fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception
        self.log.error("Invalid Control Message", exc_info=True)
        return

    self.log.debug("Control received: %s", msg)

    # Set the parent message for side effects.
    self.set_parent(idents, msg)
    self._publish_status(u'busy')

    header = msg['header']
    msg_type = header['msg_type']

    handler = self.control_handlers.get(msg_type, None)
    if handler is None:
        self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
    else:
        try:
            handler(self.control_stream, idents, msg)
        except Exception:
            self.log.error("Exception in control handler:", exc_info=True)

    sys.stdout.flush()
    sys.stderr.flush()
    self._publish_status(u'idle')
def should_handle(self, stream, msg, idents):
    """Check whether a shell-channel message should be handled

    Allows subclasses to prevent handling of certain messages (e.g. aborted requests).
    """
    header = msg['header']
    msg_id = header['msg_id']
    if msg_id not in self.aborted:
        return True
    # the request was aborted: reply with status 'aborted' and skip it
    msg_type = header['msg_type']
    # is it safe to assume a msg_id will not be resubmitted?
    self.aborted.remove(msg_id)
    status = {'status' : 'aborted'}
    md = {'engine' : self.ident}
    md.update(status)
    reply_type = msg_type.split('_')[0] + '_reply'
    self.session.send(stream, reply_type, metadata=md,
                      content=status, parent=msg, ident=idents)
    return False
def dispatch_shell(self, stream, msg):
    """dispatch shell requests

    Flushes pending control requests first, then deserializes and routes
    the message, wrapping the handler call in pre/post hooks and
    busy/idle status publications.
    """
    # flush control requests first
    if self.control_stream:
        self.control_stream.flush()

    idents,msg = self.session.feed_identities(msg, copy=False)
    try:
        msg = self.session.deserialize(msg, content=True, copy=False)
    except Exception:
        # fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception
        self.log.error("Invalid Message", exc_info=True)
        return

    # Set the parent message for side effects.
    self.set_parent(idents, msg)
    self._publish_status(u'busy')

    header = msg['header']
    msg_id = header['msg_id']
    msg_type = msg['header']['msg_type']

    # Print some info about this message and leave a '--->' marker, so it's
    # easier to trace visually the message chain when debugging.  Each
    # handler prints its message at the end.
    self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
    self.log.debug(' Content: %s\n --->\n ', msg['content'])

    if not self.should_handle(stream, msg, idents):
        return

    handler = self.shell_handlers.get(msg_type, None)
    if handler is None:
        # fix: Logger.warn is a deprecated alias of Logger.warning
        self.log.warning("Unknown message type: %r", msg_type)
    else:
        self.log.debug("%s: %s", msg_type, msg)
        self.pre_handler_hook()
        try:
            handler(stream, idents, msg)
        except Exception:
            self.log.error("Exception in message handler:", exc_info=True)
        finally:
            self.post_handler_hook()

    sys.stdout.flush()
    sys.stderr.flush()
    self._publish_status(u'idle')
def pre_handler_hook(self):
    """Hook to execute before calling message handler"""
    # ensure default_int_handler during handler call
    # (save the current handler so post_handler_hook can restore it)
    self.saved_sigint_handler = signal(SIGINT, default_int_handler)
def post_handler_hook(self):
    """Hook to execute after calling message handler"""
    # restore whatever SIGINT handler pre_handler_hook displaced
    signal(SIGINT, self.saved_sigint_handler)
def enter_eventloop(self):
    """enter eventloop"""
    self.log.info("entering eventloop %s", self.eventloop)
    for stream in self.shell_streams:
        # flush any pending replies,
        # which may be skipped by entering the eventloop
        stream.flush(zmq.POLLOUT)
    # restore default_int_handler
    signal(SIGINT, default_int_handler)
    while self.eventloop is not None:
        try:
            # the eventloop callable is expected to block until done
            self.eventloop(self)
        except KeyboardInterrupt:
            # Ctrl-C shouldn't crash the kernel
            self.log.error("KeyboardInterrupt caught in kernel")
            continue
        else:
            # eventloop exited cleanly, this means we should stop (right?)
            self.eventloop = None
            break
    self.log.info("exiting eventloop")
def start(self):
    """register dispatchers for streams"""
    if self.control_stream:
        self.control_stream.on_recv(self.dispatch_control, copy=False)

    # closure factory so each stream binds its own `stream` argument
    # (a direct lambda in the loop would late-bind to the last stream)
    def make_dispatcher(stream):
        def dispatcher(msg):
            return self.dispatch_shell(stream, msg)
        return dispatcher

    for s in self.shell_streams:
        s.on_recv(make_dispatcher(s), copy=False)

    # publish idle status
    self._publish_status('starting')
def do_one_iteration(self):
    """step eventloop just once"""
    if self.control_stream:
        self.control_stream.flush()
    for shell_stream in self.shell_streams:
        # handle at most one request per iteration
        shell_stream.flush(zmq.POLLIN, 1)
        shell_stream.flush(zmq.POLLOUT)
def record_ports(self, ports):
    """Record the ports that this kernel is using.

    The creator of the Kernel instance must call this methods if they
    want the :meth:`connect_request` method to return the port numbers.
    """
    self._recorded_ports = ports
#---------------------------------------------------------------------------
|
||||
# Kernel request handlers
|
||||
#---------------------------------------------------------------------------
|
||||
|
||||
def _publish_execute_input(self, code, parent, execution_count):
|
||||
"""Publish the code request on the iopub stream."""
|
||||
|
||||
self.session.send(self.iopub_socket, u'execute_input',
|
||||
{u'code':code, u'execution_count': execution_count},
|
||||
parent=parent, ident=self._topic('execute_input')
|
||||
)
|
||||
|
||||
def _publish_status(self, status, parent=None):
|
||||
"""send status (busy/idle) on IOPub"""
|
||||
self.session.send(self.iopub_socket,
|
||||
u'status',
|
||||
{u'execution_state': status},
|
||||
parent=parent or self._parent_header,
|
||||
ident=self._topic('status'),
|
||||
)
|
||||
|
||||
def set_parent(self, ident, parent):
    """Set the current parent_header

    Side effects (IOPub messages) and replies are associated with
    the request that caused them via the parent_header.

    The parent identity is used to route input_request messages
    on the stdin channel.
    """
    self._parent_ident = ident
    self._parent_header = parent
def send_response(self, stream, msg_or_type, content=None, ident=None,
                  buffers=None, track=False, header=None, metadata=None):
    """Send a response to the message we're currently processing.

    This accepts all the parameters of :meth:`jupyter_client.session.Session.send`
    except ``parent``.

    This relies on :meth:`set_parent` having been called for the current
    message.
    """
    # parent is injected from the stored _parent_header
    return self.session.send(stream, msg_or_type, content, self._parent_header,
                             ident, buffers, track, header, metadata)
def init_metadata(self, parent):
    """Initialize metadata.

    Run at the beginning of execution requests.
    """
    # FIXME: `started` is part of ipyparallel
    # Remove for yap_kernel 5.0
    metadata = {'started': now()}
    return metadata
def finish_metadata(self, parent, metadata, reply_content):
    """Finish populating metadata.

    Run after completing an execution request.
    """
    # base implementation is a pass-through; subclasses may enrich it
    return metadata
def execute_request(self, stream, ident, parent):
    """handle an execute_request

    Unpacks the request, publishes the input on iopub (unless silent),
    delegates to :meth:`do_execute`, and sends the execute_reply.
    """
    try:
        content = parent[u'content']
        code = py3compat.cast_unicode_py2(content[u'code'])
        silent = content[u'silent']
        store_history = content.get(u'store_history', not silent)
        user_expressions = content.get('user_expressions', {})
        allow_stdin = content.get('allow_stdin', False)
    except Exception:
        # fix: was a bare `except:`; a malformed request is logged and dropped,
        # but KeyboardInterrupt/SystemExit now propagate
        self.log.error("Got bad msg: ")
        self.log.error("%s", parent)
        return

    stop_on_error = content.get('stop_on_error', True)

    metadata = self.init_metadata(parent)

    # Re-broadcast our input for the benefit of listening clients, and
    # start computing output
    if not silent:
        self.execution_count += 1
        self._publish_execute_input(code, parent, self.execution_count)

    reply_content = self.do_execute(code, silent, store_history,
                                    user_expressions, allow_stdin)

    # Flush output before sending the reply.
    sys.stdout.flush()
    sys.stderr.flush()
    # FIXME: on rare occasions, the flush doesn't seem to make it to the
    # clients... This seems to mitigate the problem, but we definitely need
    # to better understand what's going on.
    if self._execute_sleep:
        time.sleep(self._execute_sleep)

    # Send the reply.
    reply_content = json_clean(reply_content)
    metadata = self.finish_metadata(parent, metadata, reply_content)

    reply_msg = self.session.send(stream, u'execute_reply',
                                  reply_content, parent, metadata=metadata,
                                  ident=ident)

    self.log.debug("%s", reply_msg)

    if not silent and reply_msg['content']['status'] == u'error' and stop_on_error:
        self._abort_queues()
def do_execute(self, code, silent, store_history=True,
               user_expressions=None, allow_stdin=False):
    """Execute user code.  Must be overridden by subclasses.

    Should return an execute_reply content dict (status, execution_count, ...).
    """
    raise NotImplementedError
def complete_request(self, stream, ident, parent):
    """Handle a complete_request by delegating to :meth:`do_complete`."""
    content = parent['content']
    raw_matches = self.do_complete(content['code'], content['cursor_pos'])
    matches = json_clean(raw_matches)
    completion_msg = self.session.send(stream, 'complete_reply',
                                       matches, parent, ident)
    self.log.debug("%s", completion_msg)
def do_complete(self, code, cursor_pos):
    """Override in subclasses to find completions.

    Default: no matches, cursor untouched.
    """
    empty_reply = {
        'matches': [],
        'cursor_end': cursor_pos,
        'cursor_start': cursor_pos,
        'metadata': {},
        'status': 'ok',
    }
    return empty_reply
def inspect_request(self, stream, ident, parent):
    """Handle an inspect_request by delegating to :meth:`do_inspect`."""
    content = parent['content']
    raw_reply = self.do_inspect(content['code'], content['cursor_pos'],
                                content.get('detail_level', 0))
    # Before we send this object over, we scrub it for JSON usage
    inspect_reply = json_clean(raw_reply)
    msg = self.session.send(stream, 'inspect_reply',
                            inspect_reply, parent, ident)
    self.log.debug("%s", msg)
def do_inspect(self, code, cursor_pos, detail_level=0):
    """Override in subclasses to allow introspection.

    Default: nothing found.
    """
    return dict(status='ok', data={}, metadata={}, found=False)
def history_request(self, stream, ident, parent):
    """Handle a history_request by delegating to :meth:`do_history`."""
    content = parent['content']
    raw_reply = self.do_history(**content)
    history_reply = json_clean(raw_reply)
    msg = self.session.send(stream, 'history_reply',
                            history_reply, parent, ident)
    self.log.debug("%s", msg)
def do_history(self, hist_access_type, output, raw, session=None, start=None,
               stop=None, n=None, pattern=None, unique=False):
    """Override in subclasses to access history.

    The default implementation returns an empty history.
    """
    return {'history': [], 'status': 'ok'}
|
||||
|
||||
def connect_request(self, stream, ident, parent):
    """Reply with the port numbers recorded for this kernel, if any."""
    ports = self._recorded_ports
    content = {} if ports is None else ports.copy()
    content['status'] = 'ok'
    msg = self.session.send(stream, 'connect_reply',
                            content, parent, ident)
    self.log.debug("%s", msg)
|
||||
|
||||
@property
def kernel_info(self):
    # Static description of this kernel, merged into kernel_info replies
    # by kernel_info_request. Each attribute is expected to be set by the
    # concrete kernel subclass.
    return {
        'protocol_version': kernel_protocol_version,
        'implementation': self.implementation,
        'implementation_version': self.implementation_version,
        'language_info': self.language_info,
        'banner': self.banner,
        'help_links': self.help_links,
    }
|
||||
|
||||
def kernel_info_request(self, stream, ident, parent):
    """Answer a kernel_info_request with self.kernel_info plus status."""
    content = {'status': 'ok'}
    content.update(self.kernel_info)
    reply = self.session.send(stream, 'kernel_info_reply',
                              content, parent, ident)
    self.log.debug("%s", reply)
|
||||
|
||||
def comm_info_request(self, stream, ident, parent):
    """List open comms, optionally filtered by target_name."""
    target_name = parent['content'].get('target_name', None)

    # Should this be moved to yapkernel?
    comms = {}
    if hasattr(self, 'comm_manager'):
        for comm_id, comm in self.comm_manager.comms.items():
            # target_name=None means "all comms"
            if target_name is None or comm.target_name == target_name:
                comms[comm_id] = dict(target_name=comm.target_name)
    reply_content = dict(comms=comms, status='ok')
    msg = self.session.send(stream, 'comm_info_reply',
                            reply_content, parent, ident)
    self.log.debug("%s", msg)
|
||||
|
||||
def shutdown_request(self, stream, ident, parent):
    """Handle a shutdown_request: run do_shutdown, reply, then stop the loop."""
    content = self.do_shutdown(parent['content']['restart'])
    self.session.send(stream, u'shutdown_reply', content, parent, ident=ident)
    # same content, but different msg_id for broadcasting on IOPub
    # (the actual broadcast happens later, in _at_shutdown)
    self._shutdown_message = self.session.msg(u'shutdown_reply',
                                              content, parent
    )

    self._at_shutdown()
    # call sys.exit after a short delay
    loop = ioloop.IOLoop.instance()
    loop.add_timeout(time.time()+0.1, loop.stop)
|
||||
|
||||
def do_shutdown(self, restart):
    """Override in subclasses to do things when the frontend shuts down the
    kernel.
    """
    return {'restart': restart, 'status': 'ok'}
|
||||
|
||||
def is_complete_request(self, stream, ident, parent):
    """Handle an is_complete_request: delegate to do_is_complete and reply."""
    code = parent['content']['code']
    reply_content = json_clean(self.do_is_complete(code))
    reply_msg = self.session.send(stream, 'is_complete_reply',
                                  reply_content, parent, ident)
    self.log.debug("%s", reply_msg)
|
||||
|
||||
def do_is_complete(self, code):
    """Override in subclasses to report whether `code` is ready to run.

    The default implementation cannot tell.
    """
    return {'status': 'unknown'}
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Engine methods (DEPRECATED)
|
||||
#---------------------------------------------------------------------------
|
||||
|
||||
def apply_request(self, stream, ident, parent):
    """DEPRECATED: run do_apply on a parallel-apply message and reply.

    This handler has moved to ipyparallel; it is kept only for
    backward compatibility.
    """
    self.log.warn("""apply_request is deprecated in kernel_base, moving to ipyparallel.""")
    try:
        content = parent[u'content']
        bufs = parent[u'buffers']
        msg_id = parent['header']['msg_id']
    except (KeyError, TypeError):
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only missing/malformed fields belong here.
        self.log.error("Got bad msg: %s", parent, exc_info=True)
        return

    md = self.init_metadata(parent)

    reply_content, result_buf = self.do_apply(content, bufs, msg_id, md)

    # flush i/o so output ordering is sane for the client
    sys.stdout.flush()
    sys.stderr.flush()

    md = self.finish_metadata(parent, md, reply_content)

    self.session.send(stream, u'apply_reply', reply_content,
                      parent=parent, ident=ident, buffers=result_buf, metadata=md)
|
||||
|
||||
def do_apply(self, content, bufs, msg_id, reply_metadata):
    """DEPRECATED"""
    raise NotImplementedError
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Control messages (DEPRECATED)
|
||||
#---------------------------------------------------------------------------
|
||||
|
||||
def abort_request(self, stream, ident, parent):
    """abort a specific msg by id (DEPRECATED; part of IPython parallel)"""
    # typo fixed in the log message: "It os only" -> "It is only"
    self.log.warn("abort_request is deprecated in kernel_base. It is only part of IPython parallel")
    msg_ids = parent['content'].get('msg_ids', None)
    if isinstance(msg_ids, string_types):
        msg_ids = [msg_ids]
    if not msg_ids:
        # No explicit ids: drain everything queued on the shell streams.
        # Previously the code fell through to `for mid in msg_ids`, which
        # raised TypeError when msg_ids was None.
        self._abort_queues()
    else:
        for mid in msg_ids:
            self.aborted.add(str(mid))

    content = dict(status='ok')
    reply_msg = self.session.send(stream, 'abort_reply', content=content,
                                  parent=parent, ident=ident)
    self.log.debug("%s", reply_msg)
|
||||
|
||||
def clear_request(self, stream, idents, parent):
    """Clear our namespace. (DEPRECATED; part of IPython parallel)"""
    # typo fixed in the log message: "It os only" -> "It is only"
    self.log.warn("clear_request is deprecated in kernel_base. It is only part of IPython parallel")
    content = self.do_clear()
    self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
                      content=content)
|
||||
|
||||
def do_clear(self):
    """DEPRECATED"""
    raise NotImplementedError
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Protected interface
|
||||
#---------------------------------------------------------------------------
|
||||
|
||||
def _topic(self, topic):
    """prefixed topic for IOPub messages"""
    # equivalent to "kernel.<ident>" + "." + topic
    return py3compat.cast_bytes("kernel.%s.%s" % (self.ident, topic))
|
||||
|
||||
def _abort_queues(self):
    """Drain every registered shell stream, skipping empty slots."""
    for shell_stream in self.shell_streams:
        if not shell_stream:
            continue
        self._abort_queue(shell_stream)
|
||||
|
||||
def _abort_queue(self, stream):
    """Reply 'aborted' to every request already queued on `stream`."""
    poller = zmq.Poller()
    poller.register(stream.socket, zmq.POLLIN)
    while True:
        # non-blocking recv: msg is None once the queue is drained
        idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
        if msg is None:
            return

        self.log.info("Aborting:")
        self.log.info("%s", msg)
        # reply type mirrors the request: execute_request -> execute_reply
        msg_type = msg['header']['msg_type']
        reply_type = msg_type.split('_')[0] + '_reply'

        status = {'status' : 'aborted'}
        md = {'engine' : self.ident}
        md.update(status)
        # bracket the aborted reply with busy/idle, like a normal request
        self._publish_status('busy', parent=msg)
        reply_msg = self.session.send(stream, reply_type, metadata=md,
                    content=status, parent=msg, ident=idents)
        self._publish_status('idle', parent=msg)
        self.log.debug("%s", reply_msg)
        # We need to wait a bit for requests to come in. This can probably
        # be set shorter for true asynchronous clients.
        poller.poll(50)
|
||||
|
||||
def _no_raw_input(self):
    """Raise StdinNotImplentedError if active frontend doesn't support
    stdin."""
    raise StdinNotImplementedError(
        "raw_input was called, but this "
        "frontend does not support stdin.")
|
||||
|
||||
def getpass(self, prompt='', stream=None):
    """Forward getpass to frontends

    Raises
    ------
    StdinNotImplentedError if active frontend doesn't support stdin.
    """
    if self._allow_stdin:
        if stream is not None:
            # `stream` exists for getpass API compatibility only
            import warnings
            warnings.warn("The `stream` parameter of `getpass.getpass` will have no effect when using yap_kernel",
                          UserWarning, stacklevel=2)
        return self._input_request(
            prompt,
            self._parent_ident,
            self._parent_header,
            password=True,
        )
    raise StdinNotImplementedError(
        "getpass was called, but this frontend does not support input requests."
    )
|
||||
|
||||
def raw_input(self, prompt=''):
    """Forward raw_input to frontends

    Raises
    ------
    StdinNotImplentedError if active frontend doesn't support stdin.
    """
    if self._allow_stdin:
        return self._input_request(
            str(prompt),
            self._parent_ident,
            self._parent_header,
            password=False,
        )
    raise StdinNotImplementedError(
        "raw_input was called, but this frontend does not support input requests."
    )
|
||||
|
||||
def _input_request(self, prompt, ident, parent, password=False):
    """Send an input_request over the stdin socket and block for the reply.

    Parameters
    ----------
    prompt : str
        Text the frontend should display to the user.
    ident, parent
        Routing identity and parent message for the request.
    password : bool
        If True, the frontend should not echo what the user types.

    Returns the entered string; raises EOFError if the user sent ^D.
    """
    # Flush output before making the request.
    sys.stderr.flush()
    sys.stdout.flush()
    # flush the stdin socket, to purge stale replies
    while True:
        try:
            self.stdin_socket.recv_multipart(zmq.NOBLOCK)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                break
            else:
                raise

    # Send the input request.
    content = json_clean(dict(prompt=prompt, password=password))
    self.session.send(self.stdin_socket, u'input_request', content, parent,
                      ident=ident)

    # Await a response; retry on invalid messages.
    while True:
        try:
            ident, reply = self.session.recv(self.stdin_socket, 0)
        except Exception:
            self.log.warn("Invalid Message:", exc_info=True)
        except KeyboardInterrupt:
            # re-raise KeyboardInterrupt, to truncate traceback
            raise KeyboardInterrupt
        else:
            break
    try:
        value = py3compat.unicode_to_str(reply['content']['value'])
    except (TypeError, KeyError):
        # Was a bare `except:`; only a malformed reply (missing content or
        # value, or reply is None) should be treated as empty input.
        self.log.error("Bad input_reply: %s", parent)
        value = ''
    if value == '\x04':
        # EOF: the user typed ^D
        raise EOFError
    return value
|
||||
|
||||
def _at_shutdown(self):
    """Actions taken at shutdown by the kernel, called by python's atexit.
    """
    # io.rprint("Kernel at_shutdown") # dbg
    if self._shutdown_message is not None:
        # broadcast the shutdown_reply prepared by shutdown_request on IOPub
        self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
        self.log.debug("%s", self._shutdown_message)
    # push any pending shell output out before the process exits
    [ s.flush(zmq.POLLOUT) for s in self.shell_streams ]
|
188
packages/python/yap_kernel/yap_kernel/kernelspec.py
Normal file
188
packages/python/yap_kernel/yap_kernel/kernelspec.py
Normal file
@ -0,0 +1,188 @@
|
||||
"""The IPython kernel spec for Jupyter"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import errno
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from jupyter_client.kernelspec import KernelSpecManager
|
||||
|
||||
pjoin = os.path.join
|
||||
|
||||
KERNEL_NAME = 'YAPKernel'
|
||||
|
||||
# path to kernelspec resources
|
||||
RESOURCES = pjoin(os.path.dirname(__file__), 'resources')
|
||||
|
||||
|
||||
def make_yap_kernel_cmd(mod='yap_kernel', executable=None, extra_arguments=None, **kw):
    """Build Popen command list for launching an IPython kernel.

    Parameters
    ----------
    mod : str, optional (default 'yap_kernel')
        A string of an IPython module whose __main__ starts an IPython kernel

    executable : str, optional (default sys.executable)
        The Python executable to use for the kernel process.

    extra_arguments : list, optional
        A list of extra arguments to pass when executing the launch code.

    Returns
    -------

    A Popen command list
    """
    python = sys.executable if executable is None else executable
    return ([python, '-m', mod, '-f', '{connection_file}']
            + list(extra_arguments or []))
|
||||
|
||||
|
||||
def get_kernel_dict(extra_arguments=None):
    """Construct dict for kernel.json"""
    argv = make_yap_kernel_cmd(extra_arguments=extra_arguments)
    return dict(argv=argv, display_name='YAP 6a', language='prolog')
|
||||
|
||||
|
||||
def write_kernel_spec(path=None, overrides=None, extra_arguments=None):
    """Write a kernel spec directory to `path`

    If `path` is not specified, a temporary directory is created.
    If `overrides` is given, the kernelspec JSON is updated before writing.

    The path to the kernelspec is always returned.
    """
    if path is None:
        # NOTE(review): the temp parent dir is not cleaned up here; install()
        # removes the staged tree after installing it — confirm other callers
        # do the same.
        path = os.path.join(tempfile.mkdtemp(suffix='_kernels'), KERNEL_NAME)

    # stage resources
    shutil.copytree(RESOURCES, path)
    # write kernel.json
    kernel_dict = get_kernel_dict(extra_arguments)

    if overrides:
        kernel_dict.update(overrides)
    with open(pjoin(path, 'kernel.json'), 'w') as f:
        json.dump(kernel_dict, f, indent=1)

    return path
|
||||
|
||||
|
||||
def install(kernel_spec_manager=None, user=False, kernel_name=KERNEL_NAME, display_name=None,
            prefix=None, profile=None):
    """Install the IPython kernelspec for Jupyter

    Parameters
    ----------

    kernel_spec_manager: KernelSpecManager [optional]
        A KernelSpecManager to use for installation.
        If none provided, a default instance will be created.
    user: bool [default: False]
        Whether to do a user-only install, or system-wide.
    kernel_name: str, optional
        Specify a name for the kernelspec.
        This is needed for having multiple IPython kernels for different environments.
    display_name: str, optional
        Specify the display name for the kernelspec
    profile: str, optional
        Specify a custom profile to be loaded by the kernel.
    prefix: str, optional
        Specify an install prefix for the kernelspec.
        This is needed to install into a non-default location, such as a conda/virtual-env.

    Returns
    -------

    The path where the kernelspec was installed.
    """
    if kernel_spec_manager is None:
        kernel_spec_manager = KernelSpecManager()

    if (kernel_name != KERNEL_NAME) and (display_name is None):
        # kernel_name is specified and display_name is not
        # default display_name to kernel_name
        display_name = kernel_name
    overrides = {}
    if display_name:
        overrides["display_name"] = display_name
    if profile:
        # forward the profile to the kernel command line
        extra_arguments = ["--profile", profile]
        if not display_name:
            # add the profile to the default display name
            overrides["display_name"] = 'Python %i [profile=%s]' % (sys.version_info[0], profile)
    else:
        extra_arguments = None
    # stage a spec dir in a temp location, install it, then discard the stage
    path = write_kernel_spec(overrides=overrides, extra_arguments=extra_arguments)
    dest = kernel_spec_manager.install_kernel_spec(
        path, kernel_name=kernel_name, user=user, prefix=prefix)
    # cleanup afterward
    shutil.rmtree(path)
    return dest
|
||||
|
||||
# Entrypoint
|
||||
|
||||
from traitlets.config import Application
|
||||
|
||||
|
||||
class InstallYAPKernelSpecApp(Application):
    """Dummy app wrapping argparse"""
    # name used as the argparse prog and in error messages
    name = 'ipython-kernel-install'

    def initialize(self, argv=None):
        # stash argv; parsing happens in start()
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv

    def start(self):
        """Parse command-line options and run install()."""
        import argparse
        parser = argparse.ArgumentParser(prog=self.name,
            description="Install the IPython kernel spec.")
        parser.add_argument('--user', action='store_true',
            help="Install for the current user instead of system-wide")
        parser.add_argument('--name', type=str, default=KERNEL_NAME,
            help="Specify a name for the kernelspec."
            " This is needed to have multiple IPython kernels at the same time.")
        parser.add_argument('--display-name', type=str,
            help="Specify the display name for the kernelspec."
            " This is helpful when you have multiple IPython kernels.")
        parser.add_argument('--profile', type=str,
            help="Specify an IPython profile to load. "
            "This can be used to create custom versions of the kernel.")
        parser.add_argument('--prefix', type=str,
            help="Specify an install prefix for the kernelspec."
            " This is needed to install into a non-default location, such as a conda/virtual-env.")
        parser.add_argument('--sys-prefix', action='store_const', const=sys.prefix, dest='prefix',
            help="Install to Python's sys.prefix."
            " Shorthand for --prefix='%s'. For use in conda/virtual-envs." % sys.prefix)
        opts = parser.parse_args(self.argv)
        try:
            dest = install(user=opts.user, kernel_name=opts.name, profile=opts.profile,
                           prefix=opts.prefix, display_name=opts.display_name)
        except OSError as e:
            # permission problems get a friendly hint; everything else re-raises
            if e.errno == errno.EACCES:
                print(e, file=sys.stderr)
                if opts.user:
                    print("Perhaps you want `sudo` or `--user`?", file=sys.stderr)
                self.exit(1)
            raise
        print("Installed kernelspec %s in %s" % (opts.name, dest))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
InstallYAPKernelSpecApp.launch_instance()
|
23
packages/python/yap_kernel/yap_kernel/log.py
Normal file
23
packages/python/yap_kernel/yap_kernel/log.py
Normal file
@ -0,0 +1,23 @@
|
||||
from logging import INFO, DEBUG, WARN, ERROR, FATAL
|
||||
|
||||
from zmq.log.handlers import PUBHandler
|
||||
|
||||
import warnings
|
||||
warnings.warn("yap_kernel.log is deprecated. It has moved to ipyparallel.engine.log", DeprecationWarning)
|
||||
|
||||
class EnginePUBHandler(PUBHandler):
    """A simple PUBHandler subclass that sets root_topic"""

    engine = None  # set in __init__; may not have an ``id`` yet

    def __init__(self, engine, *args, **kwargs):
        PUBHandler.__init__(self, *args, **kwargs)
        self.engine = engine

    @property
    def root_topic(self):
        """this is a property, in case the handler is created
        before the engine gets registered with an id"""
        engine_id = getattr(self.engine, 'id', None)
        if isinstance(engine_id, int):
            return "engine.%i" % engine_id
        return "engine"
|
117
packages/python/yap_kernel/yap_kernel/parentpoller.py
Normal file
117
packages/python/yap_kernel/yap_kernel/parentpoller.py
Normal file
@ -0,0 +1,117 @@
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
try:
|
||||
import ctypes
|
||||
except:
|
||||
ctypes = None
|
||||
import os
|
||||
import platform
|
||||
import signal
|
||||
import time
|
||||
try:
|
||||
from _thread import interrupt_main # Py 3
|
||||
except ImportError:
|
||||
from thread import interrupt_main # Py 2
|
||||
from threading import Thread
|
||||
|
||||
from traitlets.log import get_logger
|
||||
|
||||
import warnings
|
||||
|
||||
class ParentPollerUnix(Thread):
    """A Unix-specific daemon thread that terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self):
        super(ParentPollerUnix, self).__init__()
        self.daemon = True

    def run(self):
        # We cannot use os.waitpid because it works only for child processes.
        from errno import EINTR
        while True:
            try:
                # re-parented to init (ppid 1) means the parent has exited
                if os.getppid() != 1:
                    time.sleep(1.0)
                    continue
                get_logger().warning("Parent appears to have exited, shutting down.")
                os._exit(1)
            except OSError as e:
                if e.errno != EINTR:
                    raise
|
||||
|
||||
|
||||
class ParentPollerWindows(Thread):
    """ A Windows-specific daemon thread that listens for a special event that
    signals an interrupt and, optionally, terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self, interrupt_handle=None, parent_handle=None):
        """ Create the poller. At least one of the optional parameters must be
        provided.

        Parameters
        ----------
        interrupt_handle : HANDLE (int), optional
            If provided, the program will generate a Ctrl+C event when this
            handle is signaled.

        parent_handle : HANDLE (int), optional
            If provided, the program will terminate immediately when this
            handle is signaled.
        """
        assert(interrupt_handle or parent_handle)
        super(ParentPollerWindows, self).__init__()
        if ctypes is None:
            raise ImportError("ParentPollerWindows requires ctypes")
        self.daemon = True
        self.interrupt_handle = interrupt_handle
        self.parent_handle = parent_handle

    def run(self):
        """ Run the poll loop. This method never returns.
        """
        try:
            from _winapi import WAIT_OBJECT_0, INFINITE
        except ImportError:
            # Python 2 name of the module
            from _subprocess import WAIT_OBJECT_0, INFINITE

        # Build the list of handle to listen on.
        handles = []
        if self.interrupt_handle:
            handles.append(self.interrupt_handle)
        if self.parent_handle:
            handles.append(self.parent_handle)
        # HANDLE width must match the process bitness
        arch = platform.architecture()[0]
        c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int

        # Listen forever.
        while True:
            result = ctypes.windll.kernel32.WaitForMultipleObjects(
                len(handles),                            # nCount
                (c_int * len(handles))(*handles),        # lpHandles
                False,                                   # bWaitAll
                INFINITE)                                # dwMilliseconds

            # result encodes which handle (if any) was signaled
            if WAIT_OBJECT_0 <= result < len(handles):
                handle = handles[result - WAIT_OBJECT_0]

                if handle == self.interrupt_handle:
                    # check if signal handler is callable
                    # to avoid 'int not callable' error (Python issue #23395)
                    if callable(signal.getsignal(signal.SIGINT)):
                        interrupt_main()

                elif handle == self.parent_handle:
                    get_logger().warning("Parent appears to have exited, shutting down.")
                    os._exit(1)
            elif result < 0:
                # wait failed, just give up and stop polling.
                warnings.warn("""Parent poll failed.  If the frontend dies,
                the kernel may be left running.  Please let us know
                about your system (bitness, Python, etc.) at
                ipython-dev@scipy.org""")
                return
|
455
packages/python/yap_kernel/yap_kernel/pickleutil.py
Normal file
455
packages/python/yap_kernel/yap_kernel/pickleutil.py
Normal file
@ -0,0 +1,455 @@
|
||||
# encoding: utf-8
|
||||
"""Pickle related utilities. Perhaps this should be called 'can'."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import warnings
|
||||
warnings.warn("yap_kernel.pickleutil is deprecated. It has moved to ipyparallel.", DeprecationWarning)
|
||||
|
||||
import copy
|
||||
import sys
|
||||
from types import FunctionType
|
||||
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
|
||||
from ipython_genutils import py3compat
|
||||
from ipython_genutils.importstring import import_item
|
||||
from ipython_genutils.py3compat import string_types, iteritems, buffer_to_bytes, buffer_to_bytes_py2
|
||||
|
||||
# This registers a hook when it's imported
|
||||
try:
|
||||
# available since ipyparallel 5.1.1
|
||||
from ipyparallel.serialize import codeutil
|
||||
except ImportError:
|
||||
# Deprecated since yap_kernel 4.3.1
|
||||
from yap_kernel import codeutil
|
||||
|
||||
from traitlets.log import get_logger
|
||||
|
||||
if py3compat.PY3:
|
||||
buffer = memoryview
|
||||
class_type = type
|
||||
else:
|
||||
from types import ClassType
|
||||
class_type = (type, ClassType)
|
||||
|
||||
try:
|
||||
PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL
|
||||
except AttributeError:
|
||||
PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL
|
||||
|
||||
def _get_cell_type(a=None):
    """Return the type of a closure cell.

    The cell type doesn't seem to be importable, so build a throwaway
    closure and inspect it.
    """
    def _probe():
        return a
    return type(py3compat.get_closure(_probe)[0])
|
||||
|
||||
cell_type = _get_cell_type()
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Functions
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
|
||||
def interactive(f):
    """decorator for making functions appear as interactively defined.
    This results in the function being linked to the user_ns as globals()
    instead of the module globals().
    """
    if isinstance(f, FunctionType):
        # Rebuild the function on top of __main__'s namespace so its
        # globals resolve against the user namespace.
        # Interactive functions never have closures, that's kind of the point.
        main_ns = __import__('__main__').__dict__
        f = FunctionType(f.__code__, main_ns, f.__name__, f.__defaults__)
    # associate with __main__ for uncanning
    f.__module__ = '__main__'
    return f
|
||||
|
||||
|
||||
def use_dill():
    """use dill to expand serialization support

    adds support for object methods and closures to serialization.
    """
    # import dill causes most of the magic
    import dill

    # dill doesn't work with cPickle,
    # tell the two relevant modules to use plain pickle

    # rebind this module's pickle to dill
    global pickle
    pickle = dill

    # serialize module may be absent; best-effort rebind
    try:
        from yap_kernel import serialize
    except ImportError:
        pass
    else:
        serialize.pickle = dill

    # disable special function handling, let dill take care of it
    can_map.pop(FunctionType, None)
|
||||
|
||||
def use_cloudpickle():
    """use cloudpickle to expand serialization support

    adds support for object methods and closures to serialization.
    """
    import cloudpickle

    # rebind this module's pickle to cloudpickle
    global pickle
    pickle = cloudpickle

    # serialize module may be absent; best-effort rebind
    try:
        from yap_kernel import serialize
    except ImportError:
        pass
    else:
        serialize.pickle = cloudpickle

    # disable special function handling, let cloudpickle take care of it
    can_map.pop(FunctionType, None)
|
||||
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Classes
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class CannedObject(object):
    def __init__(self, obj, keys=(), hook=None):
        """can an object for safe pickling

        Parameters
        ==========

        obj:
            The object to be canned
        keys: list (optional)
            list of attribute names that will be explicitly canned / uncanned
        hook: callable (optional)
            An optional extra callable,
            which can do additional processing of the uncanned object.

        large data may be offloaded into the buffers list,
        used for zero-copy transfers.
        """
        # default changed from the mutable `[]` to `()`: a shared mutable
        # default is a classic Python pitfall, and keys is only iterated.
        self.keys = keys
        self.obj = copy.copy(obj)
        self.hook = can(hook)
        for key in keys:
            # can each listed attribute on the *copy*
            setattr(self.obj, key, can(getattr(obj, key)))

        self.buffers = []

    def get_object(self, g=None):
        """Reconstruct the object, uncanning listed attributes in namespace g."""
        if g is None:
            g = {}
        obj = self.obj
        for key in self.keys:
            setattr(obj, key, uncan(getattr(obj, key), g))

        if self.hook:
            # run the optional post-processing hook on the restored object
            self.hook = uncan(self.hook, g)
            self.hook(obj, g)
        return self.obj
|
||||
|
||||
|
||||
class Reference(CannedObject):
    """object for wrapping a remote reference by name."""

    def __init__(self, name):
        # only the name is stored; resolution happens at uncanning time
        if not isinstance(name, string_types):
            raise TypeError("illegal name: %r" % name)
        self.name = name
        self.buffers = []

    def __repr__(self):
        return "<Reference: %r>" % self.name

    def get_object(self, g=None):
        """Resolve the stored name in namespace `g` (default empty)."""
        namespace = {} if g is None else g
        return eval(self.name, namespace)
|
||||
|
||||
|
||||
class CannedCell(CannedObject):
    """Can a closure cell"""
    def __init__(self, cell):
        # can the cell's payload; the cell wrapper itself is rebuilt later
        self.cell_contents = can(cell.cell_contents)

    def get_object(self, g=None):
        # rebuild a real cell by closing over the restored value
        cell_contents = uncan(self.cell_contents, g)
        def inner():
            return cell_contents
        return py3compat.get_closure(inner)[0]
|
||||
|
||||
|
||||
class CannedFunction(CannedObject):
    # Cans a plain function as code + defaults + closure (globals are NOT
    # captured; they are re-bound at uncanning time).

    def __init__(self, f):
        self._check_type(f)
        self.code = f.__code__
        if f.__defaults__:
            self.defaults = [ can(fd) for fd in f.__defaults__ ]
        else:
            self.defaults = None

        closure = py3compat.get_closure(f)
        if closure:
            self.closure = tuple( can(cell) for cell in closure )
        else:
            self.closure = None

        self.module = f.__module__ or '__main__'
        self.__name__ = f.__name__
        self.buffers = []

    def _check_type(self, obj):
        # NOTE: assert is stripped under -O; callers rely on FunctionType here
        assert isinstance(obj, FunctionType), "Not a function type"

    def get_object(self, g=None):
        # try to load function back into its module:
        if not self.module.startswith('__'):
            __import__(self.module)
            g = sys.modules[self.module].__dict__

        if g is None:
            g = {}
        if self.defaults:
            defaults = tuple(uncan(cfd, g) for cfd in self.defaults)
        else:
            defaults = None
        if self.closure:
            closure = tuple(uncan(cell, g) for cell in self.closure)
        else:
            closure = None
        # rebuild the function with g as its globals
        newFunc = FunctionType(self.code, g, self.__name__, defaults, closure)
        return newFunc
|
||||
|
||||
class CannedClass(CannedObject):
    # Cans a class by canning its dict and its MRO parents.

    def __init__(self, cls):
        self._check_type(cls)
        self.name = cls.__name__
        # old-style (py2 classic) classes have no mro()
        self.old_style = not isinstance(cls, type)
        self._canned_dict = {}
        for k,v in cls.__dict__.items():
            # __weakref__/__dict__ descriptors cannot be transplanted
            if k not in ('__weakref__', '__dict__'):
                self._canned_dict[k] = can(v)
        if self.old_style:
            mro = []
        else:
            mro = cls.mro()

        # skip mro[0] (the class itself)
        self.parents = [ can(c) for c in mro[1:] ]
        self.buffers = []

    def _check_type(self, obj):
        # NOTE: assert is stripped under -O
        assert isinstance(obj, class_type), "Not a class type"

    def get_object(self, g=None):
        # rebuild the class from its uncanned parents and dict
        parents = tuple(uncan(p, g) for p in self.parents)
        return type(self.name, parents, uncan_dict(self._canned_dict, g=g))
|
||||
|
||||
class CannedArray(CannedObject):
    """Can a numpy array, using a zero-copy buffer when the dtype allows."""
    def __init__(self, obj):
        from numpy import ascontiguousarray
        self.shape = obj.shape
        self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str
        self.pickled = False
        if sum(obj.shape) == 0:
            # degenerate (empty) arrays: cheapest to just pickle
            self.pickled = True
        elif obj.dtype == 'O':
            # can't handle object dtype with buffer approach
            self.pickled = True
        elif obj.dtype.fields and any(dt == 'O' for dt,sz in obj.dtype.fields.values()):
            # structured dtype with embedded object fields
            self.pickled = True
        if self.pickled:
            # just pickle it
            self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)]
        else:
            # ensure contiguous so the raw buffer matches the logical layout
            obj = ascontiguousarray(obj, dtype=None)
            self.buffers = [buffer(obj)]

    def get_object(self, g=None):
        from numpy import frombuffer
        data = self.buffers[0]
        if self.pickled:
            # we just pickled it
            return pickle.loads(buffer_to_bytes_py2(data))
        else:
            if not py3compat.PY3 and isinstance(data, memoryview):
                # frombuffer doesn't accept memoryviews on Python 2,
                # so cast to old-style buffer
                data = buffer(data.tobytes())
            return frombuffer(data, dtype=self.dtype).reshape(self.shape)
|
||||
|
||||
|
||||
class CannedBytes(CannedObject):
    """Can a bytes-like object by stashing it in the buffers list."""

    # conversion applied on the way out; subclasses override this
    wrap = staticmethod(buffer_to_bytes)

    def __init__(self, obj):
        self.buffers = [obj]

    def get_object(self, g=None):
        return self.wrap(self.buffers[0])
|
||||
|
||||
class CannedBuffer(CannedBytes):
    # like CannedBytes, but restores a buffer (memoryview on py3)
    wrap = buffer
|
||||
|
||||
class CannedMemoryView(CannedBytes):
    """CannedBytes variant that restores a memoryview instead of bytes."""
    wrap = memoryview
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Functions
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
def _import_mapping(mapping, original=None):
    """Import any string keys in a (un)canning type map, in place.

    String keys are dotted import paths; each is replaced by the class it
    names. Unimportable entries are dropped, with an error logged only for
    keys that were not part of *original* (i.e. user-added entries).
    """
    log = get_logger()
    log.debug("Importing canning map")
    for name, handler in list(mapping.items()):
        if not isinstance(name, string_types):
            continue
        try:
            klass = import_item(name)
        except Exception:
            if original and name not in original:
                # only message on user-added classes
                log.error("canning class not importable: %r", name, exc_info=True)
            mapping.pop(name)
        else:
            # swap the dotted-path key for the imported class itself
            mapping[klass] = mapping.pop(name)
|
||||
|
||||
def istype(obj, check):
    """Strict type test: like isinstance(obj, check), but exact.

    Subclasses do NOT match; *check* may be a single type or a tuple
    of types.
    """
    if isinstance(check, tuple):
        return any(type(obj) is cls for cls in check)
    return type(obj) is check
|
||||
|
||||
def can(obj):
    """prepare an object for pickling

    Walks ``can_map`` and returns the first exact-type canner's result;
    objects with no registered canner are returned unchanged.
    """
    import_needed = False

    for cls,canner in iteritems(can_map):
        if isinstance(cls, string_types):
            # registry still holds unimported dotted names; resolve below
            import_needed = True
            break
        elif istype(obj, cls):
            return canner(obj)

    if import_needed:
        # perform can_map imports, then try again
        # this will usually only happen once
        _import_mapping(can_map, _original_can_map)
        return can(obj)

    return obj
|
||||
|
||||
def can_class(obj):
    """Can classes defined interactively in __main__.

    Such classes cannot be pickled by reference, so they are wrapped in
    CannedClass; everything else passes through unchanged.
    """
    if isinstance(obj, class_type) and obj.__module__ == '__main__':
        return CannedClass(obj)
    else:
        return obj
|
||||
|
||||
def can_dict(obj):
    """can the *values* of a dict (keys are left alone).

    Only plain dicts (exact type match) are processed; subclasses and
    non-dicts are returned unchanged.
    """
    if not istype(obj, dict):
        return obj
    return {key: can(value) for key, value in iteritems(obj)}
|
||||
|
||||
# container types whose elements are canned individually
sequence_types = (list, tuple, set)


def can_sequence(obj):
    """can the elements of a list/tuple/set, preserving the container type.

    Only exact type matches are processed; subclasses and other objects
    are returned unchanged.
    """
    if not istype(obj, sequence_types):
        return obj
    seq_cls = type(obj)
    return seq_cls(can(item) for item in obj)
|
||||
|
||||
def uncan(obj, g=None):
    """invert canning

    Walks ``uncan_map`` (isinstance match, unlike can's exact match) and
    applies the first matching uncanner; unmatched objects pass through.

    g : optional globals dict forwarded to the uncanner.
    """
    import_needed = False
    for cls,uncanner in iteritems(uncan_map):
        if isinstance(cls, string_types):
            # registry still holds unimported dotted names; resolve below
            import_needed = True
            break
        elif isinstance(obj, cls):
            return uncanner(obj, g)

    if import_needed:
        # perform uncan_map imports, then try again
        # this will usually only happen once
        _import_mapping(uncan_map, _original_uncan_map)
        return uncan(obj, g)

    return obj
|
||||
|
||||
def uncan_dict(obj, g=None):
    """Uncan the values of a plain dict (exact type match); keys untouched."""
    if not istype(obj, dict):
        return obj
    return {key: uncan(value, g) for key, value in iteritems(obj)}
|
||||
|
||||
def uncan_sequence(obj, g=None):
    """Uncan the elements of a list/tuple/set, preserving the container type."""
    if not istype(obj, sequence_types):
        return obj
    seq_cls = type(obj)
    return seq_cls(uncan(item, g) for item in obj)
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# API dictionaries
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
# These dicts can be extended for custom serialization of new objects
|
||||
|
||||
# type -> canner; string keys are dotted paths resolved lazily by _import_mapping
can_map = {
    'numpy.ndarray' : CannedArray,
    FunctionType : CannedFunction,
    bytes : CannedBytes,
    memoryview : CannedMemoryView,
    cell_type : CannedCell,
    class_type : can_class,
}
# Python 2 only: old-style buffer objects get their own canner
if buffer is not memoryview:
    can_map[buffer] = CannedBuffer

# type -> uncanner, matched with isinstance (so all CannedObject subclasses hit
# the first entry)
uncan_map = {
    CannedObject : lambda obj, g: obj.get_object(g),
    dict : uncan_dict,
}

# for use in _import_mapping:
_original_can_map = can_map.copy()
_original_uncan_map = uncan_map.copy()
|
163
packages/python/yap_kernel/yap_kernel/pylab/backend_inline.py
Normal file
163
packages/python/yap_kernel/yap_kernel/pylab/backend_inline.py
Normal file
@ -0,0 +1,163 @@
|
||||
"""A matplotlib backend for publishing figures via display_data"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import matplotlib
|
||||
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg # analysis: ignore
|
||||
from matplotlib._pylab_helpers import Gcf
|
||||
|
||||
from IPython.core.getipython import get_ipython
|
||||
from IPython.core.display import display
|
||||
|
||||
from .config import InlineBackend
|
||||
|
||||
|
||||
def show(close=None, block=None):
    """Show all figures as SVG/PNG payloads sent to the IPython clients.

    Parameters
    ----------
    close : bool, optional
        If true, a ``plt.close('all')`` call is automatically issued after
        sending all the figures. If this is set, the figures will be entirely
        removed from the internal list of figures.
    block : Not used.
        The `block` parameter is a Matplotlib experimental parameter.
        We accept it in the function signature for compatibility with other
        backends.
    """
    if close is None:
        # fall back to the configured default
        close = InlineBackend.instance().close_figures
    try:
        for figure_manager in Gcf.get_all_fig_managers():
            display(figure_manager.canvas.figure)
    finally:
        # always clear the pending-draw queue, even if display() raised
        show._to_draw = []
        # only call close('all') if any to close
        # close triggers gc.collect, which can be slow
        if close and Gcf.get_all_fig_managers():
            matplotlib.pyplot.close('all')


# This flag will be reset by draw_if_interactive when called
show._draw_called = False
# list of figures to draw when flush_figures is called
show._to_draw = []
|
||||
|
||||
|
||||
def draw_if_interactive():
    """
    Is called after every pylab drawing command
    """
    # signal that the current active figure should be sent at the end of
    # execution. Also sets the _draw_called flag, signaling that there will be
    # something to send. At the end of the code execution, a separate call to
    # flush_figures() will act upon these values
    manager = Gcf.get_active()
    if manager is None:
        # no active figure; nothing to queue
        return
    fig = manager.canvas.figure

    # Hack: matplotlib FigureManager objects in interactive backends (at least
    # in some of them) monkeypatch the figure object and add a .show() method
    # to it. This applies the same monkeypatch in order to support user code
    # that might expect `.show()` to be part of the official API of figure
    # objects.
    # For further reference:
    # https://github.com/ipython/ipython/issues/1612
    # https://github.com/matplotlib/matplotlib/issues/835

    if not hasattr(fig, 'show'):
        # Queue up `fig` for display
        fig.show = lambda *a: display(fig)

    # If matplotlib was manually set to non-interactive mode, this function
    # should be a no-op (otherwise we'll generate duplicate plots, since a user
    # who set ioff() manually expects to make separate draw/show calls).
    if not matplotlib.is_interactive():
        return

    # ensure current figure will be drawn, and each subsequent call
    # of draw_if_interactive() moves the active figure to ensure it is
    # drawn last
    try:
        show._to_draw.remove(fig)
    except ValueError:
        # ensure it only appears in the draw list once
        pass
    # Queue up the figure for drawing in next show() call
    show._to_draw.append(fig)
    show._draw_called = True
|
||||
|
||||
|
||||
def flush_figures():
    """Send all figures that changed

    This is meant to be called automatically and will call show() if, during
    prior code execution, there had been any calls to draw_if_interactive.

    This function is meant to be used as a post_execute callback in IPython,
    so user-caused errors are handled with showtraceback() instead of being
    allowed to raise. If this function is not called from within IPython,
    then these exceptions will raise.
    """
    if not show._draw_called:
        # nothing was drawn during this execution; nothing to send
        return

    if InlineBackend.instance().close_figures:
        # ignore the tracking, just draw and close all figures
        # NOTE(review): this path returns without resetting show._draw_called;
        # show() only clears _to_draw — confirm whether that is intentional
        try:
            return show(True)
        except Exception as e:
            # safely show traceback if in IPython, else raise
            ip = get_ipython()
            if ip is None:
                raise e
            else:
                ip.showtraceback()
                return
    try:
        # exclude any figures that were closed:
        active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])
        for fig in [ fig for fig in show._to_draw if fig in active ]:
            try:
                display(fig)
            except Exception as e:
                # safely show traceback if in IPython, else raise
                ip = get_ipython()
                if ip is None:
                    raise e
                else:
                    ip.showtraceback()
                    return
    finally:
        # clear flags for next round
        show._to_draw = []
        show._draw_called = False
|
||||
|
||||
|
||||
# Changes to matplotlib in version 1.2 requires a mpl backend to supply a default
|
||||
# figurecanvas. This is set here to a Agg canvas
|
||||
# See https://github.com/matplotlib/matplotlib/pull/1125
|
||||
FigureCanvas = FigureCanvasAgg
|
||||
|
||||
def _enable_matplotlib_integration():
    """Enable extra IPython matplotlib integration when we are loaded as the matplotlib backend."""
    from matplotlib import get_backend
    ip = get_ipython()
    backend = get_backend()
    # only act when running inside IPython AND this module is the active backend
    if ip and backend == 'module://%s' % __name__:
        from IPython.core.pylabtools import configure_inline_support
        try:
            configure_inline_support(ip, backend)
        except ImportError:
            # bugs may cause a circular import on Python 2;
            # defer configuration until after the first cell runs,
            # then unregister so it only happens once
            def configure_once(*args):
                configure_inline_support(ip, backend)
                ip.events.unregister('post_run_cell', configure_once)
            ip.events.register('post_run_cell', configure_once)
|
||||
|
||||
_enable_matplotlib_integration()
|
110
packages/python/yap_kernel/yap_kernel/pylab/config.py
Normal file
110
packages/python/yap_kernel/yap_kernel/pylab/config.py
Normal file
@ -0,0 +1,110 @@
|
||||
"""Configurable for configuring the IPython inline backend
|
||||
|
||||
This module does not import anything from matplotlib.
|
||||
"""
|
||||
#-----------------------------------------------------------------------------
|
||||
# Copyright (C) 2011 The IPython Development Team
|
||||
#
|
||||
# Distributed under the terms of the BSD License. The full license is in
|
||||
# the file COPYING, distributed as part of this software.
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
from traitlets.config.configurable import SingletonConfigurable
|
||||
from traitlets import (
|
||||
Dict, Instance, Set, Bool, TraitError, Unicode
|
||||
)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Configurable for inline backend options
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
def pil_available():
    """Test if PIL/Pillow is available.

    Returns
    -------
    bool
        True if ``PIL.Image`` can be imported, False otherwise.
    """
    try:
        from PIL import Image  # noqa: F401
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
        # Exception keeps the deliberate best-effort behavior (a broken PIL
        # install still just reports "not available") without that hazard
        return False
    return True
|
||||
|
||||
# inherit from InlineBackendConfig for deprecation purposes
|
||||
class InlineBackendConfig(SingletonConfigurable):
    """Deprecated base kept so legacy ``InlineBackendConfig`` config keys still apply."""
    pass
|
||||
|
||||
class InlineBackend(InlineBackendConfig):
    """An object to store configuration of the inline backend."""

    # The typical default figure size is too large for inline use,
    # so we shrink the figure size to 6x4, and tweak fonts to
    # make that fit.
    rc = Dict({'figure.figsize': (6.0,4.0),
        # play nicely with white background in the Qt and notebook frontend
        'figure.facecolor': (1,1,1,0),
        'figure.edgecolor': (1,1,1,0),
        # 12pt labels get cutoff on 6x4 logplots, so use 10pt.
        'font.size': 10,
        # 72 dpi matches SVG/qtconsole
        # this only affects PNG export, as SVG has no dpi setting
        'figure.dpi': 72,
        # 10pt still needs a little more room on the xlabel:
        'figure.subplot.bottom' : .125
        },
        help="""Subset of matplotlib rcParams that should be different for the
        inline backend."""
    ).tag(config=True)

    figure_formats = Set({'png'},
                          help="""A set of figure formats to enable: 'png',
                          'retina', 'jpeg', 'svg', 'pdf'.""").tag(config=True)

    def _update_figure_formatters(self):
        """Re-select display formatters on the attached shell, if any."""
        if self.shell is not None:
            from IPython.core.pylabtools import select_figure_formats
            select_figure_formats(self.shell, self.figure_formats, **self.print_figure_kwargs)

    # NOTE: _<trait>_changed is the legacy traitlets observer naming
    # convention; renaming these methods would silently disconnect them.
    def _figure_formats_changed(self, name, old, new):
        # JPEG output requires PIL/Pillow; fail loudly at config time
        if 'jpg' in new or 'jpeg' in new:
            if not pil_available():
                raise TraitError("Requires PIL/Pillow for JPG figures")
        self._update_figure_formatters()

    figure_format = Unicode(help="""The figure format to enable (deprecated
                                 use `figure_formats` instead)""").tag(config=True)

    def _figure_format_changed(self, name, old, new):
        # deprecated single-format trait forwards into figure_formats
        if new:
            self.figure_formats = {new}

    print_figure_kwargs = Dict({'bbox_inches' : 'tight'},
        help="""Extra kwargs to be passed to fig.canvas.print_figure.

        Logical examples include: bbox_inches, quality (for jpeg figures), etc.
        """
    ).tag(config=True)
    # changing print kwargs also requires re-registering formatters
    _print_figure_kwargs_changed = _update_figure_formatters

    close_figures = Bool(True,
        help="""Close all figures at the end of each cell.

        When True, ensures that each cell starts with no active figures, but it
        also means that one must keep track of references in order to edit or
        redraw figures in subsequent cells. This mode is ideal for the notebook,
        where residual plots from other cells might be surprising.

        When False, one must call figure() to create new figures. This means
        that gcf() and getfigs() can reference figures created in other cells,
        and the active figure can continue to be edited with pylab/pyplot
        methods that reference the current active figure. This mode facilitates
        iterative editing of figures, and behaves most consistently with
        other matplotlib backends, but figure barriers between cells must
        be explicit.
        """).tag(config=True)

    # the InteractiveShell this backend is bound to (None when unattached)
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)
|
||||
|
BIN
packages/python/yap_kernel/yap_kernel/resources/logo-32x32.png
Normal file
BIN
packages/python/yap_kernel/yap_kernel/resources/logo-32x32.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.1 KiB |
BIN
packages/python/yap_kernel/yap_kernel/resources/logo-64x64.png
Normal file
BIN
packages/python/yap_kernel/yap_kernel/resources/logo-64x64.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.1 KiB |
186
packages/python/yap_kernel/yap_kernel/serialize.py
Normal file
186
packages/python/yap_kernel/yap_kernel/serialize.py
Normal file
@ -0,0 +1,186 @@
|
||||
"""serialization utilities for apply messages"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import warnings
|
||||
warnings.warn("yap_kernel.serialize is deprecated. It has moved to ipyparallel.serialize", DeprecationWarning)
|
||||
|
||||
try:
|
||||
import cPickle
|
||||
pickle = cPickle
|
||||
except:
|
||||
cPickle = None
|
||||
import pickle
|
||||
|
||||
from itertools import chain
|
||||
|
||||
from ipython_genutils.py3compat import PY3, buffer_to_bytes_py2
|
||||
from yap_kernel.pickleutil import (
|
||||
can, uncan, can_sequence, uncan_sequence, CannedObject,
|
||||
istype, sequence_types, PICKLE_PROTOCOL,
|
||||
)
|
||||
from jupyter_client.session import MAX_ITEMS, MAX_BYTES
|
||||
|
||||
|
||||
if PY3:
|
||||
buffer = memoryview
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Serialization Functions
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _extract_buffers(obj, threshold=MAX_BYTES):
    """extract buffers larger than a certain threshold

    Large buffers are pulled out of *obj* (their slot is set to None as a
    placeholder for _restore_buffers) and returned for separate zero-copy
    transmission; small buffer-ish objects are coerced to bytes in place so
    they pickle safely.
    """
    buffers = []
    if isinstance(obj, CannedObject) and obj.buffers:
        for i,buf in enumerate(obj.buffers):
            if len(buf) > threshold:
                # buffer larger than threshold, prevent pickling
                obj.buffers[i] = None
                buffers.append(buf)
            # buffer too small for separate send, coerce to bytes
            # because pickling buffer objects just results in broken pointers
            elif isinstance(buf, memoryview):
                obj.buffers[i] = buf.tobytes()
            elif isinstance(buf, buffer):
                obj.buffers[i] = bytes(buf)
    return buffers
|
||||
|
||||
def _restore_buffers(obj, buffers):
    """restore buffers extracted by _extract_buffers, in place

    Consumes entries from the front of *buffers*, filling the None
    placeholders left in obj.buffers.
    """
    if isinstance(obj, CannedObject) and obj.buffers:
        for i,buf in enumerate(obj.buffers):
            if buf is None:
                obj.buffers[i] = buffers.pop(0)
|
||||
|
||||
def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
    """Serialize an object into a list of sendable buffers.

    Parameters
    ----------

    obj : object
        The object to be serialized
    buffer_threshold : int
        The threshold (in bytes) for pulling out data buffers
        to avoid pickling them.
    item_threshold : int
        The maximum number of items over which canning will iterate.
        Containers (lists, dicts) larger than this will be pickled without
        introspection.

    Returns
    -------
    [bufs] : list of buffers representing the serialized object.
    """
    buffers = []
    # small sequences/dicts are canned element-by-element so each element's
    # large buffers can be extracted; anything else is canned whole
    if istype(obj, sequence_types) and len(obj) < item_threshold:
        cobj = can_sequence(obj)
        for c in cobj:
            buffers.extend(_extract_buffers(c, buffer_threshold))
    elif istype(obj, dict) and len(obj) < item_threshold:
        cobj = {}
        # sorted() gives a deterministic buffer order for deserialize_object
        for k in sorted(obj):
            c = can(obj[k])
            buffers.extend(_extract_buffers(c, buffer_threshold))
            cobj[k] = c
    else:
        cobj = can(obj)
        buffers.extend(_extract_buffers(cobj, buffer_threshold))

    # the pickled canned object always travels first
    buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL))
    return buffers
|
||||
|
||||
def deserialize_object(buffers, g=None):
    """reconstruct an object serialized by serialize_object from data buffers.

    Parameters
    ----------

    bufs : list of buffers/bytes

    g : globals to be used when uncanning

    Returns
    -------

    (newobj, bufs) : unpacked object, and the list of remaining unused buffers.
    """
    bufs = list(buffers)
    # first buffer is always the pickled canned object
    pobj = buffer_to_bytes_py2(bufs.pop(0))
    canned = pickle.loads(pobj)
    # mirror serialize_object's three cases, restoring extracted buffers
    # before uncanning (dict keys in the same sorted order they were packed)
    if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
        for c in canned:
            _restore_buffers(c, bufs)
        newobj = uncan_sequence(canned, g)
    elif istype(canned, dict) and len(canned) < MAX_ITEMS:
        newobj = {}
        for k in sorted(canned):
            c = canned[k]
            _restore_buffers(c, bufs)
            newobj[k] = uncan(c, g)
    else:
        _restore_buffers(canned, bufs)
        newobj = uncan(canned, g)

    return newobj, bufs
|
||||
|
||||
def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
    """pack up a function, args, and kwargs to be sent over the wire

    Each element of args/kwargs will be canned for special treatment,
    but inspection will not go any deeper than that.

    Any object whose data is larger than `threshold` will not have their data copied
    (only numpy arrays and bytes/buffers support zero-copy)

    Message will be a list of bytes/buffers of the format:

    [ cf, pinfo, <arg_bufs>, <kwarg_bufs> ]

    With length at least two + len(args) + len(kwargs)
    """

    # each positional arg serializes to one or more buffers; flatten them
    arg_bufs = list(chain.from_iterable(
        serialize_object(arg, buffer_threshold, item_threshold) for arg in args))

    # kwargs are packed in sorted key order so unpack_apply_message can
    # reassociate buffers with keys deterministically
    kw_keys = sorted(kwargs.keys())
    kwarg_bufs = list(chain.from_iterable(
        serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys))

    # bookkeeping needed by unpack_apply_message to split the buffer list
    info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys)

    msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)]
    msg.append(pickle.dumps(info, PICKLE_PROTOCOL))
    msg.extend(arg_bufs)
    msg.extend(kwarg_bufs)

    return msg
|
||||
|
||||
def unpack_apply_message(bufs, g=None, copy=True):
    """unpack f,args,kwargs from buffers packed by pack_apply_message()
    Returns: original f,args,kwargs

    g : globals used when uncanning.
    copy : accepted for API compatibility; not used in this implementation.
    """
    bufs = list(bufs) # allow us to pop
    assert len(bufs) >= 2, "not enough buffers!"
    # first two buffers: the canned function, then the info dict
    pf = buffer_to_bytes_py2(bufs.pop(0))
    f = uncan(pickle.loads(pf), g)
    pinfo = buffer_to_bytes_py2(bufs.pop(0))
    info = pickle.loads(pinfo)
    # split remaining buffers between positional args and kwargs
    arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:]

    args = []
    for i in range(info['nargs']):
        # deserialize_object consumes its buffers and returns the rest
        arg, arg_bufs = deserialize_object(arg_bufs, g)
        args.append(arg)
    args = tuple(args)
    assert not arg_bufs, "Shouldn't be any arg bufs left over"

    kwargs = {}
    # kw_keys is sorted; buffers were packed in the same order
    for key in info['kw_keys']:
        kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)
        kwargs[key] = kwarg
    assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"

    return f,args,kwargs
|
49
packages/python/yap_kernel/yap_kernel/tests/__init__.py
Normal file
49
packages/python/yap_kernel/yap_kernel/tests/__init__.py
Normal file
@ -0,0 +1,49 @@
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
try:
|
||||
from unittest.mock import patch
|
||||
except ImportError:
|
||||
from mock import patch
|
||||
|
||||
from jupyter_core import paths as jpaths
|
||||
from IPython import paths as ipaths
|
||||
from yap_kernel.kernelspec import install
|
||||
|
||||
pjoin = os.path.join
|
||||
|
||||
tmp = None
|
||||
patchers = []
|
||||
|
||||
def setup():
    """setup temporary env for tests

    Redirects HOME to a fresh temp dir (so user-level config/kernelspec
    writes don't touch the real home) and installs the kernelspec there.
    Module-level nose-style fixture, paired with teardown() below.
    """
    global tmp
    tmp = tempfile.mkdtemp()
    patchers[:] = [
        patch.dict(os.environ, {
            'HOME': tmp,
            # Let tests work with --user install when HOME is changed:
            'PYTHONPATH': os.pathsep.join(sys.path),
        }),
    ]
    for p in patchers:
        p.start()

    # install IPython in the temp home:
    install(user=True)
|
||||
|
||||
|
||||
def teardown():
    """Undo setup(): stop env patches and remove the temp home dir."""
    for p in patchers:
        p.stop()

    try:
        shutil.rmtree(tmp)
    except (OSError, IOError):
        # no such file
        pass
|
63
packages/python/yap_kernel/yap_kernel/tests/test_connect.py
Normal file
63
packages/python/yap_kernel/yap_kernel/tests/test_connect.py
Normal file
@ -0,0 +1,63 @@
|
||||
"""Tests for kernel connection utilities"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
from traitlets.config import Config
|
||||
from ipython_genutils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
|
||||
from ipython_genutils.py3compat import str_to_bytes
|
||||
from yap_kernel import connect
|
||||
from yap_kernel.kernelapp import YAP_KernelApp
|
||||
|
||||
|
||||
sample_info = dict(ip='1.2.3.4', transport='ipc',
|
||||
shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
|
||||
key=b'abc123', signature_scheme='hmac-md5',
|
||||
)
|
||||
|
||||
|
||||
class DummyKernelApp(YAP_KernelApp):
    """Kernel app stub: initializes only the profile dir and connection file,
    skipping sockets/heartbeat so tests stay fast and side-effect free."""
    def initialize(self, argv=[]):
        # NOTE(review): mutable default argv mirrors the parent signature; it
        # is never mutated here
        self.init_profile_dir()
        self.init_connection_file()
|
||||
|
||||
|
||||
def test_get_connection_file():
    """get_connection_file resolves a relative name against the connection dir."""
    cfg = Config()
    with TemporaryWorkingDirectory() as d:
        cfg.ProfileDir.location = d
        cf = 'kernel.json'
        app = DummyKernelApp(config=cfg, connection_file=cf)
        app.initialize()

        profile_cf = os.path.join(app.connection_dir, cf)
        nt.assert_equal(profile_cf, app.abs_connection_file)
        # the file must exist for get_connection_file to resolve it
        with open(profile_cf, 'w') as f:
            f.write("{}")
        nt.assert_true(os.path.exists(profile_cf))
        nt.assert_equal(connect.get_connection_file(app), profile_cf)

        # re-assigning the relative name resolves to the same absolute path
        app.connection_file = cf
        nt.assert_equal(connect.get_connection_file(app), profile_cf)
|
||||
|
||||
|
||||
def test_get_connection_info():
    """write_connection_file -> get_connection_info round-trips sample_info,
    both as an unpacked dict and as a JSON string."""
    with TemporaryDirectory() as d:
        cf = os.path.join(d, 'kernel.json')
        connect.write_connection_file(cf, **sample_info)
        json_info = connect.get_connection_info(cf)
        info = connect.get_connection_info(cf, unpack=True)

        nt.assert_equal(type(json_info), type(""))
        sub_info = {k:v for k,v in info.items() if k in sample_info}
        nt.assert_equal(sub_info, sample_info)

        info2 = json.loads(json_info)
        info2['key'] = str_to_bytes(info2['key'])
        # BUG FIX: previously filtered `info` again instead of `info2`,
        # so the JSON-decoded dict was never actually checked
        sub_info2 = {k:v for k,v in info2.items() if k in sample_info}
        nt.assert_equal(sub_info2, sample_info)
|
163
packages/python/yap_kernel/yap_kernel/tests/test_embed_kernel.py
Normal file
163
packages/python/yap_kernel/yap_kernel/tests/test_embed_kernel.py
Normal file
@ -0,0 +1,163 @@
|
||||
"""test IPython.embed_kernel()"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from contextlib import contextmanager
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
from jupyter_client import BlockingKernelClient
|
||||
from jupyter_core import paths
|
||||
from IPython.paths import get_ipython_dir
|
||||
from ipython_genutils import py3compat
|
||||
from ipython_genutils.py3compat import unicode_type
|
||||
|
||||
|
||||
SETUP_TIMEOUT = 60
|
||||
TIMEOUT = 15
|
||||
|
||||
|
||||
@contextmanager
def setup_kernel(cmd):
    """start an embedded kernel in a subprocess, and wait for it to be ready

    Returns
    -------
    kernel_manager: connected KernelManager instance
    """
    kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE)
    # the embedded kernel writes its connection file named after its pid
    connection_file = os.path.join(
        paths.jupyter_runtime_dir(),
        'kernel-%i.json' % kernel.pid,
    )
    # wait for connection file to exist, timeout after SETUP_TIMEOUT seconds
    tic = time.time()
    while not os.path.exists(connection_file) \
        and kernel.poll() is None \
        and time.time() < tic + SETUP_TIMEOUT:
        time.sleep(0.1)

    if kernel.poll() is not None:
        # subprocess died before producing a connection file: surface stderr
        o,e = kernel.communicate()
        e = py3compat.cast_unicode(e)
        raise IOError("Kernel failed to start:\n%s" % e)

    if not os.path.exists(connection_file):
        # timed out: clean up the still-running subprocess before failing
        if kernel.poll() is None:
            kernel.terminate()
        raise IOError("Connection file %r never arrived" % connection_file)

    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    client.start_channels()
    client.wait_for_ready()

    try:
        yield client
    finally:
        # always tear down channels and the subprocess, even if the test raised
        client.stop_channels()
        kernel.terminate()
|
||||
|
||||
def test_embed_kernel_basic():
    """IPython.embed_kernel() is basically functional"""
    cmd = '\n'.join([
        'from IPython import embed_kernel',
        'def go():',
        '    a=5',
        '    b="hi there"',
        '    embed_kernel()',
        'go()',
        '',
    ])

    with setup_kernel(cmd) as client:
        # oinfo a (int)
        msg_id = client.inspect('a')
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        nt.assert_true(content['found'])

        # execution inside the embedded kernel works
        msg_id = client.execute("c=a*2")
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        nt.assert_equal(content['status'], u'ok')

        # oinfo c (should be 10)
        msg_id = client.inspect('c')
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        nt.assert_true(content['found'])
        text = content['data']['text/plain']
        nt.assert_in('10', text)
|
||||
|
||||
def test_embed_kernel_namespace():
    """IPython.embed_kernel() inherits calling namespace"""
    cmd = '\n'.join([
        'from IPython import embed_kernel',
        'def go():',
        '    a=5',
        '    b="hi there"',
        '    embed_kernel()',
        'go()',
        '',
    ])

    with setup_kernel(cmd) as client:
        # oinfo a (int): local of go() should be visible in the kernel
        msg_id = client.inspect('a')
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        nt.assert_true(content['found'])
        text = content['data']['text/plain']
        nt.assert_in(u'5', text)

        # oinfo b (str)
        msg_id = client.inspect('b')
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        nt.assert_true(content['found'])
        text = content['data']['text/plain']
        nt.assert_in(u'hi there', text)

        # oinfo c (undefined): names outside go()'s scope must NOT leak in
        msg_id = client.inspect('c')
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        nt.assert_false(content['found'])
|
||||
|
||||
def test_embed_kernel_reentrant():
    """IPython.embed_kernel() can be called multiple times"""
    cmd = '\n'.join([
        'from IPython import embed_kernel',
        'count = 0',
        'def go():',
        '    global count',
        '    embed_kernel()',
        '    count = count + 1',
        '',
        # NOTE: no comma after 'while True:' — adjacent string literals
        # concatenate into the single (valid) line "while True: go()"
        'while True:'
        '    go()',
        '',
    ])

    with setup_kernel(cmd) as client:
        for i in range(5):
            # each loop iteration re-embeds; count reflects completed passes
            msg_id = client.inspect('count')
            msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
            content = msg['content']
            nt.assert_true(content['found'])
            text = content['data']['text/plain']
            nt.assert_in(unicode_type(i), text)

            # exit from embed_kernel
            client.execute("get_ipython().exit_now = True")
            msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
            # give the subprocess a moment to re-enter embed_kernel()
            time.sleep(0.2)
|
42
packages/python/yap_kernel/yap_kernel/tests/test_io.py
Normal file
42
packages/python/yap_kernel/yap_kernel/tests/test_io.py
Normal file
@ -0,0 +1,42 @@
|
||||
"""Test IO capturing functionality"""
|
||||
|
||||
import io
|
||||
|
||||
import zmq
|
||||
|
||||
from jupyter_client.session import Session
|
||||
from yap_kernel.iostream import IOPubThread, OutStream
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
def test_io_api():
    """Test that wrapped stdout has the same API as a normal TextIO object"""
    session = Session()
    ctx = zmq.Context()
    pub_socket = ctx.socket(zmq.PUB)
    pub_thread = IOPubThread(pub_socket)
    pub_thread.start()

    stream = OutStream(session, pub_thread, 'stdout')

    # cleanup unused zmq objects before we start testing
    pub_thread.stop()
    pub_thread.close()
    ctx.term()

    assert stream.errors is None
    assert not stream.isatty()

    # every read-side operation must be rejected on a write-only stream
    read_side_ops = (
        stream.detach,
        lambda: next(stream),
        stream.read,
        stream.readline,
        stream.seek,
        stream.tell,
    )
    for op in read_side_ops:
        with nt.assert_raises(io.UnsupportedOperation):
            op()
|
||||
|
||||
|
113
packages/python/yap_kernel/yap_kernel/tests/test_jsonutil.py
Normal file
113
packages/python/yap_kernel/yap_kernel/tests/test_jsonutil.py
Normal file
@ -0,0 +1,113 @@
|
||||
# coding: utf-8
|
||||
"""Test suite for our JSON utilities."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import json
|
||||
import sys
|
||||
|
||||
if sys.version_info < (3,):
|
||||
from base64 import decodestring as decodebytes
|
||||
else:
|
||||
from base64 import decodebytes
|
||||
|
||||
from datetime import datetime
|
||||
import numbers
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
from .. import jsonutil
|
||||
from ..jsonutil import json_clean, encode_images
|
||||
from ipython_genutils.py3compat import unicode_to_str, str_to_bytes, iteritems
|
||||
|
||||
class MyInt(object):
    """Object that is integer-like only through __int__."""

    def __int__(self):
        return 389

# Virtual subclass: isinstance(MyInt(), numbers.Integral) is True.
numbers.Integral.register(MyInt)
|
||||
|
||||
class MyFloat(object):
    """Object that is float-like only through __float__."""

    def __float__(self):
        return 3.14

# Virtual subclass: isinstance(MyFloat(), numbers.Real) is True.
numbers.Real.register(MyFloat)
|
||||
|
||||
|
||||
def test():
    """json_clean output must match expectations and be JSON-serializable."""
    # (input, expected) pairs; None means "expected equals the input".
    cases = [
        # scalars
        (1, None),
        (1.0, None),
        ('a', None),
        (True, None),
        (False, None),
        (None, None),
        # Containers
        ([1, 2], None),
        ((1, 2), [1, 2]),
        (set([1, 2]), [1, 2]),
        (dict(x=1), None),
        ({'x': 1, 'y': [1, 2, 3], '1': 'int'}, None),
        # More exotic objects
        ((x for x in range(3)), [0, 1, 2]),
        (iter([1, 2]), [1, 2]),
        (datetime(1991, 7, 3, 12, 00), "1991-07-03T12:00:00.000000"),
        (MyFloat(), 3.14),
        (MyInt(), 389),
    ]

    for value, expected in cases:
        cleaned = json_clean(value)
        # validate our cleanup
        nt.assert_equal(cleaned, value if expected is None else expected)
        # and ensure that what we return, indeed encodes cleanly
        json.loads(json.dumps(cleaned))
|
||||
|
||||
|
||||
def test_encode_images():
    """encode_images base64-encodes binary mime data and is idempotent."""
    # invalid data, but the header and footer are from real files
    pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82'
    jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9'
    pdfdata = b'%PDF-1.\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>'

    fmt = {
        'image/png' : pngdata,
        'image/jpeg' : jpegdata,
        'application/pdf' : pdfdata
    }
    encoded = encode_images(fmt)
    for mime, raw in iteritems(fmt):
        # encoded has unicode, want bytes
        nt.assert_equal(decodebytes(encoded[mime].encode('ascii')), raw)

    # encoding already-encoded data must be a no-op
    encoded2 = encode_images(encoded)
    nt.assert_equal(encoded, encoded2)

    b64_str = {}
    for mime, b64 in iteritems(encoded):
        b64_str[mime] = unicode_to_str(b64)
    encoded3 = encode_images(b64_str)
    nt.assert_equal(encoded3, b64_str)
    for mime, raw in iteritems(fmt):
        # encoded3 has str, want bytes
        nt.assert_equal(decodebytes(str_to_bytes(encoded3[mime])), raw)
|
||||
|
||||
def test_lambda():
    """Functions are not JSON-serializable and must be rejected."""
    with nt.assert_raises(ValueError):
        json_clean(lambda : 1)
|
||||
|
||||
|
||||
def test_exception():
    """Key collisions after stringification must raise ValueError."""
    bad_dicts = (
        {1: 'number', '1': 'string'},
        {True: 'bool', 'True': 'string'},
    )
    for bad in bad_dicts:
        nt.assert_raises(ValueError, json_clean, bad)
|
||||
|
||||
|
||||
def test_unicode_dict():
    """Unicode keys and values must pass through json_clean unchanged."""
    data = {u'üniço∂e': u'üniço∂e'}
    nt.assert_equal(data, jsonutil.json_clean(data))
|
283
packages/python/yap_kernel/yap_kernel/tests/test_kernel.py
Normal file
283
packages/python/yap_kernel/yap_kernel/tests/test_kernel.py
Normal file
@ -0,0 +1,283 @@
|
||||
# coding: utf-8
|
||||
"""test the IPython Kernel"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import io
|
||||
import os.path
|
||||
import sys
|
||||
import time
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
from IPython.testing import decorators as dec, tools as tt
|
||||
from ipython_genutils import py3compat
|
||||
from IPython.paths import locate_profile
|
||||
from ipython_genutils.tempdir import TemporaryDirectory
|
||||
|
||||
from .utils import (
|
||||
new_kernel, kernel, TIMEOUT, assemble_output, execute,
|
||||
flush_channels, wait_for_idle)
|
||||
|
||||
|
||||
def _check_master(kc, expected=True, stream="stdout"):
    """Assert that sys.<stream> in the kernel reports master-process status `expected`."""
    execute(kc=kc, code="import sys")
    flush_channels(kc)
    msg_id, content = execute(
        kc=kc, code="print (sys.%s._is_master_process())" % stream)
    out, _ = assemble_output(kc.iopub_channel)
    nt.assert_equal(out.strip(), repr(expected))
|
||||
|
||||
|
||||
def _check_status(content):
    """If status=error, show the traceback"""
    if content['status'] != 'error':
        return
    nt.assert_true(False, ''.join(['\n'] + content['traceback']))
|
||||
|
||||
|
||||
# printing tests
|
||||
|
||||
def test_simple_print():
    """simple print statement in kernel"""
    with kernel() as kc:
        msg_id, content = execute(kc=kc, code="print ('hi')")
        out, err = assemble_output(kc.iopub_channel)
        nt.assert_equal(out, 'hi\n')
        nt.assert_equal(err, '')
        _check_master(kc, expected=True)
|
||||
|
||||
|
||||
def test_sys_path():
    """test that sys.path doesn't get messed up by default"""
    with kernel() as kc:
        execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
        out, _ = assemble_output(kc.iopub_channel)
        nt.assert_equal(out, "''\n")
|
||||
|
||||
def test_sys_path_profile_dir():
    """test that sys.path doesn't get messed up when `--profile-dir` is specified"""
    with new_kernel(['--profile-dir', locate_profile('default')]) as kc:
        execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
        out, _ = assemble_output(kc.iopub_channel)
        nt.assert_equal(out, "''\n")
|
||||
|
||||
@dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_print():
    """printing from forked mp.Process"""
    with new_kernel() as kc:
        iopub = kc.iopub_channel

        _check_master(kc, expected=True)
        flush_channels(kc)
        np = 5
        code = '\n'.join([
            "from __future__ import print_function",
            "import time",
            "import multiprocessing as mp",
            "pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np,
            "for p in pool: p.start()",
            "for p in pool: p.join()",
            # BUG FIX: the comma used to sit *inside* the string
            # ("time.sleep(0.5),"), so the child executed the tuple
            # expression `time.sleep(0.5),` instead of a plain call.
            "time.sleep(0.5)",
        ])

        msg_id, content = execute(kc=kc, code=code)
        stdout, stderr = assemble_output(iopub)
        nt.assert_equal(stdout.count("hello"), np, stdout)
        for n in range(np):
            nt.assert_equal(stdout.count(str(n)), 1, stdout)
        nt.assert_equal(stderr, '')
        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")
|
||||
|
||||
|
||||
def test_subprocess_noprint():
    """mp.Process without print doesn't trigger iostream mp_mode"""
    with kernel() as kc:
        nproc = 5
        child_code = '\n'.join([
            "import multiprocessing as mp",
            "pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % nproc,
            "for p in pool: p.start()",
            "for p in pool: p.join()"
        ])

        execute(kc=kc, code=child_code)
        out, err = assemble_output(kc.iopub_channel)
        nt.assert_equal(out, '')
        nt.assert_equal(err, '')

        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")
|
||||
|
||||
|
||||
@dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_error():
    """error in mp.Process doesn't crash"""
    with new_kernel() as kc:
        child_code = '\n'.join([
            "import multiprocessing as mp",
            "p = mp.Process(target=int, args=('hi',))",
            "p.start()",
            "p.join()",
        ])

        execute(kc=kc, code=child_code)
        out, err = assemble_output(kc.iopub_channel)
        nt.assert_equal(out, '')
        nt.assert_true("ValueError" in err, err)

        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")
|
||||
|
||||
# raw_input tests
|
||||
|
||||
def test_raw_input():
    """test [raw_]input"""
    with kernel() as kc:
        input_f = "input" if py3compat.PY3 else "raw_input"
        theprompt = "prompt> "
        code = 'print({input_f}("{theprompt}"))'.format(**locals())
        kc.execute(code, allow_stdin=True)
        stdin_msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
        nt.assert_equal(stdin_msg['header']['msg_type'], u'input_request')
        nt.assert_equal(stdin_msg['content']['prompt'], theprompt)
        text = "some text"
        kc.input(text)
        reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
        nt.assert_equal(reply['content']['status'], 'ok')
        out, _ = assemble_output(kc.iopub_channel)
        nt.assert_equal(out, text + "\n")
|
||||
|
||||
|
||||
@dec.skipif(py3compat.PY3)
def test_eval_input():
    """test input() on Python 2"""
    with kernel() as kc:
        iopub = kc.iopub_channel

        # FIX: removed the unused local `input_f` — unlike test_raw_input,
        # this test always uses input() directly in the code template.
        theprompt = "prompt> "
        code = 'print(input("{theprompt}"))'.format(**locals())
        msg_id = kc.execute(code, allow_stdin=True)
        msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
        nt.assert_equal(msg['header']['msg_type'], u'input_request')
        content = msg['content']
        nt.assert_equal(content['prompt'], theprompt)
        # input() on Python 2 evaluates the reply as an expression
        kc.input("1+1")
        reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
        nt.assert_equal(reply['content']['status'], 'ok')
        stdout, stderr = assemble_output(iopub)
        nt.assert_equal(stdout, "2\n")
|
||||
|
||||
|
||||
def test_save_history():
    # Saving history from the kernel with %hist -f was failing because of
    # unicode problems on Python 2.
    with kernel() as kc, TemporaryDirectory() as td:
        hist_file = os.path.join(td, 'hist.out')
        execute(u'a=1', kc=kc)
        wait_for_idle(kc)
        execute(u'b=u"abcþ"', kc=kc)
        wait_for_idle(kc)
        _, reply = execute("%hist -f " + hist_file, kc=kc)
        nt.assert_equal(reply['status'], 'ok')
        with io.open(hist_file, encoding='utf-8') as f:
            saved = f.read()
        nt.assert_in(u'a=1', saved)
        nt.assert_in(u'b=u"abcþ"', saved)
|
||||
|
||||
|
||||
@dec.skip_without('faulthandler')
def test_smoke_faulthandler():
    """Enabling faulthandler inside the kernel must not error out."""
    with kernel() as kc:
        # Note: faulthandler.register is not available on windows.
        setup_code = u'\n'.join([
            'import sys',
            'import faulthandler',
            'import signal',
            'faulthandler.enable()',
            'if not sys.platform.startswith("win32"):',
            ' faulthandler.register(signal.SIGTERM)'])
        _, reply = execute(setup_code, kc=kc)
        nt.assert_equal(reply['status'], 'ok', reply.get('traceback', ''))
|
||||
|
||||
|
||||
def test_help_output():
    """ipython kernel --help-all works"""
    # delegate to IPython's shared help-output smoke test
    tt.help_all_output_test('kernel')
|
||||
|
||||
|
||||
def test_is_complete():
    with kernel() as kc:
        # There are more test cases for this in core - here we just check
        # that the kernel exposes the interface correctly.
        def completeness_of(code):
            kc.is_complete(code)
            reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
            return reply['content']

        assert completeness_of('2+2')['status'] == 'complete'

        # SyntaxError should mean it's complete
        assert completeness_of('raise = 2')['status'] == 'invalid'

        content = completeness_of('a = [1,\n2,')
        assert content['status'] == 'incomplete'
        assert content['indent'] == ''
|
||||
|
||||
|
||||
def test_complete():
    with kernel() as kc:
        execute(u'a = 1', kc=kc)
        wait_for_idle(kc)
        cell = 'import IPython\nb = a.'
        kc.complete(cell)
        reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
        body = reply['content']
        nt.assert_equal(body['status'], 'ok')
        start = cell.find('a.')
        nt.assert_equal(body['cursor_start'], start)
        nt.assert_equal(body['cursor_end'], start + 2)
        matches = body['matches']
        nt.assert_greater(len(matches), 0)
        # every completion must be an attribute of `a`
        for match in matches:
            nt.assert_equal(match[:2], 'a.')
|
||||
|
||||
|
||||
@dec.skip_without('matplotlib')
def test_matplotlib_inline_on_import():
    """Importing pyplot inside the kernel selects the inline backend."""
    with kernel() as kc:
        cell = '\n'.join([
            'import matplotlib, matplotlib.pyplot as plt',
            'backend = matplotlib.get_backend()'
        ])
        _, reply = execute(cell,
                           user_expressions={'backend': 'backend'},
                           kc=kc)
        _check_status(reply)
        backend_bundle = reply['user_expressions']['backend']
        _check_status(backend_bundle)
        nt.assert_in('backend_inline', backend_bundle['data']['text/plain'])
|
||||
|
||||
|
||||
def test_shutdown():
    """Kernel exits after polite shutdown_request"""
    with new_kernel() as kc:
        km = kc.parent
        execute(u'a = 1', kc=kc)
        wait_for_idle(kc)
        kc.shutdown()
        # poll up to 10s (100 ticks of 0.1s) for the kernel to die
        ticks_left = 100
        while ticks_left and km.is_alive():
            time.sleep(.1)
            ticks_left -= 1
        nt.assert_false(km.is_alive())
|
146
packages/python/yap_kernel/yap_kernel/tests/test_kernelspec.py
Normal file
146
packages/python/yap_kernel/yap_kernel/tests/test_kernelspec.py
Normal file
@ -0,0 +1,146 @@
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import json
|
||||
import io
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
try:
|
||||
from unittest import mock
|
||||
except ImportError:
|
||||
import mock # py2
|
||||
|
||||
from jupyter_core.paths import jupyter_data_dir
|
||||
|
||||
from yap_kernel.kernelspec import (
|
||||
make_yapkernel_cmd,
|
||||
get_kernel_dict,
|
||||
write_kernel_spec,
|
||||
install,
|
||||
InstallYAPKernelSpecApp,
|
||||
KERNEL_NAME,
|
||||
RESOURCES,
|
||||
)
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
pjoin = os.path.join
|
||||
|
||||
|
||||
def test_make_yapkernel_cmd():
    """Default kernel cmd launches yap_kernel_launcher under sys.executable."""
    nt.assert_equal(make_yapkernel_cmd(), [
        sys.executable,
        '-m',
        'yap_kernel_launcher',
        '-f',
        '{connection_file}'
    ])
|
||||
|
||||
|
||||
def assert_kernel_dict(d):
    """Validate a default kernelspec dict."""
    nt.assert_equal(d['argv'], make_yapkernel_cmd())
    nt.assert_equal(d['display_name'], 'Python %i' % sys.version_info[0])
    nt.assert_equal(d['language'], 'python')
|
||||
|
||||
|
||||
def test_get_kernel_dict():
    """get_kernel_dict() with no args yields the default spec."""
    assert_kernel_dict(get_kernel_dict())
|
||||
|
||||
|
||||
def assert_kernel_dict_with_profile(d):
    """Validate a kernelspec dict built with extra --profile arguments."""
    expected_argv = make_yapkernel_cmd(extra_arguments=["--profile", "test"])
    nt.assert_equal(d['argv'], expected_argv)
    nt.assert_equal(d['display_name'], 'Python %i' % sys.version_info[0])
    nt.assert_equal(d['language'], 'python')
|
||||
|
||||
|
||||
def test_get_kernel_dict_with_profile():
    """Extra arguments are threaded through to the kernelspec argv."""
    assert_kernel_dict_with_profile(get_kernel_dict(["--profile", "test"]))
|
||||
|
||||
|
||||
def assert_is_spec(path):
    """Check that `path` holds every resource file plus a parseable kernel.json."""
    for fname in os.listdir(RESOURCES):
        assert os.path.exists(pjoin(path, fname))
    kernel_json = pjoin(path, 'kernel.json')
    assert os.path.exists(kernel_json)
    # must be valid JSON
    with io.open(kernel_json, encoding='utf8') as f:
        json.load(f)
|
||||
|
||||
|
||||
def test_write_kernel_spec():
    """write_kernel_spec() with no path writes a complete spec somewhere temporary."""
    spec_dir = write_kernel_spec()
    assert_is_spec(spec_dir)
    shutil.rmtree(spec_dir)
|
||||
|
||||
|
||||
def test_write_kernel_spec_path():
    """write_kernel_spec(path) writes to, and returns, the requested path."""
    target = os.path.join(tempfile.mkdtemp(), KERNEL_NAME)
    nt.assert_equal(target, write_kernel_spec(target))
    assert_is_spec(target)
    shutil.rmtree(target)
|
||||
|
||||
|
||||
def test_install_kernelspec():
    """The install app writes a spec under <prefix>/share/jupyter/kernels."""
    prefix = tempfile.mkdtemp()
    try:
        InstallYAPKernelSpecApp.launch_instance(argv=['--prefix', prefix])
        assert_is_spec(os.path.join(
            prefix, 'share', 'jupyter', 'kernels', KERNEL_NAME))
    finally:
        shutil.rmtree(prefix)
|
||||
|
||||
|
||||
def test_install_user():
    """install(user=True) writes the spec into the user's jupyter data dir."""
    fake_home = tempfile.mkdtemp()

    with mock.patch.dict(os.environ, {'HOME': fake_home}):
        install(user=True)
        data_dir = jupyter_data_dir()

    assert_is_spec(os.path.join(data_dir, 'kernels', KERNEL_NAME))
|
||||
|
||||
|
||||
def test_install():
    """install() writes the spec into the system jupyter path."""
    system_jupyter_dir = tempfile.mkdtemp()

    with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH',
                    [system_jupyter_dir]):
        install()

    assert_is_spec(os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME))
|
||||
|
||||
|
||||
def test_install_profile():
    """install(profile=...) records the profile in display_name and argv."""
    system_jupyter_dir = tempfile.mkdtemp()

    with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH',
                    [system_jupyter_dir]):
        install(profile="Test")

    spec_file = os.path.join(
        system_jupyter_dir, 'kernels', KERNEL_NAME, "kernel.json")
    with open(spec_file) as f:
        spec = json.load(f)
    nt.assert_true(spec["display_name"].endswith(" [profile=Test]"))
    nt.assert_equal(spec["argv"][-2:], ["--profile", "Test"])
|
||||
|
||||
|
||||
def test_install_display_name_overrides_profile():
    """An explicit display_name wins over the profile-derived one."""
    system_jupyter_dir = tempfile.mkdtemp()

    with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH',
                    [system_jupyter_dir]):
        install(display_name="Display", profile="Test")

    spec_file = os.path.join(
        system_jupyter_dir, 'kernels', KERNEL_NAME, "kernel.json")
    with open(spec_file) as f:
        spec = json.load(f)
    nt.assert_equal(spec["display_name"], "Display")
|
539
packages/python/yap_kernel/yap_kernel/tests/test_message_spec.py
Normal file
539
packages/python/yap_kernel/yap_kernel/tests/test_message_spec.py
Normal file
@ -0,0 +1,539 @@
|
||||
"""Test suite for our zeromq-based message specification."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import re
|
||||
import sys
|
||||
from distutils.version import LooseVersion as V
|
||||
try:
|
||||
from queue import Empty # Py 3
|
||||
except ImportError:
|
||||
from Queue import Empty # Py 2
|
||||
|
||||
import nose.tools as nt
|
||||
from nose.plugins.skip import SkipTest
|
||||
|
||||
from traitlets import (
|
||||
HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum
|
||||
)
|
||||
from ipython_genutils.py3compat import string_types, iteritems
|
||||
|
||||
from .utils import TIMEOUT, start_global_kernel, flush_channels, execute
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Globals
|
||||
#-----------------------------------------------------------------------------
|
||||
KC = None
|
||||
|
||||
def setup():
    """nose module-level setup: start the shared global kernel client."""
    global KC
    KC = start_global_kernel()
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Message Spec References
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class Reference(HasTraits):

    """
    Base class for message spec specification testing.

    This class is the core of the message specification test.  The
    idea is that child classes implement trait attributes for each
    message keys, so that message keys can be tested against these
    traits using :meth:`check` method.

    """

    def check(self, d):
        """validate a dict against our traits"""
        for key in self.trait_names():
            nt.assert_in(key, d)
            value = d[key]
            # FIXME: always allow None, probably not a good idea
            if value is None:
                continue
            try:
                # assigning through the trait runs its validation
                setattr(self, key, value)
            except TraitError as e:
                assert False, str(e)
|
||||
|
||||
|
||||
class Version(Unicode):
    """Unicode trait constrained to a version range [min, max]."""

    def __init__(self, *args, **kwargs):
        self.min = kwargs.pop('min', None)
        self.max = kwargs.pop('max', None)
        kwargs['default_value'] = self.min
        super(Version, self).__init__(*args, **kwargs)

    def validate(self, obj, value):
        if self.min and V(value) < V(self.min):
            raise TraitError("bad version: %s < %s" % (value, self.min))
        if self.max and (V(value) > V(self.max)):
            raise TraitError("bad version: %s > %s" % (value, self.max))
        # BUG FIX: traitlets stores whatever validate() returns; falling off
        # the end returned None, so every in-range version was stored as None.
        return value
|
||||
|
||||
|
||||
class RMessage(Reference):
    """Spec for a complete message: envelope fields plus nested headers."""
    msg_id = Unicode()
    msg_type = Unicode()
    header = Dict()
    parent_header = Dict()
    content = Dict()

    def check(self, d):
        super(RMessage, self).check(d)
        RHeader().check(self.header)
        # parent_header is empty for messages with no parent
        if self.parent_header:
            RHeader().check(self.parent_header)
|
||||
|
||||
class RHeader(Reference):
    """Spec for a message header (protocol version >= 5.0)."""
    msg_id = Unicode()
    msg_type = Unicode()
    session = Unicode()
    username = Unicode()
    version = Version(min='5.0')
|
||||
|
||||
# "type/subtype" mime-type pattern (word chars, -, +, . on both sides)
mime_pat = re.compile(r'^[\w\-\+\.]+/[\w\-\+\.]+$')

class MimeBundle(Reference):
    """Spec for a data/metadata mime-bundle pair."""
    metadata = Dict()
    data = Dict()

    def _data_changed(self, name, old, new):
        # every key must look like a mime type and every value must be text
        for mime, content in iteritems(new):
            assert mime_pat.match(mime)
            nt.assert_is_instance(content, string_types)
|
||||
|
||||
|
||||
# shell replies
class Reply(Reference):
    """Common spec for shell replies: status is 'ok' or 'error'."""
    status = Enum((u'ok', u'error'), default_value=u'ok')
|
||||
|
||||
|
||||
class ExecuteReply(Reply):
    """execute_reply: dispatches to the ok/error sub-spec based on status."""
    execution_count = Integer()

    def check(self, d):
        Reference.check(self, d)
        if d['status'] == 'ok':
            ExecuteReplyOkay().check(d)
        elif d['status'] == 'error':
            ExecuteReplyError().check(d)
|
||||
|
||||
|
||||
class ExecuteReplyOkay(Reply):
    """Successful execute_reply: status pinned to 'ok'."""
    status = Enum(('ok',))
    user_expressions = Dict()
|
||||
|
||||
|
||||
class ExecuteReplyError(Reply):
    """Failed execute_reply: exception name, value and traceback lines."""
    ename = Unicode()
    evalue = Unicode()
    traceback = List(Unicode())
|
||||
|
||||
|
||||
class InspectReply(Reply, MimeBundle):
    """inspect_reply: a mime-bundle plus a `found` flag."""
    found = Bool()
|
||||
|
||||
|
||||
class ArgSpec(Reference):
    """Spec for an argspec dict (args / varargs / varkw / defaults)."""
    args = List(Unicode())
    varargs = Unicode()
    varkw = Unicode()
    defaults = List()
|
||||
|
||||
|
||||
class Status(Reference):
    """status message: the kernel's execution state."""
    execution_state = Enum((u'busy', u'idle', u'starting'), default_value=u'busy')
|
||||
|
||||
|
||||
class CompleteReply(Reply):
    """complete_reply: matches plus the cursor span they would replace."""
    matches = List(Unicode())
    cursor_start = Integer()
    cursor_end = Integer()
    status = Unicode()
|
||||
|
||||
|
||||
class LanguageInfo(Reference):
    """language_info stanza of kernel_info_reply."""
    name = Unicode('python')
    version = Unicode(sys.version.split()[0])
|
||||
|
||||
|
||||
class KernelInfoReply(Reply):
    """kernel_info_reply: versions, banner and a nested language_info dict."""
    protocol_version = Version(min='5.0')
    implementation = Unicode('ipython')
    implementation_version = Version(min='2.1')
    language_info = Dict()
    banner = Unicode()

    def check(self, d):
        Reference.check(self, d)
        # the nested language_info dict has its own spec
        LanguageInfo().check(d['language_info'])
|
||||
|
||||
|
||||
class ConnectReply(Reference):
    """connect_reply: the five kernel channel port numbers."""
    shell_port = Integer()
    control_port = Integer()
    stdin_port = Integer()
    iopub_port = Integer()
    hb_port = Integer()
|
||||
|
||||
|
||||
class CommInfoReply(Reply):
    """comm_info_reply: mapping of comm_id to comm info."""
    comms = Dict()
|
||||
|
||||
|
||||
class IsCompleteReply(Reference):
    """is_complete_reply: completeness status, with extra fields when incomplete."""
    status = Enum((u'complete', u'incomplete', u'invalid', u'unknown'), default_value=u'complete')

    def check(self, d):
        Reference.check(self, d)
        if d['status'] == 'incomplete':
            IsCompleteReplyIncomplete().check(d)
|
||||
|
||||
|
||||
class IsCompleteReplyIncomplete(Reference):
    """Extra field for incomplete code: the suggested continuation indent."""
    indent = Unicode()
|
||||
|
||||
|
||||
# IOPub messages

class ExecuteInput(Reference):
    """execute_input: echo of the code being executed."""
    code = Unicode()
    execution_count = Integer()
|
||||
|
||||
|
||||
class Error(ExecuteReplyError):
    """Errors are the same as ExecuteReply, but without status"""
    status = None  # no status field
|
||||
|
||||
|
||||
class Stream(Reference):
    """stream message: text destined for stdout or stderr."""
    name = Enum((u'stdout', u'stderr'), default_value=u'stdout')
    text = Unicode()
|
||||
|
||||
|
||||
class DisplayData(MimeBundle):
    """display_data: a plain mime-bundle."""
    pass
|
||||
|
||||
|
||||
class ExecuteResult(MimeBundle):
    """execute_result: a mime-bundle plus the prompt counter."""
    execution_count = Integer()
|
||||
|
||||
|
||||
class HistoryReply(Reply):
    """history_reply: a list of history entries."""
    history = List(List())
|
||||
|
||||
|
||||
# Map each message type to the reference spec its `content` must satisfy.
references = {
    'execute_reply': ExecuteReply(),
    'inspect_reply': InspectReply(),
    'status': Status(),
    'complete_reply': CompleteReply(),
    'kernel_info_reply': KernelInfoReply(),
    'connect_reply': ConnectReply(),
    'comm_info_reply': CommInfoReply(),
    'is_complete_reply': IsCompleteReply(),
    'execute_input': ExecuteInput(),
    'execute_result': ExecuteResult(),
    'history_reply': HistoryReply(),
    'error': Error(),
    'stream': Stream(),
    'display_data': DisplayData(),
    'header': RHeader(),
}
"""
Specifications of `content` part of the reply messages.
"""
|
||||
|
||||
|
||||
def validate_message(msg, msg_type=None, parent=None):
    """Validate a message against the spec references.

    Checks the overall message structure, then the `content` dict against
    the reference class registered for the message's type.

    If msg_type and/or parent are given, the msg_type and/or parent msg_id
    are compared with the given values.
    """
    # DOC FIX: the old docstring claimed this was a generator that had to be
    # iterated to trigger each test; it is a plain function and runs all of
    # its checks immediately.
    RMessage().check(msg)
    if msg_type:
        nt.assert_equal(msg['msg_type'], msg_type)
    if parent:
        nt.assert_equal(msg['parent_header']['msg_id'], parent)
    content = msg['content']
    ref = references[msg['msg_type']]
    ref.check(content)
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Tests
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# Shell channel
|
||||
|
||||
def test_execute():
    flush_channels()

    sent_id = KC.execute(code='x=1')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    validate_message(reply, 'execute_reply', sent_id)
|
||||
|
||||
|
||||
def test_execute_silent():
    flush_channels()

    def run_silent(code):
        """Execute silently, drain the idle status, return execution_count."""
        msg_id, reply = execute(code=code, silent=True)
        # flush status=idle
        status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
        validate_message(status, 'status', msg_id)
        nt.assert_equal(status['content']['execution_state'], 'idle')
        # nothing else may be published for a silent execution
        nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
        return reply['execution_count']

    first_count = run_silent('x=1')
    second_count = run_silent('x=2')
    # silent execution must not advance the counter
    nt.assert_equal(second_count, first_count)
|
||||
|
||||
|
||||
def test_execute_error():
    flush_channels()

    msg_id, reply = execute(code='1/0')
    nt.assert_equal(reply['status'], 'error')
    nt.assert_equal(reply['ename'], 'ZeroDivisionError')

    # the error is also broadcast on IOPub
    err_msg = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    validate_message(err_msg, 'error', msg_id)
|
||||
|
||||
|
||||
def test_execute_inc():
    """execute request should increment execution_count"""
    flush_channels()
    _, first_reply = execute(code='x=1')

    flush_channels()
    _, second_reply = execute(code='x=2')

    nt.assert_equal(second_reply['execution_count'],
                    first_reply['execution_count'] + 1)
|
||||
|
||||
def test_execute_stop_on_error():
    """execute request should not abort execution queue with stop_on_error False"""
    flush_channels()

    fail = '\n'.join([
        # sleep to ensure subsequent message is waiting in the queue to be aborted
        'import time',
        'time.sleep(0.5)',
        'raise ValueError',
    ])

    # default behaviour: the queued request after a failure is aborted
    KC.execute(code=fail)
    KC.execute(code='print("Hello")')
    KC.get_shell_msg(timeout=TIMEOUT)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    nt.assert_equal(reply['content']['status'], 'aborted')

    flush_channels()

    # with stop_on_error=False the queued request still runs
    KC.execute(code=fail, stop_on_error=False)
    KC.execute(code='print("Hello")')
    KC.get_shell_msg(timeout=TIMEOUT)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    nt.assert_equal(reply['content']['status'], 'ok')
|
||||
|
||||
|
||||
def test_user_expressions():
    flush_channels()

    _, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
    nt.assert_equal(reply['user_expressions'], {u'foo': {
        u'status': u'ok',
        u'data': {u'text/plain': u'2'},
        u'metadata': {},
    }})
|
||||
|
||||
|
||||
def test_user_expressions_fail():
    flush_channels()

    _, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
    foo = reply['user_expressions']['foo']
    nt.assert_equal(foo['status'], 'error')
    nt.assert_equal(foo['ename'], 'NameError')
|
||||
|
||||
|
||||
def test_oinfo():
    flush_channels()

    sent_id = KC.inspect('a')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    validate_message(reply, 'inspect_reply', sent_id)
|
||||
|
||||
|
||||
def test_oinfo_found():
    flush_channels()

    execute(code='a=5')

    sent_id = KC.inspect('a')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    validate_message(reply, 'inspect_reply', sent_id)
    content = reply['content']
    assert content['found']
    text = content['data']['text/plain']
    nt.assert_in('Type:', text)
    nt.assert_in('Docstring:', text)
|
||||
|
||||
|
||||
def test_oinfo_detail():
|
||||
flush_channels()
|
||||
|
||||
msg_id, reply = execute(code='ip=get_ipython()')
|
||||
|
||||
msg_id = KC.inspect('ip.object_inspect', cursor_pos=10, detail_level=1)
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'inspect_reply', msg_id)
|
||||
content = reply['content']
|
||||
assert content['found']
|
||||
text = content['data']['text/plain']
|
||||
nt.assert_in('Signature:', text)
|
||||
nt.assert_in('Source:', text)
|
||||
|
||||
|
||||
def test_oinfo_not_found():
|
||||
flush_channels()
|
||||
|
||||
msg_id = KC.inspect('dne')
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'inspect_reply', msg_id)
|
||||
content = reply['content']
|
||||
nt.assert_false(content['found'])
|
||||
|
||||
|
||||
def test_complete():
|
||||
flush_channels()
|
||||
|
||||
msg_id, reply = execute(code="alpha = albert = 5")
|
||||
|
||||
msg_id = KC.complete('al', 2)
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'complete_reply', msg_id)
|
||||
matches = reply['content']['matches']
|
||||
for name in ('alpha', 'albert'):
|
||||
nt.assert_in(name, matches)
|
||||
|
||||
|
||||
def test_kernel_info_request():
|
||||
flush_channels()
|
||||
|
||||
msg_id = KC.kernel_info()
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'kernel_info_reply', msg_id)
|
||||
|
||||
|
||||
def test_connect_request():
|
||||
flush_channels()
|
||||
msg = KC.session.msg('connect_request')
|
||||
KC.shell_channel.send(msg)
|
||||
return msg['header']['msg_id']
|
||||
|
||||
msg_id = KC.kernel_info()
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'connect_reply', msg_id)
|
||||
|
||||
|
||||
def test_comm_info_request():
|
||||
flush_channels()
|
||||
if not hasattr(KC, 'comm_info'):
|
||||
raise SkipTest()
|
||||
msg_id = KC.comm_info()
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'comm_info_reply', msg_id)
|
||||
|
||||
|
||||
def test_single_payload():
|
||||
flush_channels()
|
||||
msg_id, reply = execute(code="for i in range(3):\n"+
|
||||
" x=range?\n")
|
||||
payload = reply['payload']
|
||||
next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"]
|
||||
nt.assert_equal(len(next_input_pls), 1)
|
||||
|
||||
def test_is_complete():
|
||||
flush_channels()
|
||||
|
||||
msg_id = KC.is_complete("a = 1")
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'is_complete_reply', msg_id)
|
||||
|
||||
def test_history_range():
|
||||
flush_channels()
|
||||
|
||||
msg_id_exec = KC.execute(code='x=1', store_history = True)
|
||||
reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
|
||||
msg_id = KC.history(hist_access_type = 'range', raw = True, output = True, start = 1, stop = 2, session = 0)
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'history_reply', msg_id)
|
||||
content = reply['content']
|
||||
nt.assert_equal(len(content['history']), 1)
|
||||
|
||||
def test_history_tail():
|
||||
flush_channels()
|
||||
|
||||
msg_id_exec = KC.execute(code='x=1', store_history = True)
|
||||
reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
|
||||
msg_id = KC.history(hist_access_type = 'tail', raw = True, output = True, n = 1, session = 0)
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'history_reply', msg_id)
|
||||
content = reply['content']
|
||||
nt.assert_equal(len(content['history']), 1)
|
||||
|
||||
def test_history_search():
|
||||
flush_channels()
|
||||
|
||||
msg_id_exec = KC.execute(code='x=1', store_history = True)
|
||||
reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
|
||||
msg_id = KC.history(hist_access_type = 'search', raw = True, output = True, n = 1, pattern = '*', session = 0)
|
||||
reply = KC.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'history_reply', msg_id)
|
||||
content = reply['content']
|
||||
nt.assert_equal(len(content['history']), 1)
|
||||
|
||||
# IOPub channel
|
||||
|
||||
|
||||
def test_stream():
|
||||
flush_channels()
|
||||
|
||||
msg_id, reply = execute("print('hi')")
|
||||
|
||||
stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
|
||||
validate_message(stdout, 'stream', msg_id)
|
||||
content = stdout['content']
|
||||
nt.assert_equal(content['text'], u'hi\n')
|
||||
|
||||
|
||||
def test_display_data():
|
||||
flush_channels()
|
||||
|
||||
msg_id, reply = execute("from IPython.core.display import display; display(1)")
|
||||
|
||||
display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
|
||||
validate_message(display, 'display_data', parent=msg_id)
|
||||
data = display['content']['data']
|
||||
nt.assert_equal(data['text/plain'], u'1')
|
@ -0,0 +1,68 @@
|
||||
|
||||
import os
|
||||
import pickle
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
from yap_kernel.pickleutil import can, uncan, codeutil
|
||||
|
||||
def interactive(f):
|
||||
f.__module__ = '__main__'
|
||||
return f
|
||||
|
||||
def dumps(obj):
|
||||
return pickle.dumps(can(obj))
|
||||
|
||||
def loads(obj):
|
||||
return uncan(pickle.loads(obj))
|
||||
|
||||
def test_no_closure():
|
||||
@interactive
|
||||
def foo():
|
||||
a = 5
|
||||
return a
|
||||
|
||||
pfoo = dumps(foo)
|
||||
bar = loads(pfoo)
|
||||
nt.assert_equal(foo(), bar())
|
||||
|
||||
def test_generator_closure():
|
||||
# this only creates a closure on Python 3
|
||||
@interactive
|
||||
def foo():
|
||||
i = 'i'
|
||||
r = [ i for j in (1,2) ]
|
||||
return r
|
||||
|
||||
pfoo = dumps(foo)
|
||||
bar = loads(pfoo)
|
||||
nt.assert_equal(foo(), bar())
|
||||
|
||||
def test_nested_closure():
|
||||
@interactive
|
||||
def foo():
|
||||
i = 'i'
|
||||
def g():
|
||||
return i
|
||||
return g()
|
||||
|
||||
pfoo = dumps(foo)
|
||||
bar = loads(pfoo)
|
||||
nt.assert_equal(foo(), bar())
|
||||
|
||||
def test_closure():
|
||||
i = 'i'
|
||||
@interactive
|
||||
def foo():
|
||||
return i
|
||||
|
||||
pfoo = dumps(foo)
|
||||
bar = loads(pfoo)
|
||||
nt.assert_equal(foo(), bar())
|
||||
|
||||
def test_uncan_bytes_buffer():
|
||||
data = b'data'
|
||||
canned = can(data)
|
||||
canned.buffers = [memoryview(buf) for buf in canned.buffers]
|
||||
out = uncan(canned)
|
||||
nt.assert_equal(out, data)
|
210
packages/python/yap_kernel/yap_kernel/tests/test_serialize.py
Normal file
210
packages/python/yap_kernel/yap_kernel/tests/test_serialize.py
Normal file
@ -0,0 +1,210 @@
|
||||
"""test serialization tools"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import pickle
|
||||
from collections import namedtuple
|
||||
|
||||
import nose.tools as nt
|
||||
|
||||
from yap_kernel.serialize import serialize_object, deserialize_object
|
||||
from IPython.testing import decorators as dec
|
||||
from yap_kernel.pickleutil import CannedArray, CannedClass, interactive
|
||||
from ipython_genutils.py3compat import iteritems
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Globals and Utilities
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
def roundtrip(obj):
|
||||
"""roundtrip an object through serialization"""
|
||||
bufs = serialize_object(obj)
|
||||
obj2, remainder = deserialize_object(bufs)
|
||||
nt.assert_equals(remainder, [])
|
||||
return obj2
|
||||
|
||||
|
||||
SHAPES = ((100,), (1024,10), (10,8,6,5), (), (0,))
|
||||
DTYPES = ('uint8', 'float64', 'int32', [('g', 'float32')], '|S10')
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Tests
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
def new_array(shape, dtype):
|
||||
import numpy
|
||||
return numpy.random.random(shape).astype(dtype)
|
||||
|
||||
def test_roundtrip_simple():
|
||||
for obj in [
|
||||
'hello',
|
||||
dict(a='b', b=10),
|
||||
[1,2,'hi'],
|
||||
(b'123', 'hello'),
|
||||
]:
|
||||
obj2 = roundtrip(obj)
|
||||
nt.assert_equal(obj, obj2)
|
||||
|
||||
def test_roundtrip_nested():
|
||||
for obj in [
|
||||
dict(a=range(5), b={1:b'hello'}),
|
||||
[range(5),[range(3),(1,[b'whoda'])]],
|
||||
]:
|
||||
obj2 = roundtrip(obj)
|
||||
nt.assert_equal(obj, obj2)
|
||||
|
||||
def test_roundtrip_buffered():
|
||||
for obj in [
|
||||
dict(a=b"x"*1025),
|
||||
b"hello"*500,
|
||||
[b"hello"*501, 1,2,3]
|
||||
]:
|
||||
bufs = serialize_object(obj)
|
||||
nt.assert_equal(len(bufs), 2)
|
||||
obj2, remainder = deserialize_object(bufs)
|
||||
nt.assert_equal(remainder, [])
|
||||
nt.assert_equal(obj, obj2)
|
||||
|
||||
def test_roundtrip_memoryview():
|
||||
b = b'asdf' * 1025
|
||||
view = memoryview(b)
|
||||
bufs = serialize_object(view)
|
||||
nt.assert_equal(len(bufs), 2)
|
||||
v2, remainder = deserialize_object(bufs)
|
||||
nt.assert_equal(remainder, [])
|
||||
nt.assert_equal(v2.tobytes(), b)
|
||||
|
||||
@dec.skip_without('numpy')
|
||||
def test_numpy():
|
||||
import numpy
|
||||
from numpy.testing.utils import assert_array_equal
|
||||
for shape in SHAPES:
|
||||
for dtype in DTYPES:
|
||||
A = new_array(shape, dtype=dtype)
|
||||
bufs = serialize_object(A)
|
||||
bufs = [memoryview(b) for b in bufs]
|
||||
B, r = deserialize_object(bufs)
|
||||
nt.assert_equal(r, [])
|
||||
nt.assert_equal(A.shape, B.shape)
|
||||
nt.assert_equal(A.dtype, B.dtype)
|
||||
assert_array_equal(A,B)
|
||||
|
||||
@dec.skip_without('numpy')
|
||||
def test_recarray():
|
||||
import numpy
|
||||
from numpy.testing.utils import assert_array_equal
|
||||
for shape in SHAPES:
|
||||
for dtype in [
|
||||
[('f', float), ('s', '|S10')],
|
||||
[('n', int), ('s', '|S1'), ('u', 'uint32')],
|
||||
]:
|
||||
A = new_array(shape, dtype=dtype)
|
||||
|
||||
bufs = serialize_object(A)
|
||||
B, r = deserialize_object(bufs)
|
||||
nt.assert_equal(r, [])
|
||||
nt.assert_equal(A.shape, B.shape)
|
||||
nt.assert_equal(A.dtype, B.dtype)
|
||||
assert_array_equal(A,B)
|
||||
|
||||
@dec.skip_without('numpy')
|
||||
def test_numpy_in_seq():
|
||||
import numpy
|
||||
from numpy.testing.utils import assert_array_equal
|
||||
for shape in SHAPES:
|
||||
for dtype in DTYPES:
|
||||
A = new_array(shape, dtype=dtype)
|
||||
bufs = serialize_object((A,1,2,b'hello'))
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned[0], CannedArray)
|
||||
tup, r = deserialize_object(bufs)
|
||||
B = tup[0]
|
||||
nt.assert_equal(r, [])
|
||||
nt.assert_equal(A.shape, B.shape)
|
||||
nt.assert_equal(A.dtype, B.dtype)
|
||||
assert_array_equal(A,B)
|
||||
|
||||
@dec.skip_without('numpy')
|
||||
def test_numpy_in_dict():
|
||||
import numpy
|
||||
from numpy.testing.utils import assert_array_equal
|
||||
for shape in SHAPES:
|
||||
for dtype in DTYPES:
|
||||
A = new_array(shape, dtype=dtype)
|
||||
bufs = serialize_object(dict(a=A,b=1,c=range(20)))
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned['a'], CannedArray)
|
||||
d, r = deserialize_object(bufs)
|
||||
B = d['a']
|
||||
nt.assert_equal(r, [])
|
||||
nt.assert_equal(A.shape, B.shape)
|
||||
nt.assert_equal(A.dtype, B.dtype)
|
||||
assert_array_equal(A,B)
|
||||
|
||||
def test_class():
|
||||
@interactive
|
||||
class C(object):
|
||||
a=5
|
||||
bufs = serialize_object(dict(C=C))
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned['C'], CannedClass)
|
||||
d, r = deserialize_object(bufs)
|
||||
C2 = d['C']
|
||||
nt.assert_equal(C2.a, C.a)
|
||||
|
||||
def test_class_oldstyle():
|
||||
@interactive
|
||||
class C:
|
||||
a=5
|
||||
|
||||
bufs = serialize_object(dict(C=C))
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned['C'], CannedClass)
|
||||
d, r = deserialize_object(bufs)
|
||||
C2 = d['C']
|
||||
nt.assert_equal(C2.a, C.a)
|
||||
|
||||
def test_tuple():
|
||||
tup = (lambda x:x, 1)
|
||||
bufs = serialize_object(tup)
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned, tuple)
|
||||
t2, r = deserialize_object(bufs)
|
||||
nt.assert_equal(t2[0](t2[1]), tup[0](tup[1]))
|
||||
|
||||
point = namedtuple('point', 'x y')
|
||||
|
||||
def test_namedtuple():
|
||||
p = point(1,2)
|
||||
bufs = serialize_object(p)
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned, point)
|
||||
p2, r = deserialize_object(bufs, globals())
|
||||
nt.assert_equal(p2.x, p.x)
|
||||
nt.assert_equal(p2.y, p.y)
|
||||
|
||||
def test_list():
|
||||
lis = [lambda x:x, 1]
|
||||
bufs = serialize_object(lis)
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned, list)
|
||||
l2, r = deserialize_object(bufs)
|
||||
nt.assert_equal(l2[0](l2[1]), lis[0](lis[1]))
|
||||
|
||||
def test_class_inheritance():
|
||||
@interactive
|
||||
class C(object):
|
||||
a=5
|
||||
|
||||
@interactive
|
||||
class D(C):
|
||||
b=10
|
||||
|
||||
bufs = serialize_object(dict(D=D))
|
||||
canned = pickle.loads(bufs[0])
|
||||
nt.assert_is_instance(canned['D'], CannedClass)
|
||||
d, r = deserialize_object(bufs)
|
||||
D2 = d['D']
|
||||
nt.assert_equal(D2.a, D.a)
|
||||
nt.assert_equal(D2.b, D.b)
|
@ -0,0 +1,48 @@
|
||||
import nose.tools as nt
|
||||
|
||||
from .test_embed_kernel import setup_kernel
|
||||
|
||||
TIMEOUT = 15
|
||||
|
||||
def test_ipython_start_kernel_userns():
|
||||
cmd = ('from IPython import start_kernel\n'
|
||||
'ns = {"tre": 123}\n'
|
||||
'start_kernel(user_ns=ns)')
|
||||
|
||||
with setup_kernel(cmd) as client:
|
||||
msg_id = client.inspect('tre')
|
||||
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
|
||||
content = msg['content']
|
||||
assert content['found']
|
||||
text = content['data']['text/plain']
|
||||
nt.assert_in(u'123', text)
|
||||
|
||||
# user_module should be an instance of DummyMod
|
||||
msg_id = client.execute("usermod = get_ipython().user_module")
|
||||
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
|
||||
content = msg['content']
|
||||
nt.assert_equal(content['status'], u'ok')
|
||||
msg_id = client.inspect('usermod')
|
||||
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
|
||||
content = msg['content']
|
||||
assert content['found']
|
||||
text = content['data']['text/plain']
|
||||
nt.assert_in(u'DummyMod', text)
|
||||
|
||||
def test_ipython_start_kernel_no_userns():
|
||||
# Issue #4188 - user_ns should be passed to shell as None, not {}
|
||||
cmd = ('from IPython import start_kernel\n'
|
||||
'start_kernel()')
|
||||
|
||||
with setup_kernel(cmd) as client:
|
||||
# user_module should not be an instance of DummyMod
|
||||
msg_id = client.execute("usermod = get_ipython().user_module")
|
||||
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
|
||||
content = msg['content']
|
||||
nt.assert_equal(content['status'], u'ok')
|
||||
msg_id = client.inspect('usermod')
|
||||
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
|
||||
content = msg['content']
|
||||
assert content['found']
|
||||
text = content['data']['text/plain']
|
||||
nt.assert_not_in(u'DummyMod', text)
|
208
packages/python/yap_kernel/yap_kernel/tests/test_zmq_shell.py
Normal file
208
packages/python/yap_kernel/yap_kernel/tests/test_zmq_shell.py
Normal file
@ -0,0 +1,208 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" Tests for zmq shell / display publisher. """
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import os
|
||||
try:
|
||||
from queue import Queue
|
||||
except ImportError:
|
||||
# py2
|
||||
from Queue import Queue
|
||||
from threading import Thread
|
||||
import unittest
|
||||
|
||||
from traitlets import Int
|
||||
import zmq
|
||||
|
||||
from yap_kernel.zmqshell import ZMQDisplayPublisher
|
||||
from jupyter_client.session import Session
|
||||
|
||||
|
||||
class NoReturnDisplayHook(object):
|
||||
"""
|
||||
A dummy DisplayHook which allows us to monitor
|
||||
the number of times an object is called, but which
|
||||
does *not* return a message when it is called.
|
||||
"""
|
||||
call_count = 0
|
||||
|
||||
def __call__(self, obj):
|
||||
self.call_count += 1
|
||||
|
||||
|
||||
class ReturnDisplayHook(NoReturnDisplayHook):
|
||||
"""
|
||||
A dummy DisplayHook with the same counting ability
|
||||
as its base class, but which also returns the same
|
||||
message when it is called.
|
||||
"""
|
||||
def __call__(self, obj):
|
||||
super(ReturnDisplayHook, self).__call__(obj)
|
||||
return obj
|
||||
|
||||
|
||||
class CounterSession(Session):
|
||||
"""
|
||||
This is a simple subclass to allow us to count
|
||||
the calls made to the session object by the display
|
||||
publisher.
|
||||
"""
|
||||
send_count = Int(0)
|
||||
|
||||
def send(self, *args, **kwargs):
|
||||
"""
|
||||
A trivial override to just augment the existing call
|
||||
with an increment to the send counter.
|
||||
"""
|
||||
self.send_count += 1
|
||||
super(CounterSession, self).send(*args, **kwargs)
|
||||
|
||||
|
||||
class ZMQDisplayPublisherTests(unittest.TestCase):
|
||||
"""
|
||||
Tests the ZMQDisplayPublisher in zmqshell.py
|
||||
"""
|
||||
|
||||
def setUp(self):
|
||||
self.context = zmq.Context()
|
||||
self.socket = self.context.socket(zmq.PUB)
|
||||
self.session = CounterSession()
|
||||
|
||||
self.disp_pub = ZMQDisplayPublisher(
|
||||
session = self.session,
|
||||
pub_socket = self.socket
|
||||
)
|
||||
|
||||
def tearDown(self):
|
||||
"""
|
||||
We need to close the socket in order to proceed with the
|
||||
tests.
|
||||
TODO - There is still an open file handler to '/dev/null',
|
||||
presumably created by zmq.
|
||||
"""
|
||||
self.disp_pub.clear_output()
|
||||
self.socket.close()
|
||||
self.context.term()
|
||||
|
||||
def test_display_publisher_creation(self):
|
||||
"""
|
||||
Since there's no explicit constructor, here we confirm
|
||||
that keyword args get assigned correctly, and override
|
||||
the defaults.
|
||||
"""
|
||||
self.assertEqual(self.disp_pub.session, self.session)
|
||||
self.assertEqual(self.disp_pub.pub_socket, self.socket)
|
||||
|
||||
def test_thread_local_hooks(self):
|
||||
"""
|
||||
Confirms that the thread_local attribute is correctly
|
||||
initialised with an empty list for the display hooks
|
||||
"""
|
||||
self.assertEqual(self.disp_pub._hooks, [])
|
||||
def hook(msg):
|
||||
return msg
|
||||
self.disp_pub.register_hook(hook)
|
||||
self.assertEqual(self.disp_pub._hooks, [hook])
|
||||
|
||||
q = Queue()
|
||||
def set_thread_hooks():
|
||||
q.put(self.disp_pub._hooks)
|
||||
t = Thread(target=set_thread_hooks)
|
||||
t.start()
|
||||
thread_hooks = q.get(timeout=10)
|
||||
self.assertEqual(thread_hooks, [])
|
||||
|
||||
def test_publish(self):
|
||||
"""
|
||||
Publish should prepare the message and eventually call
|
||||
`send` by default.
|
||||
"""
|
||||
data = dict(a = 1)
|
||||
|
||||
self.assertEqual(self.session.send_count, 0)
|
||||
self.disp_pub.publish(data)
|
||||
self.assertEqual(self.session.send_count, 1)
|
||||
|
||||
def test_display_hook_halts_send(self):
|
||||
"""
|
||||
If a hook is installed, and on calling the object
|
||||
it does *not* return a message, then we assume that
|
||||
the message has been consumed, and should not be
|
||||
processed (`sent`) in the normal manner.
|
||||
"""
|
||||
data = dict(a = 1)
|
||||
hook = NoReturnDisplayHook()
|
||||
|
||||
self.disp_pub.register_hook(hook)
|
||||
self.assertEqual(hook.call_count, 0)
|
||||
self.assertEqual(self.session.send_count, 0)
|
||||
|
||||
self.disp_pub.publish(data)
|
||||
|
||||
self.assertEqual(hook.call_count, 1)
|
||||
self.assertEqual(self.session.send_count, 0)
|
||||
|
||||
def test_display_hook_return_calls_send(self):
|
||||
"""
|
||||
If a hook is installed and on calling the object
|
||||
it returns a new message, then we assume that this
|
||||
is just a message transformation, and the message
|
||||
should be sent in the usual manner.
|
||||
"""
|
||||
data = dict(a=1)
|
||||
hook = ReturnDisplayHook()
|
||||
|
||||
self.disp_pub.register_hook(hook)
|
||||
self.assertEqual(hook.call_count, 0)
|
||||
self.assertEqual(self.session.send_count, 0)
|
||||
|
||||
self.disp_pub.publish(data)
|
||||
|
||||
self.assertEqual(hook.call_count, 1)
|
||||
self.assertEqual(self.session.send_count, 1)
|
||||
|
||||
def test_unregister_hook(self):
|
||||
"""
|
||||
Once a hook is unregistered, it should not be called
|
||||
during `publish`.
|
||||
"""
|
||||
data = dict(a = 1)
|
||||
hook = NoReturnDisplayHook()
|
||||
|
||||
self.disp_pub.register_hook(hook)
|
||||
self.assertEqual(hook.call_count, 0)
|
||||
self.assertEqual(self.session.send_count, 0)
|
||||
|
||||
self.disp_pub.publish(data)
|
||||
|
||||
self.assertEqual(hook.call_count, 1)
|
||||
self.assertEqual(self.session.send_count, 0)
|
||||
|
||||
#
|
||||
# After unregistering the `NoReturn` hook, any calls
|
||||
# to publish should *not* got through the DisplayHook,
|
||||
# but should instead hit the usual `session.send` call
|
||||
# at the end.
|
||||
#
|
||||
# As a result, the hook call count should *not* increase,
|
||||
# but the session send count *should* increase.
|
||||
#
|
||||
first = self.disp_pub.unregister_hook(hook)
|
||||
self.disp_pub.publish(data)
|
||||
|
||||
self.assertTrue(first)
|
||||
self.assertEqual(hook.call_count, 1)
|
||||
self.assertEqual(self.session.send_count, 1)
|
||||
|
||||
#
|
||||
# If a hook is not installed, `unregister_hook`
|
||||
# should return false.
|
||||
#
|
||||
second = self.disp_pub.unregister_hook(hook)
|
||||
self.assertFalse(second)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
166
packages/python/yap_kernel/yap_kernel/tests/utils.py
Normal file
166
packages/python/yap_kernel/yap_kernel/tests/utils.py
Normal file
@ -0,0 +1,166 @@
|
||||
"""utilities for testing IPython kernels"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import atexit
|
||||
import os
|
||||
|
||||
from contextlib import contextmanager
|
||||
from subprocess import PIPE, STDOUT
|
||||
try:
|
||||
from queue import Empty # Py 3
|
||||
except ImportError:
|
||||
from Queue import Empty # Py 2
|
||||
|
||||
import nose
|
||||
import nose.tools as nt
|
||||
|
||||
from jupyter_client import manager
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Globals
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
STARTUP_TIMEOUT = 60
|
||||
TIMEOUT = 15
|
||||
|
||||
KM = None
|
||||
KC = None
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# code
|
||||
#-------------------------------------------------------------------------------
|
||||
def start_new_kernel(**kwargs):
|
||||
"""start a new kernel, and return its Manager and Client
|
||||
|
||||
Integrates with our output capturing for tests.
|
||||
"""
|
||||
try:
|
||||
stdout = nose.iptest_stdstreams_fileno()
|
||||
except AttributeError:
|
||||
stdout = open(os.devnull)
|
||||
kwargs.update(dict(stdout=stdout, stderr=STDOUT))
|
||||
return manager.start_new_kernel(startup_timeout=STARTUP_TIMEOUT, **kwargs)
|
||||
|
||||
def flush_channels(kc=None):
|
||||
"""flush any messages waiting on the queue"""
|
||||
from .test_message_spec import validate_message
|
||||
|
||||
if kc is None:
|
||||
kc = KC
|
||||
for channel in (kc.shell_channel, kc.iopub_channel):
|
||||
while True:
|
||||
try:
|
||||
msg = channel.get_msg(block=True, timeout=0.1)
|
||||
except Empty:
|
||||
break
|
||||
else:
|
||||
validate_message(msg)
|
||||
|
||||
|
||||
def execute(code='', kc=None, **kwargs):
|
||||
"""wrapper for doing common steps for validating an execution request"""
|
||||
from .test_message_spec import validate_message
|
||||
if kc is None:
|
||||
kc = KC
|
||||
msg_id = kc.execute(code=code, **kwargs)
|
||||
reply = kc.get_shell_msg(timeout=TIMEOUT)
|
||||
validate_message(reply, 'execute_reply', msg_id)
|
||||
busy = kc.get_iopub_msg(timeout=TIMEOUT)
|
||||
validate_message(busy, 'status', msg_id)
|
||||
nt.assert_equal(busy['content']['execution_state'], 'busy')
|
||||
|
||||
if not kwargs.get('silent'):
|
||||
execute_input = kc.get_iopub_msg(timeout=TIMEOUT)
|
||||
validate_message(execute_input, 'execute_input', msg_id)
|
||||
nt.assert_equal(execute_input['content']['code'], code)
|
||||
|
||||
return msg_id, reply['content']
|
||||
|
||||
def start_global_kernel():
|
||||
"""start the global kernel (if it isn't running) and return its client"""
|
||||
global KM, KC
|
||||
if KM is None:
|
||||
KM, KC = start_new_kernel()
|
||||
atexit.register(stop_global_kernel)
|
||||
else:
|
||||
flush_channels(KC)
|
||||
return KC
|
||||
|
||||
@contextmanager
|
||||
def kernel():
|
||||
"""Context manager for the global kernel instance
|
||||
|
||||
Should be used for most kernel tests
|
||||
|
||||
Returns
|
||||
-------
|
||||
kernel_client: connected KernelClient instance
|
||||
"""
|
||||
yield start_global_kernel()
|
||||
|
||||
def uses_kernel(test_f):
|
||||
"""Decorator for tests that use the global kernel"""
|
||||
def wrapped_test():
|
||||
with kernel() as kc:
|
||||
test_f(kc)
|
||||
wrapped_test.__doc__ = test_f.__doc__
|
||||
wrapped_test.__name__ = test_f.__name__
|
||||
return wrapped_test
|
||||
|
||||
def stop_global_kernel():
|
||||
"""Stop the global shared kernel instance, if it exists"""
|
||||
global KM, KC
|
||||
KC.stop_channels()
|
||||
KC = None
|
||||
if KM is None:
|
||||
return
|
||||
KM.shutdown_kernel(now=True)
|
||||
KM = None
|
||||
|
||||
def new_kernel(argv=None):
|
||||
"""Context manager for a new kernel in a subprocess
|
||||
|
||||
Should only be used for tests where the kernel must not be re-used.
|
||||
|
||||
Returns
|
||||
-------
|
||||
kernel_client: connected KernelClient instance
|
||||
"""
|
||||
stdout = getattr(nose, 'iptest_stdstreams_fileno', open(os.devnull))
|
||||
kwargs = dict(stdout=stdout, stderr=STDOUT)
|
||||
if argv is not None:
|
||||
kwargs['extra_arguments'] = argv
|
||||
return manager.run_kernel(**kwargs)
|
||||
|
||||
def assemble_output(iopub):
|
||||
"""assemble stdout/err from an execution"""
|
||||
stdout = ''
|
||||
stderr = ''
|
||||
while True:
|
||||
msg = iopub.get_msg(block=True, timeout=1)
|
||||
msg_type = msg['msg_type']
|
||||
content = msg['content']
|
||||
if msg_type == 'status' and content['execution_state'] == 'idle':
|
||||
# idle message signals end of output
|
||||
break
|
||||
elif msg['msg_type'] == 'stream':
|
||||
if content['name'] == 'stdout':
|
||||
stdout += content['text']
|
||||
elif content['name'] == 'stderr':
|
||||
stderr += content['text']
|
||||
else:
|
||||
raise KeyError("bad stream: %r" % content['name'])
|
||||
else:
|
||||
# other output, ignored
|
||||
pass
|
||||
return stdout, stderr
|
||||
|
||||
def wait_for_idle(kc):
|
||||
while True:
|
||||
msg = kc.iopub_channel.get_msg(block=True, timeout=1)
|
||||
msg_type = msg['msg_type']
|
||||
content = msg['content']
|
||||
if msg_type == 'status' and content['execution_state'] == 'idle':
|
||||
break
|
381
packages/python/yap_kernel/yap_kernel/yapkernel.py
Normal file
381
packages/python/yap_kernel/yap_kernel/yapkernel.py
Normal file
@ -0,0 +1,381 @@
|
||||
"""The IPython kernel implementation"""
|
||||
|
||||
import getpass
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from IPython.core import release
|
||||
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
|
||||
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
|
||||
from traitlets import Instance, Type, Any, List
|
||||
|
||||
from .comm import CommManager
|
||||
from .kernelbase import Kernel as KernelBase
|
||||
from .zmqshell import ZMQInteractiveShell
|
||||
from .interactiveshell import YAPInteraction
|
||||
|
||||
class YAPKernel(KernelBase):
|
||||
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
|
||||
allow_none=True)
|
||||
shell_class = Type(ZMQInteractiveShell)
|
||||
user_module = Any()
|
||||
def _user_module_changed(self, name, old, new):
|
||||
if self.shell is not None:
|
||||
self.shell.user_module = new
|
||||
|
||||
user_ns = Instance(dict, args=None, allow_none=True)
|
||||
def _user_ns_changed(self, name, old, new):
|
||||
if self.shell is not None:
|
||||
self.shell.user_ns = new
|
||||
self.shell.init_user_ns()
|
||||
|
||||
# A reference to the Python builtin 'raw_input' function.
|
||||
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
|
||||
_sys_raw_input = Any()
|
||||
_sys_eval_input = Any()
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(YAPKernel, self).__init__(**kwargs)
|
||||
|
||||
# Initialize the InteractiveShell subclass
|
||||
self.shell = self.shell_class.instance(parent=self,
|
||||
profile_dir = self.profile_dir,
|
||||
user_module = self.user_module,
|
||||
user_ns = self.user_ns,
|
||||
kernel = self,
|
||||
)
|
||||
self.shell.displayhook.session = self.session
|
||||
self.shell.displayhook.pub_socket = self.iopub_socket
|
||||
self.shell.displayhook.topic = self._topic('execute_result')
|
||||
self.shell.display_pub.session = self.session
|
||||
self.shell.display_pub.pub_socket = self.iopub_socket
|
||||
self.comm_manager = CommManager(parent=self, kernel=self)
|
||||
|
||||
self.shell.configurables.append(self.comm_manager)
|
||||
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
|
||||
for msg_type in comm_msg_types:
|
||||
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
|
||||
|
||||
self.engine = YAPInteraction(self)
|
||||
self.shell.run_cell = self.engine.run_cell
|
||||
|
||||
help_links = List([
|
||||
{
|
||||
'text': "Python",
|
||||
'url': "http://docs.python.org/%i.%i" % sys.version_info[:2],
|
||||
},
|
||||
{
|
||||
'text': "IPython",
|
||||
'url': "http://ipython.org/documentation.html",
|
||||
},
|
||||
{
|
||||
'text': "NumPy",
|
||||
'url': "http://docs.scipy.org/doc/numpy/reference/",
|
||||
},
|
||||
{
|
||||
'text': "SciPy",
|
||||
'url': "http://docs.scipy.org/doc/scipy/reference/",
|
||||
},
|
||||
{
|
||||
'text': "Matplotlib",
|
||||
'url': "http://matplotlib.org/contents.html",
|
||||
},
|
||||
{
|
||||
'text': "SymPy",
|
||||
'url': "http://docs.sympy.org/latest/index.html",
|
||||
},
|
||||
{
|
||||
'text': "pandas",
|
||||
'url': "http://pandas.pydata.org/pandas-docs/stable/",
|
||||
},
|
||||
]).tag(config=True)
|
||||
|
||||
# Kernel info fields
|
||||
implementation = 'yap'
|
||||
implementation_version = "6.3"
|
||||
language_info = {
|
||||
'name': 'YAP Kernel',
|
||||
'version': '6.3',
|
||||
'mimetype': 'text/x-prolog',
|
||||
'codemirror_mode': {
|
||||
'name': 'prolog',
|
||||
'version': sys.version_info[0]
|
||||
},
|
||||
'pygments_lexer': 'prolog',
|
||||
'nbconvert_exporter': 'prolog',
|
||||
'file_extension': '.yap'
|
||||
}
|
||||
|
||||
@property
|
||||
def banner(self):
|
||||
return self.shell.banner
|
||||
|
||||
def start(self):
|
||||
self.shell.exit_now = False
|
||||
super(YAPKernel, self).start()
|
||||
|
||||
def set_parent(self, ident, parent):
|
||||
"""Overridden from parent to tell the display hook and output streams
|
||||
about the parent message.
|
||||
"""
|
||||
super(YAPKernel, self).set_parent(ident, parent)
|
||||
self.shell.set_parent(parent)
|
||||
|
||||
def init_metadata(self, parent):
|
||||
"""Initialize metadata.
|
||||
|
||||
Run at the beginning of each execution request.
|
||||
"""
|
||||
md = super(YAPKernel, self).init_metadata(parent)
|
||||
# FIXME: remove deprecated ipyparallel-specific code
|
||||
# This is required for ipyparallel < 5.0
|
||||
md.update({
|
||||
'dependencies_met' : True,
|
||||
'engine' : self.ident,
|
||||
})
|
||||
return md
|
||||
|
||||
def finish_metadata(self, parent, metadata, reply_content):
    """Finish populating metadata after an execution request completes."""
    # FIXME: remove deprecated ipyparallel-specific code
    # ipyparallel < 5.0 reads these keys from the metadata dict.
    status = reply_content['status']
    metadata['status'] = status
    if status == 'error' and reply_content['ename'] == 'UnmetDependency':
        metadata['dependencies_met'] = False

    return metadata
|
||||
|
||||
def _forward_input(self, allow_stdin=False):
    """Forward raw_input and getpass to the current frontend.

    via input_request
    """
    self._allow_stdin = allow_stdin

    if PY3:
        # Python 3: only builtins.input exists; route it to the frontend.
        self._sys_raw_input = builtin_mod.input
        builtin_mod.input = self.raw_input
    else:
        # Python 2: raw_input returns a string while input eval()s it,
        # so both builtins must be patched.
        self._sys_raw_input = builtin_mod.raw_input
        self._sys_eval_input = builtin_mod.input
        builtin_mod.raw_input = self.raw_input
        builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
    # getpass must also go through the frontend so prompts are not echoed.
    self._save_getpass = getpass.getpass
    getpass.getpass = self.getpass
|
||||
|
||||
def _restore_input(self):
    """Restore raw_input, getpass"""
    # Undo the monkeypatching done by _forward_input, mirroring its branches.
    if PY3:
        builtin_mod.input = self._sys_raw_input
    else:
        builtin_mod.raw_input = self._sys_raw_input
        builtin_mod.input = self._sys_eval_input

    getpass.getpass = self._save_getpass
|
||||
|
||||
@property
def execution_count(self):
    # The shell owns history numbering; delegate the counter to it.
    return self.shell.execution_count

@execution_count.setter
def execution_count(self, value):
    # Ignore the incrementing done by KernelBase, in favour of our shell's
    # execution counter.
    pass
|
||||
|
||||
def do_execute(self, code, silent, store_history=True,
               user_expressions=None, allow_stdin=False):
    """Run *code* in the shell and build the execute_reply content dict.

    Returns a dict with status, execution_count, user_expressions and
    payload keys, per the Jupyter messaging protocol.
    """
    shell = self.shell # we'll need this a lot here

    # Redirect raw_input/getpass to the frontend for the duration of the run.
    self._forward_input(allow_stdin)

    reply_content = {}
    try:
        res = self.shell.run_cell(code, store_history=store_history, silent=silent)
    finally:
        # Always undo the input monkeypatching, even if run_cell raised.
        self._restore_input()

    # Prefer the pre-execution error (e.g. a SyntaxError) over a runtime one.
    if res.error_before_exec is not None:
        err = res.error_before_exec
    else:
        err = res.error_in_exec

    if res.success:
        reply_content[u'status'] = u'ok'
    else:
        reply_content[u'status'] = u'error'

        reply_content.update({
            u'traceback': shell._last_traceback or [],
            u'ename': unicode_type(type(err).__name__),
            u'evalue': safe_unicode(err),
        })

        # FIXME: deprecated piece for ipyparallel (remove in 5.0):
        e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
                      method='execute')
        reply_content['engine_info'] = e_info

    # Return the execution counter so clients can display prompts
    reply_content['execution_count'] = shell.execution_count - 1

    if 'traceback' in reply_content:
        self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))

    # At this point, we can tell whether the main code execution succeeded
    # or not. If it did, we proceed to evaluate user_expressions
    if reply_content['status'] == 'ok':
        reply_content[u'user_expressions'] = \
            shell.user_expressions(user_expressions or {})
    else:
        # If there was an error, don't even try to compute expressions
        reply_content[u'user_expressions'] = {}

    # Payloads should be retrieved regardless of outcome, so we can both
    # recover partial output (that could have been generated early in a
    # block, before an error) and always clear the payload system.
    reply_content[u'payload'] = shell.payload_manager.read_payload()
    # Be aggressive about clearing the payload because we don't want
    # it to sit in memory until the next execute_request comes in.
    shell.payload_manager.clear_payload()

    return reply_content
|
||||
|
||||
def do_complete(self, code, cursor_pos):
    """Compute completion matches for *code* at *cursor_pos*."""
    # FIXME: IPython completers currently assume single line,
    # but completion messages give multi-line context.
    # For now, extract the current line from the cell via cursor_pos.
    if cursor_pos is None:
        cursor_pos = len(code)
    line, offset = line_at_cursor(code, cursor_pos)
    matched_text, matches = self.shell.complete('', line, cursor_pos - offset)
    return {
        'matches': matches,
        'cursor_end': cursor_pos,
        'cursor_start': cursor_pos - len(matched_text),
        'metadata': {},
        'status': 'ok',
    }
|
||||
|
||||
def do_inspect(self, code, cursor_pos, detail_level=0):
    """Build an inspect_reply for the object named at *cursor_pos*."""
    name = token_at_cursor(code, cursor_pos)
    info = self.shell.object_inspect(name)

    data = {}
    reply_content = {
        'status': 'ok',
        'data': data,
        'metadata': {},
        'found': info['found'],
    }
    if info['found']:
        # Only a plain-text representation is produced for now.
        data['text/plain'] = self.shell.object_inspect_text(
            name,
            detail_level=detail_level,
        )

    return reply_content
|
||||
|
||||
def do_history(self, hist_access_type, output, raw, session=0, start=0,
               stop=None, n=None, pattern=None, unique=False):
    """Fetch execution history from the shell's history manager."""
    hm = self.shell.history_manager
    if hist_access_type == 'tail':
        # Last n entries, including the current session's latest input.
        hist = hm.get_tail(n, raw=raw, output=output, include_latest=True)
    elif hist_access_type == 'range':
        hist = hm.get_range(session, start, stop, raw=raw, output=output)
    elif hist_access_type == 'search':
        hist = hm.search(pattern, raw=raw, output=output, n=n, unique=unique)
    else:
        # Unknown access type: reply with an empty history rather than fail.
        hist = []

    return {
        'status': 'ok',
        'history': list(hist),
    }
|
||||
|
||||
def do_shutdown(self, restart):
    """Flag the shell to exit and acknowledge the shutdown request."""
    self.shell.exit_now = True
    return {'status': 'ok', 'restart': restart}
|
||||
|
||||
def do_is_complete(self, code):
    """Check whether *code* forms a complete, runnable cell."""
    status, indent_spaces = self.shell.input_transformer_manager.check_complete(code)
    reply = {'status': status}
    if status == 'incomplete':
        # Suggest continuation indentation for the frontend prompt.
        reply['indent'] = ' ' * indent_spaces
    return reply
|
||||
|
||||
def do_apply(self, content, bufs, msg_id, reply_metadata):
    """Run an ipyparallel apply request.

    Deserializes f(*args, **kwargs) from *bufs*, executes the call in the
    user namespace under temporary prefixed names, and returns the reply
    content plus the serialized result buffers.
    """
    from .serialize import serialize_object, unpack_apply_message
    shell = self.shell
    try:
        working = shell.user_ns

        # Unique prefix keeps temporary names from colliding with user vars.
        prefix = "_"+str(msg_id).replace("-","")+"_"

        f,args,kwargs = unpack_apply_message(bufs, working, copy=False)

        fname = getattr(f, '__name__', 'f')  # NOTE(review): overwritten just below — appears unused

        fname = prefix+"f"
        argname = prefix+"args"
        kwargname = prefix+"kwargs"
        resultname = prefix+"result"

        ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
        # print ns
        working.update(ns)
        code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
        try:
            exec(code, shell.user_global_ns, shell.user_ns)
            result = working.get(resultname)
        finally:
            # Always remove the temporary names from the user namespace.
            for key in ns:
                working.pop(key)

        result_buf = serialize_object(result,
                                      buffer_threshold=self.session.buffer_threshold,
                                      item_threshold=self.session.item_threshold,
                                      )

    except BaseException as e:
        # invoke IPython traceback formatting
        shell.showtraceback()
        reply_content = {
            u'traceback': shell._last_traceback or [],
            u'ename': unicode_type(type(e).__name__),
            u'evalue': safe_unicode(e),
        }
        # FIXME: deprecated piece for ipyparallel (remove in 5.0):
        e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
        reply_content['engine_info'] = e_info

        # Publish the error on IOPub so other clients see it too.
        self.send_response(self.iopub_socket, u'error', reply_content,
                           ident=self._topic('error'))
        self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
        result_buf = []
        reply_content['status'] = 'error'
    else:
        reply_content = {'status' : 'ok'}

    return reply_content, result_buf
|
||||
|
||||
def do_clear(self):
    """Soft-reset the shell namespace (ipyparallel %clear support)."""
    self.shell.reset(False)
    return {'status': 'ok'}
|
||||
|
||||
|
||||
# This exists only for backwards compatibility - use YAPKernel instead

class Kernel(YAPKernel):
    """Deprecated alias of YAPKernel, kept so old imports keep working."""

    def __init__(self, *args, **kwargs):
        import warnings
        # Warn once at construction time, then behave exactly like YAPKernel.
        warnings.warn('Kernel is a deprecated alias of yap_kernel.yapkernel.YAPKernel',
                      DeprecationWarning)
        super(Kernel, self).__init__(*args, **kwargs)
|
601
packages/python/yap_kernel/yap_kernel/zmqshell.py
Normal file
601
packages/python/yap_kernel/yap_kernel/zmqshell.py
Normal file
@ -0,0 +1,601 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""A ZMQ-based subclass of InteractiveShell.
|
||||
|
||||
This code is meant to ease the refactoring of the base InteractiveShell into
|
||||
something with a cleaner architecture for 2-process use, without actually
|
||||
breaking InteractiveShell itself. So we're doing something a bit ugly, where
|
||||
we subclass and override what we want to fix. Once this is working well, we
|
||||
can go back to the base class and refactor the code for a cleaner inheritance
|
||||
implementation that doesn't rely on so much monkeypatching.
|
||||
|
||||
But this lets us maintain a fully working IPython as we develop the new
|
||||
machinery. This should thus be thought of as scaffolding.
|
||||
"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
from threading import local
|
||||
|
||||
from tornado import ioloop
|
||||
|
||||
from IPython.core.interactiveshell import (
|
||||
InteractiveShell, InteractiveShellABC
|
||||
)
|
||||
from IPython.core import page
|
||||
from IPython.core.autocall import ZMQExitAutocall
|
||||
from IPython.core.displaypub import DisplayPublisher
|
||||
from IPython.core.error import UsageError
|
||||
from IPython.core.magics import MacroToEdit, CodeMagics
|
||||
from IPython.core.magic import magics_class, line_magic, Magics
|
||||
from IPython.core import payloadpage
|
||||
from IPython.core.usage import default_banner
|
||||
from IPython.display import display, Javascript
|
||||
from yap_kernel import (
|
||||
get_connection_file, get_connection_info, connect_qtconsole
|
||||
)
|
||||
from IPython.utils import openpy
|
||||
from yap_kernel.jsonutil import json_clean, encode_images
|
||||
from IPython.utils.process import arg_split
|
||||
from ipython_genutils import py3compat
|
||||
from ipython_genutils.py3compat import unicode_type
|
||||
from traitlets import (
|
||||
Instance, Type, Dict, CBool, CBytes, Any, default, observe
|
||||
)
|
||||
from yap_kernel.displayhook import ZMQShellDisplayHook
|
||||
|
||||
from jupyter_core.paths import jupyter_runtime_dir
|
||||
from jupyter_client.session import extract_header, Session
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Functions and classes
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class ZMQDisplayPublisher(DisplayPublisher):
    """A display publisher that publishes data using a ZeroMQ PUB socket."""

    # Jupyter session used to build and send messages.
    session = Instance(Session, allow_none=True)
    # IOPub socket the display messages are published on.
    pub_socket = Any(allow_none=True)
    # Header of the request that triggered the current output.
    parent_header = Dict({})
    topic = CBytes(b'display_data')

    # thread_local:
    # An attribute used to ensure the correct output message
    # is processed. See yap_kernel Issue 113 for a discussion.
    _thread_local = Any()

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def _flush_streams(self):
        """flush IO Streams prior to display"""
        # Keeps stdout/stderr ordering consistent with display output.
        sys.stdout.flush()
        sys.stderr.flush()

    @default('_thread_local')
    def _default_thread_local(self):
        """Initialize our thread local storage"""
        return local()

    @property
    def _hooks(self):
        # Hooks are kept per-thread so concurrent displays don't interfere.
        if not hasattr(self._thread_local, 'hooks'):
            # create new list for a new thread
            self._thread_local.hooks = []
        return self._thread_local.hooks

    def publish(self, data, metadata=None, source=None, transient=None,
                update=False,
                ):
        """Publish a display-data message

        Parameters
        ----------
        data: dict
            A mime-bundle dict, keyed by mime-type.
        metadata: dict, optional
            Metadata associated with the data.
        transient: dict, optional, keyword-only
            Transient data that may only be relevant during a live display,
            such as display_id.
            Transient data should not be persisted to documents.
        update: bool, optional, keyword-only
            If True, send an update_display_data message instead of display_data.
        """
        # Flush streams first to preserve output ordering.
        self._flush_streams()
        if metadata is None:
            metadata = {}
        if transient is None:
            transient = {}
        self._validate_data(data, metadata)
        content = {}
        content['data'] = encode_images(data)
        content['metadata'] = metadata
        content['transient'] = transient

        msg_type = 'update_display_data' if update else 'display_data'

        # Use 2-stage process to send a message,
        # in order to put it through the transform
        # hooks before potentially sending.
        msg = self.session.msg(
            msg_type, json_clean(content),
            parent=self.parent_header
        )

        # Each transform either returns a new
        # message or None. If None is returned,
        # the message has been 'used' and we return.
        for hook in self._hooks:
            msg = hook(msg)
            if msg is None:
                return

        self.session.send(
            self.pub_socket, msg, ident=self.topic,
        )

    def clear_output(self, wait=False):
        """Clear output associated with the current execution (cell).

        Parameters
        ----------
        wait: bool (default: False)
            If True, the output will not be cleared immediately,
            instead waiting for the next display before clearing.
            This reduces bounce during repeated clear & display loops.

        """
        content = dict(wait=wait)
        self._flush_streams()
        self.session.send(
            self.pub_socket, u'clear_output', content,
            parent=self.parent_header, ident=self.topic,
        )

    def register_hook(self, hook):
        """
        Registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object

        Returns
        -------
        Either a publishable message, or `None`.

        The DisplayHook objects must return a message from
        the __call__ method if they still require the
        `session.send` method to be called after transformation.
        Returning `None` will halt that execution path, and
        session.send will not be called.
        """
        self._hooks.append(hook)

    def unregister_hook(self, hook):
        """
        Un-registers a hook with the thread-local storage.

        Parameters
        ----------
        hook: Any callable object which has previously been
            registered as a hook.

        Returns
        -------
        bool - `True` if the hook was removed, `False` if it wasn't
            found.
        """
        try:
            self._hooks.remove(hook)
            return True
        except ValueError:
            return False
|
||||
|
||||
|
||||
@magics_class
class KernelMagics(Magics):
    #------------------------------------------------------------------------
    # Magic overrides
    #------------------------------------------------------------------------
    # Once the base class stops inheriting from magic, this code needs to be
    # moved into a separate machinery as well. For now, at least isolate here
    # the magics which this class needs to implement differently from the base
    # class, or that are unique to it.

    _find_edit_target = CodeMagics._find_edit_target

    @line_magic
    def edit(self, parameter_s='', last_call=['','']):
        """Bring up an editor and execute the resulting code.

        Usage:
          %edit [options] [args]

        %edit runs the external text editor configured via the
        ``TerminalInteractiveShell.editor`` option, letting you conveniently
        edit multi-line code right in your session.  Called without
        arguments, it opens an empty temporary file.

        Options:

        -n <number>: open the editor at the given line number.  The default
        hook uses the unix 'editor +N filename' syntax, but this can be
        customized with your own hook.

        -p: call the editor with the same data as the previous time it was
        used, regardless of how long ago that was.

        -r: use 'raw' input.  Only applies to input taken from the user's
        history; by default the 'processed' history (magics transformed to
        valid Python) is used.

        Arguments may be: history line numbers or colon-separated ranges
        (as in %macro); the name of a string variable whose contents to
        edit; the name of an object, whose defining file is opened at the
        definition point (exact-line opening is Unix-only and requires an
        editor that understands '+NUMBER'); the name of a macro, which is
        reloaded from the edited file on exit; or a filename (adding .py
        if necessary) to load into the editor.

        Unlike in the terminal, this is designed to use a GUI editor, and
        we do not know when it has closed, so the file you edit will not
        be automatically executed or printed.

        Note that %edit is also available through the alias %ed.
        """

        opts, args = self.parse_options(parameter_s, 'prn:')

        try:
            filename, lineno, _ = CodeMagics._find_edit_target(self.shell, args, opts, last_call)
        except MacroToEdit:
            # TODO: Implement macro editing over 2 processes.
            print("Macro editing not yet implemented in 2-process model.")
            return

        # Make sure we send to the client an absolute path, in case the working
        # directory of client and kernel don't match
        filename = os.path.abspath(filename)

        payload = {
            'source': 'edit_magic',
            'filename': filename,
            'line_number': lineno,
        }
        self.shell.payload_manager.write_payload(payload)

    # A few magics that are adapted to the specifics of using pexpect and a
    # remote terminal

    @line_magic
    def clear(self, arg_s):
        """Clear the terminal."""
        if os.name == 'posix':
            self.shell.system("clear")
        else:
            self.shell.system("cls")

    if os.name == 'nt':
        # This is the usual name in windows
        cls = line_magic('cls')(clear)

    # Terminal pagers won't work over pexpect, but we do have our own pager

    @line_magic
    def less(self, arg_s):
        """Show a file through the pager.

        Files ending in .py are syntax-highlighted."""
        if not arg_s:
            raise UsageError('Missing filename.')

        if arg_s.endswith('.py'):
            # Syntax-highlight Python sources before paging them.
            contents = self.shell.pycolorize(openpy.read_py_file(arg_s, skip_encoding_cookie=False))
        else:
            contents = open(arg_s).read()
        page.page(contents)

    more = line_magic('more')(less)

    # Man calls a pager, so we also need to redefine it
    if os.name == 'posix':
        @line_magic
        def man(self, arg_s):
            """Find the man page for the given command and display in pager."""
            page.page(self.shell.getoutput('man %s | col -b' % arg_s,
                                           split=False))

    @line_magic
    def connect_info(self, arg_s):
        """Print information for connecting other clients to this kernel.

        Prints the contents of this session's connection file plus
        shortcuts for local clients.  In the simplest case, secondary
        clients of the most recently launched kernel can connect with:

            $> jupyter <app> --existing

        """
        try:
            connection_file = get_connection_file()
            info = get_connection_info(unpack=False)
        except Exception as e:
            warnings.warn("Could not get connection info: %r" % e)
            return

        # if it's in the default dir, truncate to basename
        if jupyter_runtime_dir() == os.path.dirname(connection_file):
            connection_file = os.path.basename(connection_file)

        print(info + '\n')
        print("Paste the above JSON into a file, and connect with:\n"
              " $> jupyter <app> --existing <file>\n"
              "or, if you are local, you can connect with just:\n"
              " $> jupyter <app> --existing {0}\n"
              "or even just:\n"
              " $> jupyter <app> --existing\n"
              "if this is the most recent Jupyter kernel you have started.".format(
                  connection_file
              )
              )

    @line_magic
    def qtconsole(self, arg_s):
        """Open a qtconsole connected to this kernel.

        Useful for connecting a qtconsole to running notebooks, for better
        debugging.
        """
        # %qtconsole should imply bind_kernel for engines:
        # FIXME: move to ipyparallel Kernel subclass
        if 'ipyparallel' in sys.modules:
            from ipyparallel import bind_kernel
            bind_kernel()

        try:
            connect_qtconsole(argv=arg_split(arg_s, os.name == 'posix'))
        except Exception as e:
            warnings.warn("Could not start qtconsole: %r" % e)
            return

    @line_magic
    def autosave(self, arg_s):
        """Set the autosave interval in the notebook (in seconds).

        The default value is 120, or two minutes; ``%autosave 0`` disables
        autosave.  Only has an effect when called from the notebook
        interface, not from a startup file.
        """
        try:
            interval = int(arg_s)
        except ValueError:
            raise UsageError("%%autosave requires an integer, got %r" % arg_s)

        # javascript wants milliseconds
        milliseconds = 1000 * interval
        display(Javascript("IPython.notebook.set_autosave_interval(%i)" % milliseconds),
                include=['application/javascript']
                )
        if interval:
            print("Autosaving every %i seconds" % interval)
        else:
            print("Autosave disabled")
|
||||
|
||||
|
||||
class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    # Replace display/output machinery with ZMQ-publishing equivalents.
    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Type('yap_kernel.datapub.ZMQDataPublisher')
    kernel = Any()
    parent_header = Any()

    @default('banner1')
    def _default_banner1(self):
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    colors_force = CBool(True)
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)

    exiter = Instance(ZMQExitAutocall)

    @default('exiter')
    def _default_exiter(self):
        return ZMQExitAutocall(self)

    @observe('exit_now')
    def _update_exit_now(self, change):
        """stop eventloop when exit_now fires"""
        if change['new']:
            loop = ioloop.IOLoop.instance()
            # Small delay lets the shutdown reply go out before the loop stops.
            loop.add_timeout(time.time() + 0.1, loop.stop)

    keepkernel_on_exit = None

    # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
    # interactive input being read; we provide event loop support in yapkernel
    def enable_gui(self, gui):
        from .eventloops import enable_gui as real_enable_gui
        try:
            real_enable_gui(gui)
            self.active_eventloop = gui
        except ValueError as e:
            # Surface unsupported-GUI errors as user-facing usage errors.
            raise UsageError("%s" % e)

    def init_environment(self):
        """Configure the user's environment."""
        env = os.environ
        # These two ensure 'ls' produces nice coloring on BSD-derived systems
        env['TERM'] = 'xterm-color'
        env['CLICOLOR'] = '1'
        # Since normal pagers don't work at all (over pexpect we don't have
        # single-key control of the subprocess), try to disable paging in
        # subprocesses as much as possible.
        env['PAGER'] = 'cat'
        env['GIT_PAGER'] = 'cat'

    def init_hooks(self):
        # Route pager output through the payload system instead of a terminal pager.
        super(ZMQInteractiveShell, self).init_hooks()
        self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99)

    def init_data_pub(self):
        """Delay datapub init until request, for deprecation warnings"""
        pass

    @property
    def data_pub(self):
        if not hasattr(self, '_data_pub'):
            warnings.warn("InteractiveShell.data_pub is deprecated outside IPython parallel.",
                          DeprecationWarning, stacklevel=2)

            # Lazily construct the publisher, sharing the display pub's session/socket.
            self._data_pub = self.data_pub_class(parent=self)
            self._data_pub.session = self.display_pub.session
            self._data_pub.pub_socket = self.display_pub.pub_socket
        return self._data_pub

    @data_pub.setter
    def data_pub(self, pub):
        self._data_pub = pub

    def ask_exit(self):
        """Engage the exit actions."""
        self.exit_now = (not self.keepkernel_on_exit)
        payload = dict(
            source='ask_exit',
            keepkernel=self.keepkernel_on_exit,
        )
        self.payload_manager.write_payload(payload)

    def run_cell(self, *args, **kwargs):
        # Reset the stored traceback so _showtraceback only reports new errors.
        self._last_traceback = None
        return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)

    def _showtraceback(self, etype, evalue, stb):
        # try to preserve ordering of tracebacks and print statements
        sys.stdout.flush()
        sys.stderr.flush()

        exc_content = {
            u'traceback' : stb,
            u'ename' : unicode_type(etype.__name__),
            u'evalue' : py3compat.safe_unicode(evalue),
        }

        dh = self.displayhook
        # Send exception info over pub socket for other clients than the caller
        # to pick up
        topic = None
        if dh.topic:
            topic = dh.topic.replace(b'execute_result', b'error')

        exc_msg = dh.session.send(dh.pub_socket, u'error', json_clean(exc_content),
                                  dh.parent_header, ident=topic)

        # FIXME - Once we rely on Python 3, the traceback is stored on the
        # exception object, so we shouldn't need to store it here.
        self._last_traceback = stb

    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        payload = dict(
            source='set_next_input',
            text=text,
            replace=replace,
        )
        self.payload_manager.write_payload(payload)

    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)
        self.display_pub.set_parent(parent)
        if hasattr(self, '_data_pub'):
            self.data_pub.set_parent(parent)
        # Kernel OutStream objects have set_parent; plain streams don't.
        try:
            sys.stdout.set_parent(parent)
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)
        except AttributeError:
            pass

    def get_parent(self):
        return self.parent_header

    def init_magics(self):
        super(ZMQInteractiveShell, self).init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias('ed', 'edit')

    def init_virtualenv(self):
        # Overridden not to do virtualenv detection, because it's probably
        # not appropriate in a kernel. To use a kernel in a virtualenv, install
        # it inside the virtualenv.
        # https://ipython.readthedocs.io/en/latest/install/kernel_install.html
        pass


InteractiveShellABC.register(ZMQInteractiveShell)
|
16
packages/python/yap_kernel/yap_kernel_launcher.py
Normal file
16
packages/python/yap_kernel/yap_kernel_launcher.py
Normal file
@ -0,0 +1,16 @@
|
||||
"""Entry point for launching an IPython kernel.
|
||||
|
||||
This is separate from the yap_kernel package so we can avoid doing imports until
|
||||
after removing the cwd from sys.path.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Remove the CWD from sys.path while we load stuff.
|
||||
# This is added back by InteractiveShellApp.init_path()
|
||||
if sys.path[0] == '':
|
||||
del sys.path[0]
|
||||
|
||||
from yap_kernel import kernelapp as app
|
||||
app.launch_new_instance()
|
Reference in New Issue
Block a user