15 #define vm_exec rb_vm_exec
27 #include "probes.dmyh"
45 static inline const VALUE *
46 VM_EP_LEP(
const VALUE *ep)
48 while (!VM_ENV_LOCAL_P(ep)) {
49 ep = VM_ENV_PREV_EP(ep);
81 static inline const VALUE *
84 return VM_EP_LEP(cfp->
ep);
87 static inline const VALUE *
90 return VM_ENV_PREV_EP(cfp->
ep);
97 const VALUE *ep = VM_CF_LEP(cfp);
98 return VM_ENV_BLOCK_HANDLER(ep);
104 return VM_FRAME_CFRAME_KW_P(cfp);
111 return VM_FRAME_CFRAME_EMPTY_KW_P(cfp);
117 return VM_CF_BLOCK_HANDLER(cfp);
120 #if VM_CHECK_MODE > 0
128 if (start <= (
VALUE *)cfp && (
VALUE *)cfp < end) {
143 if (start <= ep && ep < end) {
154 if (VM_EP_IN_HEAP_P(ec, ep)) {
172 rb_vm_ep_in_heap_p(
const VALUE *ep)
176 return vm_ep_in_heap_p_(ec, ep);
208 vm_block_handler_verify(block_handler);
209 return block_handler;
216 int omod_shared =
FALSE;
225 scope_visi.visi.method_visi = visi;
229 if (prev_cref !=
NULL && prev_cref != (
void *)1 ) {
230 refinements = CREF_REFINEMENTS(prev_cref);
232 if (!
NIL_P(refinements)) {
234 CREF_OMOD_SHARED_SET(prev_cref);
240 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
241 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
249 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval,
FALSE);
255 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval,
TRUE);
267 VALUE klass = CREF_CLASS(cref);
269 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
270 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
272 new_cref = vm_cref_new(klass, visi->method_visi, visi->
module_func, next_cref, pushed_by_eval);
274 if (!
NIL_P(CREF_REFINEMENTS(cref))) {
277 CREF_REFINEMENTS_SET(new_cref, ref);
278 CREF_OMOD_SHARED_UNSET(new_cref);
288 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
300 return vm_cref_new_toplevel(
GET_EC());
304 vm_cref_dump(
const char *mesg,
const rb_cref_t *cref)
306 fprintf(stderr,
"vm_cref_dump: %s (%p)\n", mesg, (
void *)cref);
310 cref = CREF_NEXT(cref);
329 #if VM_COLLECT_USAGE_DETAILS
330 static void vm_collect_usage_operand(
int insn,
int n,
VALUE op);
331 static void vm_collect_usage_insn(
int insn);
332 static void vm_collect_usage_register(
int reg,
int isset);
361 mjit_add_class_serial(class_serial);
370 #define ruby_vm_redefined_flag GET_VM()->redefined_flag
/* Forward declaration: destructor for thread data (defined later in
 * this file). */
static void thread_free(void *ptr);
402 klass =
RBASIC(klass)->klass;
411 const char *classname, *filename;
415 classname =
"<unknown>";
452 static VALUE sym_global_method_state, sym_global_constant_state, sym_class_serial;
469 if (sym_global_method_state == 0) {
470 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
471 S(global_method_state);
472 S(global_constant_state);
/* If the requested key matches sym_<name>, return that counter as a
 * Number immediately; otherwise, when collecting into a hash, store
 * it under sym_<name>. */
#define SET(name, attr) \
    if (key == sym_##name) \
        return SERIALT2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
500 if (iseq->
body->
type != ISEQ_TYPE_TOP) {
507 (
VALUE)vm_cref_new_toplevel(ec),
532 vm_set_eval_stack(ec, iseq, 0, &bind->
block);
536 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->
cfp));
543 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
555 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp))
bp();
556 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
557 if (VM_FRAME_RUBYFRAME_P(cfp)) {
570 if (VM_FRAME_RUBYFRAME_P(cfp)) {
576 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
577 if (VM_FRAME_RUBYFRAME_P(cfp)) {
598 vm_pop_frame(ec, cfp, cfp->
ep);
607 while (ec->
cfp != cfp) {
609 printf(
"skipped frame: %s\n", vm_frametype_name(ec->
cfp));
633 ruby_vm_run_at_exit_hooks(
rb_vm_t *vm)
653 fprintf(stderr,
"---\n");
654 fprintf(stderr,
"envptr: %p\n", (
void *)&
env->ep[0]);
655 fprintf(stderr,
"envval: %10p ", (
void *)
env->ep[1]);
657 fprintf(stderr,
"ep: %10p\n", (
void *)
env->ep);
659 fprintf(stderr,
">>\n");
661 fprintf(stderr,
"<<\n");
669 if (check_env(
env)) {
679 switch (vm_block_handler_type(block_handler)) {
682 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler),
rb_cProc);
686 return block_handler;
695 const VALUE *
const ep = cfp->
ep;
698 VALUE *env_body, *env_ep;
699 int local_size, env_size;
701 if (VM_ENV_ESCAPED_P(ep)) {
702 return VM_ENV_ENVVAL(ep);
705 if (!VM_ENV_LOCAL_P(ep)) {
706 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
708 if (!VM_ENV_ESCAPED_P(prev_ep)) {
711 while (prev_cfp->
ep != prev_ep) {
716 vm_make_env_each(ec, prev_cfp);
721 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
724 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
729 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
748 env_size = local_size +
751 MEMCPY(env_body, ep - (local_size - 1 ),
VALUE, local_size);
754 for (i = 0; i < local_size; i++) {
755 if (VM_FRAME_RUBYFRAME_P(cfp)) {
757 ep[-local_size + i] = 0;
762 env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->
iseq :
NULL;
763 env_ep = &env_body[local_size - 1 ];
765 env = vm_env_new(env_ep, env_body, env_size, env_iseq);
769 VM_STACK_ENV_WRITE(ep, 0, (
VALUE)
env);
776 VALUE envval = vm_make_env_each(ec, cfp);
779 check_env_value((
const rb_env_t *)envval);
790 vm_make_env_object(ec, cfp);
800 if (VM_ENV_LOCAL_P(ep)) {
804 return VM_ENV_ENVVAL_PTR(VM_ENV_PREV_EP(ep));
823 collect_local_variables_in_iseq(
env->iseq, vars);
830 if (VM_ENV_ESCAPED_P(ep)) {
831 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
843 local_var_list_init(&vars);
844 collect_local_variables_in_env(
env, &vars);
845 return local_var_list_finish(&vars);
852 local_var_list_init(&vars);
853 while (collect_local_variables_in_iseq(iseq, &vars)) {
856 return local_var_list_finish(&vars);
862 vm_proc_create_from_captured(
VALUE klass,
865 int8_t is_from_method, int8_t is_lambda)
877 vm_block_type_set(&proc->
block, block_type);
888 switch (vm_block_type(src)) {
905 proc_create(
VALUE klass,
const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
912 vm_block_type_set(&proc->
block, block->
type);
937 if (!VM_ENV_ESCAPED_P(captured->
ep)) {
939 vm_make_env_object(ec, cfp);
945 procval = vm_proc_create_from_captured(klass, captured,
957 VALUE bindval, envval;
960 if (cfp == 0 || ruby_level_cfp == 0) {
965 envval = vm_make_env_object(ec, cfp);
966 if (cfp == ruby_level_cfp) {
974 vm_bind_update_env(bindval, bind, envval);
987 VALUE path = pathobj_path(pathobj);
988 VALUE realpath = pathobj_realpath(pathobj);
995 ID minibuf[4], *dyns = minibuf;
998 if (dyncount < 0)
return 0;
1000 base_block = &bind->
block;
1001 base_iseq = vm_block_iseq(base_block);
1006 MEMCPY(dyns + 1, dynvars,
ID, dyncount);
1008 ast.
root = &tmp_node;
1019 tmp_node.nd_tbl = 0;
1022 vm_set_eval_stack(ec, iseq, 0, base_block);
1023 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->
cfp));
1041 ec->
cfp->
sp + arg_size,
1061 ec->
cfp->
sp + arg_size,
1107 for (i=0; i<
argc; i++) {
1111 opt_pc = vm_yield_setup_args(ec, iseq,
argc, sp, kw_splat, passed_block_handler,
1116 return invoke_block(ec, iseq,
self,
captured, cref,
type, opt_pc);
1119 return invoke_bmethod(ec, iseq,
self,
captured, me,
type, opt_pc);
1127 int is_lambda,
int force_blockarg)
1130 switch (vm_block_handler_type(block_handler)) {
1134 return invoke_iseq_block_from_c(ec, captured, captured->
self,
1135 argc,
argv, kw_splat, passed_block_handler,
1136 cref, is_lambda,
NULL);
1139 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1140 VM_BH_TO_IFUNC_BLOCK(block_handler)->
self,
1143 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1144 argc,
argv, kw_splat, passed_block_handler);
1146 if (force_blockarg ==
FALSE) {
1147 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1149 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1159 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->
cfp);
1160 vm_block_handler_verify(block_handler);
1165 return block_handler;
1171 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1173 cref, is_lambda,
FALSE);
1179 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1187 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1188 argc,
argv, kw_splat, block_handler,
1195 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1202 int kw_splat,
VALUE passed_block_handler,
int is_lambda,
1208 int kw_splat,
VALUE passed_block_handler,
int is_lambda,
1214 switch (vm_block_type(block)) {
1216 return invoke_iseq_block_from_c(ec, &block->
as.
captured,
self,
argc,
argv, kw_splat, passed_block_handler,
NULL, is_lambda, me);
1222 return vm_yield_with_cfunc(ec, &block->
as.
captured,
self,
argc,
argv, kw_splat, passed_block_handler, me);
1224 return vm_yield_with_symbol(ec, block->
as.
symbol,
argc,
argv, kw_splat, passed_block_handler);
1226 is_lambda = block_proc_is_lambda(block->
as.
proc);
1227 block = vm_proc_block(block->
as.
proc);
1238 return invoke_block_from_c_proc(ec,
proc,
self,
argc,
argv, kw_splat, passed_block_handler,
proc->is_lambda,
NULL);
1245 return invoke_block_from_c_proc(ec,
proc,
self,
argc,
argv, kw_splat, block_handler,
TRUE, me);
1252 VALUE self = vm_block_self(&
proc->block);
1253 vm_block_handler_verify(passed_block_handler);
1255 if (
proc->is_from_method) {
1259 return vm_invoke_proc(ec,
proc,
self,
argc,
argv, kw_splat, passed_block_handler);
1268 while (cfp->
pc == 0) {
1270 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1280 cfp = vm_normal_frame(ec, cfp);
1281 return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0,
key);
1287 cfp = vm_normal_frame(ec, cfp);
1288 lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0,
key, val);
1294 return vm_cfp_svar_get(ec, ec->
cfp,
key);
1300 vm_cfp_svar_set(ec, ec->
cfp,
key, val);
1365 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
1370 if (pline) *pline = 0;
1387 return vm_ec_cref(ec);
1395 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->
ep);
1405 if (!cfp || cfp->
self !=
self)
return NULL;
1406 if (!vm_env_cref_by_cref(cfp->
ep))
return NULL;
1407 cref = vm_get_cref(cfp->
ep);
1408 if (CREF_CLASS(cref) != cbase)
return NULL;
1417 dp(CREF_CLASS(cref));
1418 printf(
"%ld\n", CREF_VISI(cref));
1419 cref = CREF_NEXT(cref);
1433 return vm_get_cbase(cfp->
ep);
1439 make_localjump_error(
const char *mesg,
VALUE value,
int reason)
1473 VALUE exc = make_localjump_error(mesg, value, reason);
1484 mesg =
"unexpected return";
1487 mesg =
"unexpected break";
1490 mesg =
"unexpected next";
1493 mesg =
"unexpected redo";
1497 mesg =
"retry outside of rescue clause";
1504 val =
GET_EC()->tag->retval;
1506 return make_localjump_error(mesg, val, state);
1520 while (VM_ENV_LOCAL_P(cfp->
ep)) {
1532 const VALUE *ep = VM_CF_PREV_EP(cfp);
1554 vm_iter_break(
GET_EC(), val);
1559 static st_table *vm_opt_method_table = 0;
1560 static st_table *vm_opt_mid_table = 0;
1563 vm_redefinition_check_flag(
VALUE klass)
1583 if (!vm_opt_mid_table) {
1593 switch (def->type) {
1609 if (vm_redefinition_check_method_type(me->
def)) {
1611 int flag = vm_redefinition_check_flag(klass);
1619 check_redefined_method(
ID mid,
VALUE value,
void *data)
1625 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->
owner);
1633 if (!vm_redefinition_check_flag(klass))
return;
1642 if (me && vm_redefinition_check_method_type(me->
def)) {
1652 vm_init_redefined_flag(
void)
/* OP(mid_, bop_): select the method id and basic-operation index and
 * clear that operation's redefinition flag.
 * C(k): register the selected basic operation for built-in class
 * rb_c<k>. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
1662 OP(PLUS, PLUS), (
C(Integer),
C(Float),
C(String),
C(Array));
1663 OP(MINUS, MINUS), (
C(Integer),
C(Float));
1664 OP(MULT, MULT), (
C(Integer),
C(Float));
1667 OP(Eq, EQ), (
C(Integer),
C(Float),
C(String),
C(Symbol));
1668 OP(Eqq, EQQ), (
C(Integer),
C(Float),
C(Symbol),
C(String),
1669 C(NilClass),
C(TrueClass),
C(FalseClass));
1670 OP(LT, LT), (
C(Integer),
C(Float));
1671 OP(LE, LE), (
C(Integer),
C(Float));
1672 OP(GT, GT), (
C(Integer),
C(Float));
1673 OP(GE, GE), (
C(Integer),
C(Float));
1674 OP(LTLT, LTLT), (
C(String),
C(Array));
1676 OP(ASET, ASET), (
C(Array),
C(Hash));
1677 OP(Length, LENGTH), (
C(Array),
C(String),
C(Hash));
1678 OP(
Size, SIZE), (
C(Array),
C(String),
C(Hash));
1679 OP(EmptyP, EMPTY_P), (
C(Array),
C(String),
C(Hash));
1680 OP(Succ, SUCC), (
C(Integer),
C(String),
C(Time));
1682 OP(Freeze, FREEZE), (
C(String));
1683 OP(UMinus, UMINUS), (
C(String));
1687 OP(And, AND), (
C(Integer));
1688 OP(Or,
OR), (
C(Integer));
1700 switch (VM_FRAME_TYPE(cfp)) {
1720 THROW_DATA_CONSUMED_P(
err) ==
FALSE) {
1721 return THROW_DATA_VAL(
err);
1733 unsigned long type = VM_FRAME_TYPE(cfp);
1734 #define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
1762 switch (VM_FRAME_TYPE(ec->
cfp)) {
1772 THROW_DATA_CONSUMED_SET(
err);
1775 if (VM_FRAME_BMETHOD_P(ec->
cfp)) {
1782 if (!will_finish_vm_exec) {
1790 frame_return_value(
err));
1800 frame_return_value(
err),
TRUE);
1803 THROW_DATA_CONSUMED_SET(
err);
1811 THROW_DATA_CONSUMED_SET(
err);
1919 if (!mjit_enable_p || (result = mjit_exec(ec)) ==
Qundef) {
1920 result = vm_exec_core(ec, initial);
1927 while ((result = vm_exec_handle_exception(ec, state, result, &initial)) ==
Qundef) {
1929 result = vm_exec_core(ec, initial);
1933 if ((state = _tag.state) ==
TAG_NONE)
break;
1950 const struct iseq_catch_table *ct;
1951 unsigned long epc, cont_pc, cont_sp;
1957 cont_pc = cont_sp = 0;
1978 escape_cfp = THROW_DATA_CATCH_FRAME(
err);
1980 if (cfp == escape_cfp) {
1982 if (!VM_FRAME_FINISHED_P(cfp)) {
1983 THROW_DATA_CATCH_FRAME_SET(
err, cfp + 1);
1988 if (ct)
for (i = 0; i < ct->size; i++) {
1990 if (entry->
start < epc && entry->
end >= epc) {
1991 if (entry->
type == CATCH_TYPE_ENSURE) {
1992 catch_iseq = entry->
iseq;
1993 cont_pc = entry->
cont;
1994 cont_sp = entry->
sp;
1999 if (catch_iseq ==
NULL) {
2001 THROW_DATA_CATCH_FRAME_SET(
err, cfp + 1);
2002 hook_before_rewind(ec, ec->
cfp,
TRUE, state,
err);
2004 return THROW_DATA_VAL(
err);
2011 #if OPT_STACK_CACHING
2012 *initial = THROW_DATA_VAL(
err);
2014 *ec->
cfp->
sp++ = THROW_DATA_VAL(
err);
2024 if (ct)
for (i = 0; i < ct->size; i++) {
2026 if (entry->
start < epc && entry->
end >= epc) {
2028 if (entry->
type == CATCH_TYPE_RESCUE ||
2029 entry->
type == CATCH_TYPE_ENSURE) {
2030 catch_iseq = entry->
iseq;
2031 cont_pc = entry->
cont;
2032 cont_sp = entry->
sp;
2040 if (ct)
for (i = 0; i < ct->size; i++) {
2042 if (entry->
start < epc && entry->
end >= epc) {
2044 if (entry->
type == CATCH_TYPE_ENSURE) {
2045 catch_iseq = entry->
iseq;
2046 cont_pc = entry->
cont;
2047 cont_sp = entry->
sp;
2050 else if (entry->
type == CATCH_TYPE_RETRY) {
2052 escape_cfp = THROW_DATA_CATCH_FRAME(
err);
2053 if (cfp == escape_cfp) {
2062 else if (state ==
TAG_BREAK && !escape_cfp) {
2063 type = CATCH_TYPE_BREAK;
2065 search_restart_point:
2067 if (ct)
for (i = 0; i < ct->size; i++) {
2070 if (entry->
start < epc && entry->
end >= epc) {
2071 if (entry->
type == CATCH_TYPE_ENSURE) {
2072 catch_iseq = entry->
iseq;
2073 cont_pc = entry->
cont;
2074 cont_sp = entry->
sp;
2079 cfp->
sp = vm_base_ptr(cfp) + entry->
sp;
2082 #if OPT_STACK_CACHING
2083 *initial = THROW_DATA_VAL(
err);
2085 *ec->
cfp->
sp++ = THROW_DATA_VAL(
err);
2096 type = CATCH_TYPE_REDO;
2097 goto search_restart_point;
2100 type = CATCH_TYPE_NEXT;
2101 goto search_restart_point;
2105 if (ct)
for (i = 0; i < ct->size; i++) {
2107 if (entry->
start < epc && entry->
end >= epc) {
2109 if (entry->
type == CATCH_TYPE_ENSURE) {
2110 catch_iseq = entry->
iseq;
2111 cont_pc = entry->
cont;
2112 cont_sp = entry->
sp;
2119 if (catch_iseq !=
NULL) {
2121 const int arg_size = 1;
2123 rb_iseq_check(catch_iseq);
2124 cfp->
sp = vm_base_ptr(cfp) + cont_sp;
2134 cfp->
sp + arg_size ,
2145 hook_before_rewind(ec, ec->
cfp,
FALSE, state,
err);
2147 if (VM_FRAME_FINISHED_P(ec->
cfp)) {
2167 vm_set_top_stack(ec, iseq);
2178 vm_set_main_stack(ec, iseq);
2190 if (called_idp) *called_idp = me->
called_id;
2191 if (klassp) *klassp = me->
owner;
2221 recv, block_handler,
2222 (
VALUE)vm_cref_new_toplevel(ec),
2223 0, reg_cfp->
sp, 0, 0);
2246 RUBY_GC_INFO(
"-------------------------------------------------\n");
2251 const VALUE *obj_ary;
2261 for (i=0; i <
len; i++) {
2268 for (j=0; j < jlen; j++) {
2300 #undef rb_vm_register_special_exception
2341 rb_vm_living_threads_init(vm);
2342 ruby_vm_run_at_exit_hooks(vm);
2366 vm_memsize(
const void *
ptr)
2387 vm_default_params(
void)
2391 #define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2392 SET(thread_vm_stack_size);
2393 SET(thread_machine_stack_size);
2394 SET(fiber_vm_stack_size);
2395 SET(fiber_machine_stack_size);
2402 get_param(
const char *
name,
size_t default_value,
size_t min_value)
2405 size_t result = default_value;
2407 long val = atol(envval);
2408 if (val < (
long)min_value) {
2409 val = (long)min_value;
2413 if (0) fprintf(stderr,
"%s: %"PRIuSIZE"\n",
name, result);
2419 check_machine_stack_size(
size_t *sizep)
2421 #ifdef PTHREAD_STACK_MIN
2422 size_t size = *sizep;
2425 #ifdef PTHREAD_STACK_MIN
2426 if (
size < PTHREAD_STACK_MIN) {
2427 *sizep = PTHREAD_STACK_MIN * 2;
2433 vm_default_params_setup(
rb_vm_t *vm)
2436 get_param(
"RUBY_THREAD_VM_STACK_SIZE",
2441 get_param(
"RUBY_THREAD_MACHINE_STACK_SIZE",
2446 get_param(
"RUBY_FIBER_VM_STACK_SIZE",
2451 get_param(
"RUBY_FIBER_MACHINE_STACK_SIZE",
2464 rb_vm_living_threads_init(vm);
2468 vm_default_params_setup(vm);
2481 while (cfp != limit_cfp) {
2487 if (!VM_ENV_LOCAL_P(ep)) {
2488 VALUE *prev_ep = (
VALUE *)VM_ENV_PREV_EP(ep);
2512 while (cfp != limit_cfp) {
2519 if (!VM_ENV_LOCAL_P(ep)) {
2520 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2554 thread_compact(
void *
ptr)
2565 thread_mark(
void *
ptr)
2573 case thread_invoke_type_proc:
2577 case thread_invoke_type_func:
2603 thread_free(
void *
ptr)
2609 rb_bug(
"thread_free: locking_mutex must be NULL (%p:%p)", (
void *)th, (
void *)th->locking_mutex);
2612 rb_bug(
"thread_free: keeping_mutexes must be NULL (%p:%p)", (
void *)th, (
void *)th->keeping_mutexes);
2628 thread_memsize(
const void *
ptr)
2642 #define thread_data_type ruby_threadptr_data_type
2666 thread_alloc(
VALUE klass)
2729 #ifdef NON_SCALAR_THREAD_ID
2730 th->thread_id_string[0] =
'\0';
2733 #if OPT_CALL_THREADED_CODE
2741 ruby_thread_init(
VALUE self)
2758 VALUE self = thread_alloc(klass);
2759 ruby_thread_init(
self);
/* Temporarily pop the current control frame, evaluate `expr` with the
 * popped frame's sp propagated to the caller's frame, then restore the
 * frame and its original sp.
 * NOTE(review): the `{expr;}` expansion line and the closing
 * `} while (0)` were truncated in the source being repaired —
 * reconstructed from the visible save/restore pattern; confirm. */
#define REWIND_CFP(expr) do { \
    rb_execution_context_t *ec__ = GET_EC(); \
    VALUE *const curr_sp = (ec__->cfp++)->sp; \
    VALUE *const saved_sp = ec__->cfp->sp; \
    ec__->cfp->sp = curr_sp; \
    {expr;} \
    (ec__->cfp--)->sp = saved_sp; \
} while (0)
2801 m_core_set_postexe(
VALUE self)
2838 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
2863 if (!
NIL_P(options)) {
2864 static ID keyword_ids[1];
2865 if (!keyword_ids[0])
2876 return mjit_resume();
2898 #include <execinfo.h>
2899 #define MAX_NATIVE_TRACE 1024
2900 static void *trace[MAX_NATIVE_TRACE];
2901 int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
2902 char **syms = backtrace_symbols(trace, n);
2909 for (i=0; i<n; i++) {
2917 #if VM_COLLECT_USAGE_DETAILS
2918 static VALUE usage_analysis_insn_start(
VALUE self);
2919 static VALUE usage_analysis_operand_start(
VALUE self);
2920 static VALUE usage_analysis_register_start(
VALUE self);
2921 static VALUE usage_analysis_insn_stop(
VALUE self);
2922 static VALUE usage_analysis_operand_stop(
VALUE self);
2923 static VALUE usage_analysis_register_stop(
VALUE self);
2924 static VALUE usage_analysis_insn_running(
VALUE self);
2925 static VALUE usage_analysis_operand_running(
VALUE self);
2926 static VALUE usage_analysis_register_running(
VALUE self);
2927 static VALUE usage_analysis_insn_clear(
VALUE self);
2928 static VALUE usage_analysis_operand_clear(
VALUE self);
2929 static VALUE usage_analysis_register_clear(
VALUE self);
2973 #if USE_DEBUG_COUNTER
3167 #if VM_COLLECT_USAGE_DETAILS
/* Define RubyVM::USAGE_ANALYSIS_<name> as a fresh, empty Hash. */
#define define_usage_analysis_hash(name) \
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3171 define_usage_analysis_hash(
INSN);
3172 define_usage_analysis_hash(REGS);
3173 define_usage_analysis_hash(INSN_BIGRAM);
3195 #if OPT_DIRECT_THREADED_CODE
3197 #elif OPT_TOKEN_THREADED_CODE
3199 #elif OPT_CALL_THREADED_CODE
3203 #if OPT_STACK_CACHING
3206 #if OPT_OPERANDS_UNIFICATION
3209 #if OPT_INSTRUCTIONS_UNIFICATION
3212 #if OPT_INLINE_METHOD_CACHE
3215 #if OPT_BLOCKINLINING
3259 rb_thread_set_current(th);
3261 rb_vm_living_threads_insert(vm, th);
3265 th->
ec->
cfp->
pc = iseq->body->iseq_encoded;
3276 vm_init_redefined_flag();
3307 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
3319 rb_thread_set_current_raw(th);
3341 main_to_s(
VALUE obj)
3349 return GET_VM()->top_self;
3363 ruby_vm_verbose_ptr(
rb_vm_t *vm)
3369 ruby_vm_debug_ptr(
rb_vm_t *vm)
3377 return ruby_vm_verbose_ptr(
GET_VM());
3383 return ruby_vm_debug_ptr(
GET_VM());
3394 return GET_VM()->frozen_strings;
3397 #if VM_COLLECT_USAGE_DETAILS
3399 #define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3413 vm_analysis_insn(
int insn)
3417 static int prev_insn = -1;
3423 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
3424 CONST_ID(bigram_hash,
"USAGE_ANALYSIS_INSN_BIGRAM");
3428 HASH_ASET(uh,
INT2FIX(insn), ihash);
3436 if (prev_insn != -1) {
3455 vm_analysis_operand(
int insn,
int n,
VALUE op)
3465 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
3470 HASH_ASET(uh,
INT2FIX(insn), ihash);
3474 HASH_ASET(ihash,
INT2FIX(n), ophash);
3487 vm_analysis_register(
int reg,
int isset)
3492 static const char regstrs[][5] = {
3500 static const char getsetstr[][4] = {
3504 static VALUE syms[
sizeof(regstrs) /
sizeof(regstrs[0])][2];
3508 CONST_ID(usage_hash,
"USAGE_ANALYSIS_REGS");
3513 for (i = 0; i < (int)(
sizeof(regstrs) /
sizeof(regstrs[0])); i++) {
3515 for (j = 0; j < 2; j++) {
3516 snprintf(buff, 0x10,
"%d %s %-4s", i, getsetstr[j], regstrs[i]);
3521 valstr = syms[reg][isset];
3532 static void (*ruby_vm_collect_usage_func_insn)(
int insn) =
NULL;
3533 static void (*ruby_vm_collect_usage_func_operand)(
int insn,
int n,
VALUE op) =
NULL;
3534 static void (*ruby_vm_collect_usage_func_register)(
int reg,
int isset) =
NULL;
3538 usage_analysis_insn_start(
VALUE self)
3540 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
3546 usage_analysis_operand_start(
VALUE self)
3548 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
3554 usage_analysis_register_start(
VALUE self)
3556 ruby_vm_collect_usage_func_register = vm_analysis_register;
3562 usage_analysis_insn_stop(
VALUE self)
3564 ruby_vm_collect_usage_func_insn = 0;
3570 usage_analysis_operand_stop(
VALUE self)
3572 ruby_vm_collect_usage_func_operand = 0;
3578 usage_analysis_register_stop(
VALUE self)
3580 ruby_vm_collect_usage_func_register = 0;
3586 usage_analysis_insn_running(
VALUE self)
3588 if (ruby_vm_collect_usage_func_insn == 0)
return Qfalse;
3594 usage_analysis_operand_running(
VALUE self)
3596 if (ruby_vm_collect_usage_func_operand == 0)
return Qfalse;
3602 usage_analysis_register_running(
VALUE self)
3604 if (ruby_vm_collect_usage_func_register == 0)
return Qfalse;
3610 usage_analysis_insn_clear(
VALUE self)
3617 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
3618 CONST_ID(bigram_hash,
"USAGE_ANALYSIS_INSN_BIGRAM");
3629 usage_analysis_operand_clear(
VALUE self)
3634 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
3643 usage_analysis_register_clear(
VALUE self)
3648 CONST_ID(usage_hash,
"USAGE_ANALYSIS_REGS");
3663 #if VM_COLLECT_USAGE_DETAILS
3666 vm_collect_usage_insn(
int insn)
3668 if (RUBY_DTRACE_INSN_ENABLED()) {
3671 if (ruby_vm_collect_usage_func_insn)
3672 (*ruby_vm_collect_usage_func_insn)(insn);
3680 vm_collect_usage_operand(
int insn,
int n,
VALUE op)
3682 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
3690 if (ruby_vm_collect_usage_func_operand)
3691 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
3697 vm_collect_usage_register(
int reg,
int isset)
3699 if (ruby_vm_collect_usage_func_register)
3700 (*ruby_vm_collect_usage_func_register)(reg, isset);
3706 #include "vm_call_iseq_optimized.inc"