
src/share/vm/opto/callGenerator.cpp

rev 10544 : calls to __Value methods can't pass fields as arguments


 136   kit.C->print_inlining_update(this);
 137   PhaseGVN& gvn = kit.gvn();
 138   bool is_static = method()->is_static();
 139   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 140                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 141 
 142   if (kit.C->log() != NULL) {
 143     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 144   }
 145 
 146   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 147   if (is_inlined_mh_linker(jvms, method())) {
 148     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 149     // additional information about the method being invoked should be attached
 150     // to the call site to make resolution logic work
 151     // (see SharedRuntime::resolve_static_call_C).
 152     call->set_override_symbolic_info(true);
 153   }
 154   _call_node = call;  // Save the call node in case we need it later
 155   if (!is_static) {
 156     if (kit.argument(0)->is_ValueType()) {
 157       if (!ValueTypePassFieldsAsArgs) {
 158         ValueTypeNode* vt = kit.argument(0)->as_ValueType();
 159         vt->store_to_memory(&kit);
 160       }
 161     } else {
 162       // Make an explicit receiver null_check as part of this call.
 163       // Since we share a map with the caller, his JVMS gets adjusted.
 164       kit.null_check_receiver_before_call(method());
 165     }
 166     if (kit.stopped()) {
 167       // And dump it back to the caller, decorated with any exceptions:
 168       return kit.transfer_exceptions_into_jvms();
 169     }
 170     // Mark the call node as virtual, sort of:
 171     call->set_optimized_virtual(true);
 172     if (method()->is_method_handle_intrinsic() ||
 173         method()->is_compiled_lambda_form()) {
 174       call->set_method_handle_invoke(true);
 175     }
 176   }
 177   kit.set_arguments_for_java_call(call);
 178   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 179   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 180   // Check if return value is a value type pointer
 181   if (gvn.type(ret)->isa_valuetypeptr()) {
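
The hunk above shows the pre-change receiver handling: a value-type receiver is buffered to memory when fields are not passed as arguments, while an ordinary reference receiver gets an explicit null check. A minimal standalone sketch of that branch follows; Receiver and the printf calls are made-up stand-ins, not HotSpot types.

    // Toy model of the receiver branch above (pre-change form).
    #include <cstdio>

    struct Receiver { bool is_value_type; };

    void handle_receiver(Receiver r, bool pass_fields_as_args) {
      if (r.is_value_type) {
        if (!pass_fields_as_args) {
          // corresponds to vt->store_to_memory(&kit): buffer the value
          // so the callee can receive it as a pointer
          printf("buffer value-type receiver to memory\n");
        }
      } else {
        // corresponds to kit.null_check_receiver_before_call(method())
        printf("null-check reference receiver\n");
      }
    }

    int main() {
      handle_receiver({true},  false); // buffered
      handle_receiver({false}, false); // null-checked
      return 0;
    }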


 415   }
 416   jvms->set_map(map);
 417 
 418   // Make enough space in the expression stack to transfer
 419   // the incoming arguments and return value.
 420   map->ensure_stack(jvms, jvms->method()->max_stack());
 421   const TypeTuple *domain_sig = call->_tf->domain_sig();
 422   uint nargs = method()->arg_size();
 423   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 424 
 425   uint j = TypeFunc::Parms;
 426   for (uint i1 = 0; i1 < nargs; i1++) {
 427     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 428     if (!ValueTypePassFieldsAsArgs) {
 429       Node* arg = call->in(TypeFunc::Parms + i1);
 430       if (t->isa_valuetypeptr()) {
 431         arg = ValueTypeNode::make(gvn, map->memory(), arg);
 432       }
 433       map->set_argument(jvms, i1, arg);
 434     } else {
 435       if (t->isa_valuetypeptr()) {
 436         ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 437         Node* vt = C->create_vt_node(call, vk, vk, 0, j);
 438         map->set_argument(jvms, i1, gvn.transform(vt));
 439         j += vk->value_arg_slots();
 440       } else {
 441         map->set_argument(jvms, i1, call->in(j));
 442         j++;
 443       }
 444     }
 445   }
 446 
 447   C->print_inlining_assert_ready();
 448 
 449   C->print_inlining_move_to(this);
 450 
 451   C->log_late_inline(this);
 452 
 453   // This check is done here because for_method_handle_inline() method
 454   // needs jvms for inlined state.
 455   if (!do_late_inline_check(jvms)) {
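
This and the preceding hunk show the code as it stood before the revision; the revised versions of both follow below. The loop above rebuilds the inlinee's argument state from the call node's inputs: with ValueTypePassFieldsAsArgs on, a value-type parameter occupies value_arg_slots() consecutive inputs starting at index j, while every other parameter occupies exactly one. Here is a runnable toy model of that index accounting; Param and the example signature are invented for illustration.

    // Toy model of the input-index accounting in the loop above.
    #include <cstdio>
    #include <vector>

    struct Param {
      bool is_value_type;   // scalarized value-type parameter?
      int  value_arg_slots; // call inputs consumed when scalarized
    };

    int main() {
      // e.g. (Object, Point{x,y}, int) with Point flattened into 2 inputs
      std::vector<Param> sig = { {false, 1}, {true, 2}, {false, 1} };
      int j = 0; // stands in for TypeFunc::Parms
      for (size_t i = 0; i < sig.size(); i++) {
        if (sig[i].is_value_type) {
          // a ValueTypeNode is rebuilt from inputs [j, j + slots)
          printf("arg %zu: rebuilt from inputs [%d..%d)\n",
                 i, j, j + sig[i].value_arg_slots);
          j += sig[i].value_arg_slots;
        } else {
          printf("arg %zu: taken from input %d\n", i, j);
          j++;
        }
      }
      return 0;
    }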




 136   kit.C->print_inlining_update(this);
 137   PhaseGVN& gvn = kit.gvn();
 138   bool is_static = method()->is_static();
 139   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 140                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 141 
 142   if (kit.C->log() != NULL) {
 143     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 144   }
 145 
 146   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 147   if (is_inlined_mh_linker(jvms, method())) {
 148     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 149     // additional information about the method being invoked should be attached
 150     // to the call site to make resolution logic work
 151     // (see SharedRuntime::resolve_static_call_C).
 152     call->set_override_symbolic_info(true);
 153   }
 154   _call_node = call;  // Save the call node in case we need it later
 155   if (!is_static) {
 156   if (!kit.argument(0)->is_ValueType()) {
 157       // Make an explicit receiver null_check as part of this call.
 158       // Since we share a map with the caller, his JVMS gets adjusted.
 159       kit.null_check_receiver_before_call(method());
 160     }
 161     if (kit.stopped()) {
 162       // And dump it back to the caller, decorated with any exceptions:
 163       return kit.transfer_exceptions_into_jvms();
 164     }
 165     // Mark the call node as virtual, sort of:
 166     call->set_optimized_virtual(true);
 167     if (method()->is_method_handle_intrinsic() ||
 168         method()->is_compiled_lambda_form()) {
 169       call->set_method_handle_invoke(true);
 170     }
 171   }
 172   kit.set_arguments_for_java_call(call);
 173   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 174   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 175   // Check if return value is a value type pointer
 176   if (gvn.type(ret)->isa_valuetypeptr()) {
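
In the revised hunk above, the value-type branch has been reduced to a guard: the receiver is only null-checked when it is not a value type, and the buffering step is gone entirely. Presumably a value type can never be null in this design, so neither check nor buffer is needed; the earlier sketch shrinks accordingly (stand-in types again).

    // Toy model of the simplified receiver branch.
    #include <cstdio>

    struct Receiver { bool is_value_type; };

    void handle_receiver(Receiver r) {
      if (!r.is_value_type) {
        // only a reference receiver can be null, so only it is checked
        printf("null-check reference receiver\n");
      }
    }

    int main() {
      handle_receiver({true});  // value type: nothing to do before the call
      handle_receiver({false}); // reference: null-checked
      return 0;
    }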


 410   }
 411   jvms->set_map(map);
 412 
 413   // Make enough space in the expression stack to transfer
 414   // the incoming arguments and return value.
 415   map->ensure_stack(jvms, jvms->method()->max_stack());
 416   const TypeTuple *domain_sig = call->_tf->domain_sig();
 417   uint nargs = method()->arg_size();
 418   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 419 
 420   uint j = TypeFunc::Parms;
 421   for (uint i1 = 0; i1 < nargs; i1++) {
 422     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 423     if (!ValueTypePassFieldsAsArgs) {
 424       Node* arg = call->in(TypeFunc::Parms + i1);
 425       if (t->isa_valuetypeptr()) {
 426         arg = ValueTypeNode::make(gvn, map->memory(), arg);
 427       }
 428       map->set_argument(jvms, i1, arg);
 429     } else {
 430       if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
 431         ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 432         Node* vt = C->create_vt_node(call, vk, vk, 0, j);
 433         map->set_argument(jvms, i1, gvn.transform(vt));
 434         j += vk->value_arg_slots();
 435       } else {
 436         map->set_argument(jvms, i1, call->in(j));
 437         j++;
 438       }
 439     }
 440   }
 441 
 442   C->print_inlining_assert_ready();
 443 
 444   C->print_inlining_move_to(this);
 445 
 446   C->log_late_inline(this);
 447 
 448   // This check is done here because for_method_handle_inline() method
 449   // needs jvms for inlined state.
 450   if (!do_late_inline_check(jvms)) {
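
The revised loop differs from its earlier form only in the guard: a parameter statically typed as the generic __Value klass is never scalarized, even with ValueTypePassFieldsAsArgs on, and instead falls through to the single-input path, matching the revision summary that calls to __Value methods can't pass fields as arguments. Extending the toy model from above (names still invented):

    // Extension of the earlier toy: __Value-typed parameters fall
    // through to the single-input path even when fields are passed
    // as arguments.
    #include <cstdio>
    #include <cstring>
    #include <vector>

    struct Param {
      const char* klass;    // stand-in for the parameter's static type
      bool is_value_type;
      int  value_arg_slots;
    };

    // mirrors: t->isa_valuetypeptr() && klass != ___Value_klass()
    static bool scalarize(const Param& p) {
      return p.is_value_type && strcmp(p.klass, "__Value") != 0;
    }

    int main() {
      std::vector<Param> sig = { {"Point",   true,  2},
                                 {"__Value", true,  2},
                                 {"int",     false, 1} };
      int j = 0;
      for (size_t i = 0; i < sig.size(); i++) {
        if (scalarize(sig[i])) {
          printf("arg %zu (%s): rebuilt from inputs [%d..%d)\n",
                 i, sig[i].klass, j, j + sig[i].value_arg_slots);
          j += sig[i].value_arg_slots;
        } else {
          printf("arg %zu (%s): single input %d\n", i, sig[i].klass, j);
          j++;
        }
      }
      return 0;
    }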

