// -- fragment: tail of the opcode switch in final graph reshaping; the
// enclosing function's head and the start of this case lie outside this
// chunk. NOTE(review): this first case appears to normalize a shift
// count by AND-masking it -- confirm `mask` against the missing lines.
3088 Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
3089 n->set_req(2, shift);
3090 }
3091 }
// The original second input may now be unused; unhook it so a dead
// node does not linger in the graph.
3092 if (in2->outcnt() == 0) { // Remove dead node
3093 in2->disconnect_inputs(NULL, this);
3094 }
3095 }
3096 break;
3097 case Op_MemBarStoreStore:
3098 case Op_MemBarRelease:
3099 // Break the link with AllocateNode: it is no longer useful and
3100 // confuses register allocation.
3101 if (n->req() > MemBarNode::Precedent) {
3102 n->set_req(MemBarNode::Precedent, top());
3103 }
3104 break;
3105 default:
// No call or memory nodes should reach the default case at this phase.
3106 assert( !n->is_Call(), "" );
3107 assert( !n->is_Mem(), "" );
3108 break;
3109 }
3110
3111 // Collect CFG split points
3112 if (n->is_MultiBranch())
3113 frc._tests.push(n);
3114 }
3115
3116 //------------------------------final_graph_reshaping_walk---------------------
3117 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3118 // requires that the walk visits a node's inputs before visiting the node.
3119 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
// Arena-backed scratch list; NOTE(review): its population and use are
// in the loop body, which lies outside this chunk -- confirm there.
3120 ResourceArea *area = Thread::current()->resource_area();
3121 Unique_Node_List sfpt(area);
3122
// Iterative DFS state: the root is marked visited up front so a cycle
// back to it is not re-expanded.
3123 frc._visited.set(root->_idx); // first, mark node as visited
3124 uint cnt = root->req(); // number of inputs of the current node
3125 Node *n = root; // node currently being expanded
3126 uint i = 0; // index of the next input to visit
3127 while (true) {
// (fragment: the walk's loop body continues outside this chunk)
// -- fragment: tail of final graph reshaping; the function head lies
// outside this chunk --
// x87-only heuristic (UseSSE == 0): if the method is float-heavy
// (> 32 float ops), uses no doubles at all, and makes relatively few
// calls (calls < 10% of float ops), select 24-bit FPU precision mode.
3304 if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
3305 frc.get_float_count() > 32 &&
3306 frc.get_double_count() == 0 &&
3307 (10 * frc.get_call_count() < frc.get_float_count()) ) {
3308 set_24_bit_selection_and_mode( false, true );
3309 }
3310
// Publish the counters gathered during the reshaping walk.
3311 set_java_calls(frc.get_java_call_count());
3312 set_inner_loops(frc.get_inner_loop_count());
3313
3314 // No infinite loops, no reason to bail out.
3315 return false;
3316 }
3317
3318 //-----------------------------too_many_traps----------------------------------
3319 // Report if there are too many traps at the current method and bci.
3320 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
3321 bool Compile::too_many_traps(ciMethod* method,
3322 int bci,
3323 Deoptimization::DeoptReason reason) {
// No profile data at all: be optimistic.
3324 ciMethodData* md = method->method_data();
3325 if (md->is_empty()) {
3326 // Assume the trap has not occurred, or that it occurred only
3327 // because of a transient condition during start-up in the interpreter.
3328 return false;
3329 }
// For speculative deopt reasons the per-BCI record is keyed by the
// root compiled method; otherwise no method key is used.
3330 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
3331 if (md->has_trap_at(bci, m, reason) != 0) {
3332 // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
3333 // Also, if there are multiple reasons, or if there is no per-BCI record,
3334 // assume the worst.
3335 if (log())
3336 log()->elem("observe trap='%s' count='%d'",
3337 Deoptimization::trap_reason_name(reason),
3338 md->trap_count(reason));
3339 return true;
3340 } else {
3341 // Ignore method/bci and see if there have been too many globally.
3342 return too_many_traps(reason, md);
3343 }
// (fragment: the function's closing brace lies outside this chunk)
// -- fragment: tail of the global too_many_traps(reason, md) overload;
// its head (including the declaration of `logmd`) lies outside this
// chunk --
// mcount = per-method count from the logged MDO, or -1 when absent;
// count='0' because there is no per-BCI count on this path.
3353 int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
3354 log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
3355 Deoptimization::trap_reason_name(reason),
3356 mcount, trap_count(reason));
3357 }
3358 return true;
3359 } else {
3360 // The coast is clear.
3361 return false;
3362 }
3363 }
3364
3365 //--------------------------too_many_recompiles--------------------------------
3366 // Report if there are too many recompiles at the current method and bci.
3367 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
3368 // Is not eager to return true, since this will cause the compiler to use
3369 // Action_none for a trap point, to avoid too many recompilations.
3370 bool Compile::too_many_recompiles(ciMethod* method,
3371 int bci,
3372 Deoptimization::DeoptReason reason) {
// No profile data at all: be optimistic.
3373 ciMethodData* md = method->method_data();
3374 if (md->is_empty()) {
3375 // Assume the trap has not occurred, or that it occurred only
3376 // because of a transient condition during start-up in the interpreter.
3377 return false;
3378 }
3379 // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
3380 uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
// m_cutoff is presumably consumed by the per-method branch outside
// this chunk -- confirm against the missing lines.
3381 uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero
// Some deopt reasons are not recorded per bytecode; map to the
// per-BCI reason if one exists, else Reason_none.
3382 Deoptimization::DeoptReason per_bc_reason
3383 = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
// For speculative deopt reasons the per-BCI record is keyed by the
// root compiled method; otherwise no method key is used.
3384 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
// Per-BCI test: (no per-BCI record exists for this reason, or this bci
// has trapped) AND this bci already forced a recompile AND overflow
// recompiles crossed the cutoff.
3385 if ((per_bc_reason == Deoptimization::Reason_none
3386 || md->has_trap_at(bci, m, reason) != 0)
3387 // The trap frequency measure we care about is the recompile count:
3388 && md->trap_recompiled_at(bci, m)
3389 && md->overflow_recompile_count() >= bc_cutoff) {
3390 // Do not emit a trap here if it has already caused recompilations.
3391 // Also, if there are multiple reasons, or if there is no per-BCI record,
3392 // assume the worst.
// (fragment: the rest of the function lies outside this chunk)
|
// -- fragment: tail of the opcode switch in final graph reshaping; the
// enclosing function's head and the start of this case lie outside this
// chunk. NOTE(review): this first case appears to normalize a shift
// count by AND-masking it -- confirm `mask` against the missing lines.
3088 Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
3089 n->set_req(2, shift);
3090 }
3091 }
// The original second input may now be unused; unhook it so a dead
// node does not linger in the graph.
3092 if (in2->outcnt() == 0) { // Remove dead node
3093 in2->disconnect_inputs(NULL, this);
3094 }
3095 }
3096 break;
3097 case Op_MemBarStoreStore:
3098 case Op_MemBarRelease:
3099 // Break the link with AllocateNode: it is no longer useful and
3100 // confuses register allocation.
3101 if (n->req() > MemBarNode::Precedent) {
3102 n->set_req(MemBarNode::Precedent, top());
3103 }
3104 break;
3105 default:
// No call or memory nodes should reach the default case at this phase,
// and ProfileBranch nodes must already have been removed by IGVN.
3106 assert( !n->is_Call(), "" );
3107 assert( !n->is_Mem(), "" );
3108 assert( nop != Op_ProfileBranch, "should be eliminated during IGVN");
3109 break;
3110 }
3111
3112 // Collect CFG split points
3113 if (n->is_MultiBranch())
3114 frc._tests.push(n);
3115 }
3116
3117 //------------------------------final_graph_reshaping_walk---------------------
3118 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3119 // requires that the walk visits a node's inputs before visiting the node.
3120 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
// Arena-backed scratch list; NOTE(review): its population and use are
// in the loop body, which lies outside this chunk -- confirm there.
3121 ResourceArea *area = Thread::current()->resource_area();
3122 Unique_Node_List sfpt(area);
3123
// Iterative DFS state: the root is marked visited up front so a cycle
// back to it is not re-expanded.
3124 frc._visited.set(root->_idx); // first, mark node as visited
3125 uint cnt = root->req(); // number of inputs of the current node
3126 Node *n = root; // node currently being expanded
3127 uint i = 0; // index of the next input to visit
3128 while (true) {
// (fragment: the walk's loop body continues outside this chunk)
// -- fragment: tail of final graph reshaping; the function head lies
// outside this chunk --
// x87-only heuristic (UseSSE == 0): if the method is float-heavy
// (> 32 float ops), uses no doubles at all, and makes relatively few
// calls (calls < 10% of float ops), select 24-bit FPU precision mode.
3305 if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
3306 frc.get_float_count() > 32 &&
3307 frc.get_double_count() == 0 &&
3308 (10 * frc.get_call_count() < frc.get_float_count()) ) {
3309 set_24_bit_selection_and_mode( false, true );
3310 }
3311
// Publish the counters gathered during the reshaping walk.
3312 set_java_calls(frc.get_java_call_count());
3313 set_inner_loops(frc.get_inner_loop_count());
3314
3315 // No infinite loops, no reason to bail out.
3316 return false;
3317 }
3318
3319 //-----------------------------too_many_traps----------------------------------
3320 // Report if there are too many traps at the current method and bci.
3321 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
3322 bool Compile::too_many_traps(ciMethod* method,
3323 int bci,
3324 Deoptimization::DeoptReason reason) {
// Shared LambdaForm bodies mix many call sites, so their profile is
// not meaningful for this method/bci.
3325 if (method->is_shared()) {
3326 // Don't trust gathered profile for shared LambdaForms (marked w/ @LambdaForm.Shared).
3327 return false;
3328 }
// No profile data at all: be optimistic.
3329 ciMethodData* md = method->method_data();
3330 if (md->is_empty()) {
3331 // Assume the trap has not occurred, or that it occurred only
3332 // because of a transient condition during start-up in the interpreter.
3333 return false;
3334 }
// For speculative deopt reasons the per-BCI record is keyed by the
// root compiled method; otherwise no method key is used.
3335 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
3336 if (md->has_trap_at(bci, m, reason) != 0) {
3337 // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
3338 // Also, if there are multiple reasons, or if there is no per-BCI record,
3339 // assume the worst.
3340 if (log())
3341 log()->elem("observe trap='%s' count='%d'",
3342 Deoptimization::trap_reason_name(reason),
3343 md->trap_count(reason));
3344 return true;
3345 } else {
3346 // Ignore method/bci and see if there have been too many globally.
3347 return too_many_traps(reason, md);
3348 }
// (fragment: the function's closing brace lies outside this chunk)
// -- fragment: tail of the global too_many_traps(reason, md) overload;
// its head (including the declaration of `logmd`) lies outside this
// chunk --
// mcount = per-method count from the logged MDO, or -1 when absent;
// count='0' because there is no per-BCI count on this path.
3358 int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
3359 log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
3360 Deoptimization::trap_reason_name(reason),
3361 mcount, trap_count(reason));
3362 }
3363 return true;
3364 } else {
3365 // The coast is clear.
3366 return false;
3367 }
3368 }
3369
3370 //--------------------------too_many_recompiles--------------------------------
3371 // Report if there are too many recompiles at the current method and bci.
3372 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
3373 // Is not eager to return true, since this will cause the compiler to use
3374 // Action_none for a trap point, to avoid too many recompilations.
3375 bool Compile::too_many_recompiles(ciMethod* method,
3376 int bci,
3377 Deoptimization::DeoptReason reason) {
// Shared LambdaForm bodies mix many call sites, so their profile is
// not meaningful for this method/bci.
3378 if (method->is_shared()) {
3379 // Don't trust gathered profile for shared LambdaForms (marked w/ @LambdaForm.Shared).
3380 return false;
3381 }
// No profile data at all: be optimistic.
3382 ciMethodData* md = method->method_data();
3383 if (md->is_empty()) {
3384 // Assume the trap has not occurred, or that it occurred only
3385 // because of a transient condition during start-up in the interpreter.
3386 return false;
3387 }
3388 // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
3389 uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
// m_cutoff is presumably consumed by the per-method branch outside
// this chunk -- confirm against the missing lines.
3390 uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero
// Some deopt reasons are not recorded per bytecode; map to the
// per-BCI reason if one exists, else Reason_none.
3391 Deoptimization::DeoptReason per_bc_reason
3392 = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
// For speculative deopt reasons the per-BCI record is keyed by the
// root compiled method; otherwise no method key is used.
3393 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
// Per-BCI test: (no per-BCI record exists for this reason, or this bci
// has trapped) AND this bci already forced a recompile AND overflow
// recompiles crossed the cutoff.
3394 if ((per_bc_reason == Deoptimization::Reason_none
3395 || md->has_trap_at(bci, m, reason) != 0)
3396 // The trap frequency measure we care about is the recompile count:
3397 && md->trap_recompiled_at(bci, m)
3398 && md->overflow_recompile_count() >= bc_cutoff) {
3399 // Do not emit a trap here if it has already caused recompilations.
3400 // Also, if there are multiple reasons, or if there is no per-BCI record,
3401 // assume the worst.
// (fragment: the rest of the function lies outside this chunk)
|