447
448 uint choice = 0; // Bigger is most important
449 uint latency = 0; // Bigger is scheduled first
450 uint score = 0; // Bigger is better
451 int idx = -1; // Index in worklist
452 int cand_cnt = 0; // Candidate count
453
454 for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
455 // Order in worklist is used to break ties.
456 // See caller for how this is used to delay scheduling
457 // of induction variable increments to after the other
458 // uses of the phi are scheduled.
459 Node *n = worklist[i]; // Get Node on worklist
460
461 int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
462 if( n->is_Proj() || // Projections always win
463 n->Opcode()== Op_Con || // So does constant 'Top'
464 iop == Op_CreateEx || // Create-exception must start block
465 iop == Op_CheckCastPP
466 ) {
467 // select the node n
468 // remove n from worklist and retain the order of remaining nodes
469 worklist.remove((uint)i);
470 return n;
471 }
472
473 // Final call in a block must be adjacent to 'catch'
474 Node *e = block->end();
475 if( e->is_Catch() && e->in(0)->in(0) == n )
476 continue;
477
478 // Memory op for an implicit null check has to be at the end of the block
479 if( e->is_MachNullCheck() && e->in(1) == n )
480 continue;
481
482 // Schedule IV increment last.
483 if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
484 e->in(1)->in(1) == n && n->is_iteratively_computed())
485 continue;
486
487 uint n_choice = 2;
488
489 // See if this instruction is consumed by a branch. If so, then (as the
535
536 // Keep best latency found
537 cand_cnt++;
538 if (choice < n_choice ||
539 (choice == n_choice &&
540 ((StressLCM && Compile::randomized_select(cand_cnt)) ||
541 (!StressLCM &&
542 (latency < n_latency ||
543 (latency == n_latency &&
544 (score < n_score))))))) {
545 choice = n_choice;
546 latency = n_latency;
547 score = n_score;
548 idx = i; // Also keep index in worklist
549 }
550 } // End of for all ready nodes in worklist
551
552 assert(idx >= 0, "index should be set");
553 Node *n = worklist[(uint)idx]; // Get the winner
554
555 // select the node n
556 // remove n from worklist and retain the order of remaining nodes
557 worklist.remove((uint)idx);
558 return n;
559 }
560
561
562 //------------------------------set_next_call----------------------------------
// Recursively flag, in the 'next_call' set, node n together with every
// transitive input of n that is scheduled in 'block'.  Per the banner below,
// these are the nodes needed before the next call can be scheduled.
563 void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
564   if( next_call.test_set(n->_idx) ) return; // test_set marks n AND reports whether it was already marked -- terminates revisits
565   for( uint i=0; i<n->len(); i++ ) { // walk all inputs (n->len(); presumably includes extra/precedence inputs beyond req() -- confirm against Node API)
566     Node *m = n->in(i);
567     if( !m ) continue; // must see all nodes in block that precede call
568     if (get_block_for_node(m) == block) { // only recurse into inputs placed in this same block
569       set_next_call(block, m, next_call);
570     }
571   }
572 }
573
574 //------------------------------needed_for_next_call---------------------------
575 // Set the flag 'next_call' for each Node that is needed for the next call to
576 // be scheduled. This flag lets me bias scheduling so Nodes needed for the
577 // next subroutine call get priority - basically it moves things NOT needed
|
447
448 uint choice = 0; // Bigger is most important
449 uint latency = 0; // Bigger is scheduled first
450 uint score = 0; // Bigger is better
451 int idx = -1; // Index in worklist
452 int cand_cnt = 0; // Candidate count
453
454 for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
455 // Order in worklist is used to break ties.
456 // See caller for how this is used to delay scheduling
457 // of induction variable increments to after the other
458 // uses of the phi are scheduled.
459 Node *n = worklist[i]; // Get Node on worklist
460
461 int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
462 if( n->is_Proj() || // Projections always win
463 n->Opcode()== Op_Con || // So does constant 'Top'
464 iop == Op_CreateEx || // Create-exception must start block
465 iop == Op_CheckCastPP
466 ) {
467 worklist.map(i,worklist.pop());
468 return n;
469 }
470
471 // Final call in a block must be adjacent to 'catch'
472 Node *e = block->end();
473 if( e->is_Catch() && e->in(0)->in(0) == n )
474 continue;
475
476 // Memory op for an implicit null check has to be at the end of the block
477 if( e->is_MachNullCheck() && e->in(1) == n )
478 continue;
479
480 // Schedule IV increment last.
481 if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
482 e->in(1)->in(1) == n && n->is_iteratively_computed())
483 continue;
484
485 uint n_choice = 2;
486
487 // See if this instruction is consumed by a branch. If so, then (as the
533
534 // Keep best latency found
535 cand_cnt++;
536 if (choice < n_choice ||
537 (choice == n_choice &&
538 ((StressLCM && Compile::randomized_select(cand_cnt)) ||
539 (!StressLCM &&
540 (latency < n_latency ||
541 (latency == n_latency &&
542 (score < n_score))))))) {
543 choice = n_choice;
544 latency = n_latency;
545 score = n_score;
546 idx = i; // Also keep index in worklist
547 }
548 } // End of for all ready nodes in worklist
549
550 assert(idx >= 0, "index should be set");
551 Node *n = worklist[(uint)idx]; // Get the winner
552
553 worklist.map((uint)idx, worklist.pop()); // Compress worklist
554 return n;
555 }
556
557
558 //------------------------------set_next_call----------------------------------
// Depth-first walk that records in 'next_call' the node n and all of n's
// transitive inputs residing in 'block' (the nodes the upcoming call depends
// on -- see needed_for_next_call below).
559 void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
560   if( next_call.test_set(n->_idx) ) return; // already visited: test_set sets the bit and returns its prior state
561   for( uint i=0; i<n->len(); i++ ) { // iterate every input slot of n (len() -- NOTE(review): assumed to cover precedence inputs too; verify)
562     Node *m = n->in(i);
563     if( !m ) continue; // must see all nodes in block that precede call
564     if (get_block_for_node(m) == block) { // restrict the recursion to nodes scheduled in 'block'
565       set_next_call(block, m, next_call);
566     }
567   }
568 }
569
570 //------------------------------needed_for_next_call---------------------------
571 // Set the flag 'next_call' for each Node that is needed for the next call to
572 // be scheduled. This flag lets me bias scheduling so Nodes needed for the
573 // next subroutine call get priority - basically it moves things NOT needed
|