378 if( mach->is_MachCall() ) {
379 MachCallNode *mcall = mach->as_MachCall();
380 // This destination address is NOT PC-relative
381
382 mcall->method_set((intptr_t)mcall->entry_point());
383
384 if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
385 stub_size += size_java_to_interp();
386 reloc_size += reloc_java_to_interp();
387 }
388 } else if (mach->is_MachSafePoint()) {
389 // If call/safepoint are adjacent, account for possible
390 // nop to disambiguate the two safepoints.
391 if (min_offset_from_last_call == 0) {
392 blk_size += nop_size;
393 }
394 }
395 }
396 min_offset_from_last_call += inst_size;
397 // Remember end of call offset
398 if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
399 min_offset_from_last_call = 0;
400 }
401 }
402
403 // During short branch replacement, we store the relative (to blk_starts)
404 // end of jump in jmp_end, rather than the absolute end of jump. This
405 // is so that we do not need to recompute sizes of all nodes when we compute
406 // correct blk_starts in our next sizing pass.
407 jmp_end[i] = blk_size;
408 DEBUG_ONLY( jmp_target[i] = 0; )
409
410 // When the next block starts a loop, we may insert pad NOP
411 // instructions. Since we cannot know our future alignment,
412 // assume the worst.
413 if( i<_cfg->_num_blocks-1 ) {
414 Block *nb = _cfg->_blocks[i+1];
415 int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
416 if( max_loop_pad > 0 ) {
417 assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
418 blk_size += max_loop_pad;
430 // candidates because the shortening in the first pass exposed
431 // more opportunities. Unfortunately, this would require
432 // recomputing the starting and ending positions for the blocks
433 for( i=0; i<_cfg->_num_blocks; i++ ) {
434 Block *b = _cfg->_blocks[i];
435
436 int j;
437 // Find the branch; ignore trailing NOPs.
438 for( j = b->_nodes.size()-1; j>=0; j-- ) {
439 nj = b->_nodes[j];
440 if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con )
441 break;
442 }
443
444 if (j >= 0) {
445 if( nj->is_Mach() && nj->as_Mach()->may_be_short_branch() ) {
446 MachNode *mach = nj->as_Mach();
447 // This requires the TRUE branch target be in succs[0]
448 uint bnum = b->non_connector_successor(0)->_pre_order;
449 uintptr_t target = blk_starts[bnum];
450 if( mach->is_pc_relative() ) {
451 int offset = target-(blk_starts[i] + jmp_end[i]);
452 if (_matcher->is_short_branch_offset(mach->rule(), offset)) {
453 // We've got a winner. Replace this branch.
454 MachNode* replacement = mach->short_branch_version(this);
455 b->_nodes.map(j, replacement);
456 mach->subsume_by(replacement);
457
458 // Update the jmp_end size to save time in our
459 // next pass.
460 jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc));
461 DEBUG_ONLY( jmp_target[i] = bnum; );
462 DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
463 }
464 } else {
465 #ifndef PRODUCT
466 mach->dump(3);
467 #endif
468 Unimplemented();
469 }
470 }
491 if( nj->is_Mach() ) {
492 int padding = nj->as_Mach()->compute_padding(adr);
493 // If call/safepoint are adjacent, insert a nop (5010568)
494 if (padding == 0 && nj->is_MachSafePoint() && !nj->is_MachCall() &&
495 adr == last_call_adr ) {
496 padding = nop_size;
497 }
498 if(padding > 0) {
499 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
500 int nops_cnt = padding / nop_size;
501 MachNode *nop = new (this) MachNopNode(nops_cnt);
502 b->_nodes.insert(j++, nop);
503 _cfg->_bbs.map( nop->_idx, b );
504 adr += padding;
505 last_inst++;
506 }
507 }
508 adr += nj->size(_regalloc);
509
510 // Remember end of call offset
511 if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
512 last_call_adr = adr;
513 }
514 }
515
516 if ( i != _cfg->_num_blocks-1) {
517 // Get the size of the block
518 uint blk_size = adr - blk_starts[i];
519
520 // When the next block is the top of a loop, we may insert pad NOP
521 // instructions.
522 Block *nb = _cfg->_blocks[i+1];
523 int current_offset = blk_starts[i] + blk_size;
524 current_offset += nb->alignment_padding(current_offset);
525 // Save block size; update total method size
526 blk_starts[i+1] = current_offset;
527 }
528 }
529
530 #ifdef ASSERT
531 for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
1278 int nops_cnt = padding / nop_size;
1279 MachNode *nop = new (this) MachNopNode(nops_cnt);
1280 b->_nodes.insert(j++, nop);
1281 last_inst++;
1282 _cfg->_bbs.map( nop->_idx, b );
1283 nop->emit(*cb, _regalloc);
1284 cb->flush_bundle(true);
1285 current_offset = cb->insts_size();
1286 }
1287
1288 // Remember the start of the last call in a basic block
1289 if (is_mcall) {
1290 MachCallNode *mcall = mach->as_MachCall();
1291
1292 // This destination address is NOT PC-relative
1293 mcall->method_set((intptr_t)mcall->entry_point());
1294
1295 // Save the return address
1296 call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
1297
1298 if (!mcall->is_safepoint_node()) {
1299 is_mcall = false;
1300 is_sfn = false;
1301 }
1302 }
1303
1304 // is_sfn is true whenever is_mcall is true, because MachCallNode inherits from MachSafePointNode
1305 if( is_sfn || is_mcall ) {
1306
1307 // Handle special safepoint nodes for synchronization
1308 if( !is_mcall ) {
1309 MachSafePointNode *sfn = mach->as_MachSafePoint();
1310 // !!!!! Stubs only need an oopmap right now, so bail out
1311 if( sfn->jvms()->method() == NULL) {
1312 // Write the oopmap directly to the code blob??!!
1313 # ifdef ENABLE_ZAP_DEAD_LOCALS
1314 assert( !is_node_getting_a_safepoint(sfn), "logic does not match; false positive");
1315 # endif
1316 continue;
1317 }
1318 } // End synchronization
1548 int j;
1549
1550 // Find the branch; ignore trailing NOPs.
1551 for( j = b->_nodes.size()-1; j>=0; j-- ) {
1552 n = b->_nodes[j];
1553 if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
1554 break;
1555 }
1556
1557 // If we didn't find anything, continue
1558 if( j < 0 ) continue;
1559
1560 // Compute ExceptionHandlerTable subtable entry and add it
1561 // (skip empty blocks)
1562 if( n->is_Catch() ) {
1563
1564 // Get the offset of the return from the call
1565 uint call_return = call_returns[b->_pre_order];
1566 #ifdef ASSERT
1567 assert( call_return > 0, "no call seen for this basic block" );
1568 while( b->_nodes[--j]->Opcode() == Op_MachProj ) ;
1569 assert( b->_nodes[j]->is_Call(), "CatchProj must follow call" );
1570 #endif
1571 // The last instruction is a CatchNode; find its CatchProjNodes
1572 int nof_succs = b->_num_succs;
1573 // allocate space
1574 GrowableArray<intptr_t> handler_bcis(nof_succs);
1575 GrowableArray<intptr_t> handler_pcos(nof_succs);
1576 // iterate through all successors
1577 for (int j = 0; j < nof_succs; j++) {
1578 Block* s = b->_succs[j];
1579 bool found_p = false;
1580 for( uint k = 1; k < s->num_preds(); k++ ) {
1581 Node *pk = s->pred(k);
1582 if( pk->is_CatchProj() && pk->in(0) == n ) {
1583 const CatchProjNode* p = pk->as_CatchProj();
1584 found_p = true;
1585 // add the corresponding handler bci & pco information
1586 if( p->_con != CatchProjNode::fall_through_index ) {
1587 // p leads to an exception handler (and is not fall through)
1588 assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
1589 // no duplicates, please
2333 int iop = mach->ideal_Opcode();
2334 if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2335 if( iop == Op_Con ) continue; // Do not schedule Top
2336 if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes
2337 mach->pipeline() == MachNode::pipeline_class() &&
2338 !n->is_SpillCopy() ) // Breakpoints, Prolog, etc
2339 continue;
2340 break; // Funny loop structure to be sure...
2341 }
2342 // Compute last "interesting" instruction in block - last instruction we
2343 // might schedule. _bb_end points just after last schedulable inst. We
2344 // normally schedule conditional branches (despite them being forced last
2345 // in the block), because they have delay slots we can fill. Calls all
2346 // have their delay slots filled in the template expansions, so we don't
2347 // bother scheduling them.
2348 Node *last = bb->_nodes[_bb_end];
2349 if( last->is_Catch() ||
2350 // Exclude unreachable path case when Halt node is in a separate block.
2351 (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2352 // There must be a prior call. Skip it.
2353 while( !bb->_nodes[--_bb_end]->is_Call() ) {
2354 assert( bb->_nodes[_bb_end]->is_Proj(), "skipping projections after expected call" );
2355 }
2356 } else if( last->is_MachNullCheck() ) {
2357 // Back up so the last null-checked memory instruction is
2358 // outside the schedulable range. Skip over the nullcheck,
2359 // projection, and the memory nodes.
2360 Node *mem = last->in(1);
2361 do {
2362 _bb_end--;
2363 } while (mem != bb->_nodes[_bb_end]);
2364 } else {
2365 // Set _bb_end to point after last schedulable inst.
2366 _bb_end++;
2367 }
2368
2369 assert( _bb_start <= _bb_end, "inverted block ends" );
2370
2371 // Compute the register antidependencies for the basic block
2372 ComputeRegisterAntidependencies(bb);
2373 if (_cfg->C->failing()) return; // too many D-U pinch points
2374
2646 // We put edges from the prior and current DEF/KILLs to the pinch point.
2647 // We put the pinch point in _reg_node. If there's already a pinch point
2648 // we merely add an edge from the current DEF/KILL to the pinch point.
2649
2650 // After doing the DEF/KILLs, we handle USEs. For each used register, we
2651 // put an edge from the pinch point to the USE.
2652
2653 // To be expedient, the _reg_node array is pre-allocated for the whole
2654 // compilation. _reg_node is lazily initialized; it either contains a NULL,
2655 // or a valid def/kill/pinch-point, or a leftover node from some prior
2656 // block. A leftover node from a prior block is treated like a NULL (no
2657 // prior def, so no anti-dependence is needed). A valid def is
2658 // distinguished by being in the current block.
2659 bool fat_proj_seen = false;
2660 uint last_safept = _bb_end-1;
2661 Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
2662 Node* last_safept_node = end_node;
2663 for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2664 Node *n = b->_nodes[i];
2665 int is_def = n->outcnt(); // treat as a def if it has uses prior to adding precedence edges
2666 if( n->Opcode() == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2667 // Fat-proj kills a slew of registers
2668 // This can add edges to 'n' and obscure whether or not it was a def,
2669 // hence the is_def flag.
2670 fat_proj_seen = true;
2671 RegMask rm = n->out_RegMask();// Make local copy
2672 while( rm.is_NotEmpty() ) {
2673 OptoReg::Name kill = rm.find_first_elem();
2674 rm.Remove(kill);
2675 anti_do_def( b, n, kill, is_def );
2676 }
2677 } else {
2678 // Get DEF'd registers the normal way
2679 anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2680 anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2681 }
2682
2683 // Check each register used by this instruction for a following DEF/KILL
2684 // that must occur afterward and requires an anti-dependence edge.
2685 for( uint j=0; j<n->req(); j++ ) {
2686 Node *def = n->in(j);
2687 if( def ) {
2688 assert( def->Opcode() != Op_MachProj || def->ideal_reg() != MachProjNode::fat_proj, "" );
2689 anti_do_use( b, n, _regalloc->get_reg_first(def) );
2690 anti_do_use( b, n, _regalloc->get_reg_second(def) );
2691 }
2692 }
2693 // Do not allow defs of new derived values to float above GC
2694 // points unless the base is definitely available at the GC point.
2695
2696 Node *m = b->_nodes[i];
2697
2698 // Add precedence edge from following safepoint to use of derived pointer
2699 if( last_safept_node != end_node &&
2700 m != last_safept_node) {
2701 for (uint k = 1; k < m->req(); k++) {
2702 const Type *t = m->in(k)->bottom_type();
2703 if( t->isa_oop_ptr() &&
2704 t->is_ptr()->offset() != 0 ) {
2705 last_safept_node->add_prec( m );
2706 break;
2707 }
2708 }
|
378 if( mach->is_MachCall() ) {
379 MachCallNode *mcall = mach->as_MachCall();
380 // This destination address is NOT PC-relative
381
382 mcall->method_set((intptr_t)mcall->entry_point());
383
384 if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
385 stub_size += size_java_to_interp();
386 reloc_size += reloc_java_to_interp();
387 }
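// (Illustrative note: _method != NULL marks a Java call with a known target,
// which may need a per-call java-to-interpreter stub; both the stub section
// and the relocation section are sized for it here, while leaf and runtime
// calls contribute to neither.)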
388 } else if (mach->is_MachSafePoint()) {
389 // If call/safepoint are adjacent, account for possible
390 // nop to disambiguate the two safepoints.
391 if (min_offset_from_last_call == 0) {
392 blk_size += nop_size;
393 }
394 }
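// (Illustrative scenario, hypothetical offsets: a call ends at offset 40 and
// resets min_offset_from_last_call to 0; if the very next node is a
// MachSafePoint, both safepoints would map to PC 40, so the nop_size charged
// above reserves room to keep the two PCs distinct at emission time.)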
395 }
396 min_offset_from_last_call += inst_size;
397 // Remember end of call offset
398 if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
399 min_offset_from_last_call = 0;
400 }
401 }
402
403 // During short branch replacement, we store the relative (to blk_starts)
404 // end of jump in jmp_end, rather than the absolute end of jump. This
405 // is so that we do not need to recompute sizes of all nodes when we compute
406 // correct blk_starts in our next sizing pass.
407 jmp_end[i] = blk_size;
408 DEBUG_ONLY( jmp_target[i] = 0; )
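// (Worked example, hypothetical numbers: if blk_starts[i] == 0x80 and the
// branch ends 12 bytes into the block, then jmp_end[i] == 12. The absolute
// jump end used below is blk_starts[i] + jmp_end[i] == 0x8C, and jmp_end[i]
// stays valid even when earlier blocks shrink and blk_starts[i] moves.)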
409
410 // When the next block starts a loop, we may insert pad NOP
411 // instructions. Since we cannot know our future alignment,
412 // assume the worst.
413 if( i<_cfg->_num_blocks-1 ) {
414 Block *nb = _cfg->_blocks[i+1];
415 int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
416 if( max_loop_pad > 0 ) {
417 assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
418 blk_size += max_loop_pad;
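// (Illustrative arithmetic: with code_alignment() == 16 and
// relocInfo::addr_unit() == 1, max_loop_pad == 15; the assert confirms that
// 15 + 1 == 16 is a power of 2, and since up to 15 bytes of NOPs may be
// needed, the worst case is charged to blk_size.)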
430 // candidates because the shortening in the first pass exposed
431 // more opportunities. Unfortunately, this would require
432 // recomputing the starting and ending positions for the blocks
433 for( i=0; i<_cfg->_num_blocks; i++ ) {
434 Block *b = _cfg->_blocks[i];
435
436 int j;
437 // Find the branch; ignore trailing NOPs.
438 for( j = b->_nodes.size()-1; j>=0; j-- ) {
439 nj = b->_nodes[j];
440 if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con )
441 break;
442 }
443
444 if (j >= 0) {
445 if( nj->is_Mach() && nj->as_Mach()->may_be_short_branch() ) {
446 MachNode *mach = nj->as_Mach();
447 // This requires the TRUE branch target be in succs[0]
448 uint bnum = b->non_connector_successor(0)->_pre_order;
449 uintptr_t target = blk_starts[bnum];
450 if( mach->is_Branch() ) {
451 int offset = target-(blk_starts[i] + jmp_end[i]);
452 if (_matcher->is_short_branch_offset(mach->rule(), offset)) {
453 // We've got a winner. Replace this branch.
454 MachNode* replacement = mach->short_branch_version(this);
455 b->_nodes.map(j, replacement);
456 mach->subsume_by(replacement);
457
458 // Update the jmp_end size to save time in our
459 // next pass.
460 jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc));
461 DEBUG_ONLY( jmp_target[i] = bnum; );
462 DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
463 }
464 } else {
465 #ifndef PRODUCT
466 mach->dump(3);
467 #endif
468 Unimplemented();
469 }
470 }
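// (Note, illustrative: offset is measured from the jump's end,
// blk_starts[i] + jmp_end[i], to the target block's start, so a backward
// branch yields a negative offset; is_short_branch_offset() is expected to
// accept either sign within the platform's short-branch reach.)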
491 if( nj->is_Mach() ) {
492 int padding = nj->as_Mach()->compute_padding(adr);
493 // If call/safepoint are adjacent, insert a nop (5010568)
494 if (padding == 0 && nj->is_MachSafePoint() && !nj->is_MachCall() &&
495 adr == last_call_adr ) {
496 padding = nop_size;
497 }
498 if(padding > 0) {
499 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
500 int nops_cnt = padding / nop_size;
501 MachNode *nop = new (this) MachNopNode(nops_cnt);
502 b->_nodes.insert(j++, nop);
503 _cfg->_bbs.map( nop->_idx, b );
504 adr += padding;
505 last_inst++;
506 }
507 }
508 adr += nj->size(_regalloc);
509
510 // Remember end of call offset
511 if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
512 last_call_adr = adr;
513 }
514 }
515
516 if ( i != _cfg->_num_blocks-1) {
517 // Get the size of the block
518 uint blk_size = adr - blk_starts[i];
519
520 // When the next block is the top of a loop, we may insert pad NOP
521 // instructions.
522 Block *nb = _cfg->_blocks[i+1];
523 int current_offset = blk_starts[i] + blk_size;
524 current_offset += nb->alignment_padding(current_offset);
525 // Save block size; update total method size
526 blk_starts[i+1] = current_offset;
527 }
528 }
529
530 #ifdef ASSERT
531 for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
1278 int nops_cnt = padding / nop_size;
1279 MachNode *nop = new (this) MachNopNode(nops_cnt);
1280 b->_nodes.insert(j++, nop);
1281 last_inst++;
1282 _cfg->_bbs.map( nop->_idx, b );
1283 nop->emit(*cb, _regalloc);
1284 cb->flush_bundle(true);
1285 current_offset = cb->insts_size();
1286 }
1287
1288 // Remember the start of the last call in a basic block
1289 if (is_mcall) {
1290 MachCallNode *mcall = mach->as_MachCall();
1291
1292 // This destination address is NOT PC-relative
1293 mcall->method_set((intptr_t)mcall->entry_point());
1294
1295 // Save the return address
1296 call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
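// (Illustrative: ret_addr_offset() is the distance from the start of the call
// instruction to its return address, e.g. 5 bytes for a direct call on x86,
// so call_returns records the PC that exception handling will associate with
// this block.)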
1297
1298 if (mcall->is_MachCallLeaf()) {
1299 is_mcall = false;
1300 is_sfn = false;
1301 }
1302 }
1303
1304 // is_sfn is true whenever is_mcall is true, because MachCallNode inherits from MachSafePointNode
1305 if( is_sfn || is_mcall ) {
1306
1307 // Handle special safepoint nodes for synchronization
1308 if( !is_mcall ) {
1309 MachSafePointNode *sfn = mach->as_MachSafePoint();
1310 // !!!!! Stubs only need an oopmap right now, so bail out
1311 if( sfn->jvms()->method() == NULL) {
1312 // Write the oopmap directly to the code blob??!!
1313 # ifdef ENABLE_ZAP_DEAD_LOCALS
1314 assert( !is_node_getting_a_safepoint(sfn), "logic does not match; false positive");
1315 # endif
1316 continue;
1317 }
1318 } // End synchronization
1548 int j;
1549
1550 // Find the branch; ignore trailing NOPs.
1551 for( j = b->_nodes.size()-1; j>=0; j-- ) {
1552 n = b->_nodes[j];
1553 if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
1554 break;
1555 }
1556
1557 // If we didn't find anything, continue
1558 if( j < 0 ) continue;
1559
1560 // Compute ExceptionHandlerTable subtable entry and add it
1561 // (skip empty blocks)
1562 if( n->is_Catch() ) {
1563
1564 // Get the offset of the return from the call
1565 uint call_return = call_returns[b->_pre_order];
1566 #ifdef ASSERT
1567 assert( call_return > 0, "no call seen for this basic block" );
1568 while( b->_nodes[--j]->is_MachProj() ) ;
1569 assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
1570 #endif
1571 // The last instruction is a CatchNode; find its CatchProjNodes
1572 int nof_succs = b->_num_succs;
1573 // allocate space
1574 GrowableArray<intptr_t> handler_bcis(nof_succs);
1575 GrowableArray<intptr_t> handler_pcos(nof_succs);
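// (Illustrative: handler_bcis collects each handler's bytecode index and
// handler_pcos the matching code offset; the parallel arrays become the
// ExceptionHandlerTable subtable entry keyed by call_return.)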
1576 // iterate through all successors
1577 for (int j = 0; j < nof_succs; j++) {
1578 Block* s = b->_succs[j];
1579 bool found_p = false;
1580 for( uint k = 1; k < s->num_preds(); k++ ) {
1581 Node *pk = s->pred(k);
1582 if( pk->is_CatchProj() && pk->in(0) == n ) {
1583 const CatchProjNode* p = pk->as_CatchProj();
1584 found_p = true;
1585 // add the corresponding handler bci & pco information
1586 if( p->_con != CatchProjNode::fall_through_index ) {
1587 // p leads to an exception handler (and is not fall through)
1588 assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
1589 // no duplicates, please
2333 int iop = mach->ideal_Opcode();
2334 if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2335 if( iop == Op_Con ) continue; // Do not schedule Top
2336 if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes
2337 mach->pipeline() == MachNode::pipeline_class() &&
2338 !n->is_SpillCopy() ) // Breakpoints, Prolog, etc
2339 continue;
2340 break; // Funny loop structure to be sure...
2341 }
2342 // Compute last "interesting" instruction in block - last instruction we
2343 // might schedule. _bb_end points just after last schedulable inst. We
2344 // normally schedule conditional branches (despite them being forced last
2345 // in the block), because they have delay slots we can fill. Calls all
2346 // have their delay slots filled in the template expansions, so we don't
2347 // bother scheduling them.
2348 Node *last = bb->_nodes[_bb_end];
2349 if( last->is_Catch() ||
2350 // Exclude unreachable path case when Halt node is in a separate block.
2351 (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2352 // There must be a prior call. Skip it.
2353 while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
2354 assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
2355 }
2356 } else if( last->is_MachNullCheck() ) {
2357 // Back up so the last null-checked memory instruction is
2358 // outside the schedulable range. Skip over the nullcheck,
2359 // projection, and the memory nodes.
2360 Node *mem = last->in(1);
2361 do {
2362 _bb_end--;
2363 } while (mem != bb->_nodes[_bb_end]);
2364 } else {
2365 // Set _bb_end to point after last schedulable inst.
2366 _bb_end++;
2367 }
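// (Sketch of the Catch case, hypothetical layout: a block ending in
//   ... call; MachProj; MachProj; Catch
// backs _bb_end up past the projections to the call itself, whose delay slot
// was already filled during template expansion.)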
2368
2369 assert( _bb_start <= _bb_end, "inverted block ends" );
2370
2371 // Compute the register antidependencies for the basic block
2372 ComputeRegisterAntidependencies(bb);
2373 if (_cfg->C->failing()) return; // too many D-U pinch points
2374
2646 // We put edges from the prior and current DEF/KILLs to the pinch point.
2647 // We put the pinch point in _reg_node. If there's already a pinch point
2648 // we merely add an edge from the current DEF/KILL to the pinch point.
2649
2650 // After doing the DEF/KILLs, we handle USEs. For each used register, we
2651 // put an edge from the pinch point to the USE.
2652
2653 // To be expedient, the _reg_node array is pre-allocated for the whole
2654 // compilation. _reg_node is lazily initialized; it either contains a NULL,
2655 // or a valid def/kill/pinch-point, or a leftover node from some prior
2656 // block. A leftover node from a prior block is treated like a NULL (no
2657 // prior def, so no anti-dependence is needed). A valid def is
2658 // distinguished by being in the current block.
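// (Illustrative shape of the edges described above:
//   prior DEF/KILL ----\
//                       +--> pinch point --> USE
//   current DEF/KILL --/
// every DEF/KILL of a register feeds its pinch point, and the pinch point
// feeds each USE, which yields the anti-dependence ordering.)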
2659 bool fat_proj_seen = false;
2660 uint last_safept = _bb_end-1;
2661 Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
2662 Node* last_safept_node = end_node;
2663 for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2664 Node *n = b->_nodes[i];
2665 int is_def = n->outcnt(); // treat as a def if it has uses prior to adding precedence edges
2666 if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2667 // Fat-proj kills a slew of registers
2668 // This can add edges to 'n' and obscure whether or not it was a def,
2669 // hence the is_def flag.
2670 fat_proj_seen = true;
2671 RegMask rm = n->out_RegMask();// Make local copy
2672 while( rm.is_NotEmpty() ) {
2673 OptoReg::Name kill = rm.find_first_elem();
2674 rm.Remove(kill);
2675 anti_do_def( b, n, kill, is_def );
2676 }
2677 } else {
2678 // Get DEF'd registers the normal way
2679 anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2680 anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2681 }
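// (Illustrative: a fat_proj is typically a call's kill projection naming many
// registers at once; draining the local RegMask copy gives each killed
// register the same anti_do_def() pinch-point treatment as a single-register
// DEF in the else branch.)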
2682
2683 // Check each register used by this instruction for a following DEF/KILL
2684 // that must occur afterward and requires an anti-dependence edge.
2685 for( uint j=0; j<n->req(); j++ ) {
2686 Node *def = n->in(j);
2687 if( def ) {
2688 assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
2689 anti_do_use( b, n, _regalloc->get_reg_first(def) );
2690 anti_do_use( b, n, _regalloc->get_reg_second(def) );
2691 }
2692 }
2693 // Do not allow defs of new derived values to float above GC
2694 // points unless the base is definitely available at the GC point.
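// (Illustrative hazard, hypothetical values: if m uses derived == base + 16,
// an oop_ptr with nonzero offset, and m were allowed to sink below the next
// safepoint, the derived pointer would be live across the GC point without a
// guaranteed base; the add_prec edge below pins m before last_safept_node.)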
2695
2696 Node *m = b->_nodes[i];
2697
2698 // Add precedence edge from following safepoint to use of derived pointer
2699 if( last_safept_node != end_node &&
2700 m != last_safept_node) {
2701 for (uint k = 1; k < m->req(); k++) {
2702 const Type *t = m->in(k)->bottom_type();
2703 if( t->isa_oop_ptr() &&
2704 t->is_ptr()->offset() != 0 ) {
2705 last_safept_node->add_prec( m );
2706 break;
2707 }
2708 }
|