src/share/vm/opto/output.cpp

--- old/src/share/vm/opto/output.cpp
+++ new/src/share/vm/opto/output.cpp
@@ -349,42 +349,42 @@
   memset(block_worst_case_pad, 0, nblocks * sizeof(int));

   DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
   DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )

   bool has_short_branch_candidate = false;

   // Initialize the sizes to 0
   code_size  = 0;          // Size in bytes of generated code
   stub_size  = 0;          // Size in bytes of all stub entries
   // Size in bytes of all relocation entries, including those in local stubs.
   // Start with 2 bytes of reloc info for the unvalidated entry point.
   reloc_size = 1;          // Number of relocation entries

   // Make three passes.  The first computes pessimistic blk_starts,
   // relative jmp_offset and reloc_size information.  The second performs
   // short branch substitution using the pessimistic sizing.  The
   // third inserts nops where needed.

   // Step one, perform a pessimistic sizing pass.
-  uint last_call_adr = max_uint;
-  uint last_avoid_back_to_back_adr = max_uint;
+  uint last_call_adr = (uint) -1;
+  uint last_avoid_back_to_back_adr = (uint) -1;
   uint nop_size = (new (this) MachNopNode())->size(_regalloc);
   for (uint i = 0; i < nblocks; i++) { // For all blocks
     Block* block = _cfg->get_block(i);

     // During short branch replacement, we store the relative (to blk_starts)
     // offset of the jump in jmp_offset, rather than its absolute offset.
     // This is so that we do not need to recompute sizes of all nodes when
     // we compute correct blk_starts in our next sizing pass.
     jmp_offset[i] = 0;
     jmp_size[i]   = 0;
     jmp_nidx[i]   = -1;
     DEBUG_ONLY( jmp_target[i] = 0; )
     DEBUG_ONLY( jmp_rule[i]   = 0; )

     // Sum all instruction sizes to compute block size
     uint last_inst = block->number_of_nodes();
     uint blk_size = 0;
     for (uint j = 0; j < last_inst; j++) {
       Node* nj = block->get_node(j);
       // Handle machine instruction nodes
@@ -462,41 +462,41 @@
         // max_loop_pad in lock-step with blk_size, so sizing
         // calculations in subsequent blocks still can conservatively
         // detect that it may be the last instruction in this block.
         if (last_call_adr == blk_starts[i]+blk_size) {
           last_call_adr += max_loop_pad;
         }
         if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
           last_avoid_back_to_back_adr += max_loop_pad;
         }
         blk_size += max_loop_pad;
         block_worst_case_pad[i + 1] = max_loop_pad;
       }
     }

     // Save block size; update total method size
     blk_starts[i+1] = blk_starts[i]+blk_size;
   }

   // Step two, replace eligible long jumps.
   bool progress = true;
-  uint last_may_be_short_branch_adr = max_uint;
+  uint last_may_be_short_branch_adr = (uint) -1;
   while (has_short_branch_candidate && progress) {
     progress = false;
     has_short_branch_candidate = false;
     int adjust_block_start = 0;
     for (uint i = 0; i < nblocks; i++) {
       Block* block = _cfg->get_block(i);
       int idx = jmp_nidx[i];
       MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
       if (mach != NULL && mach->may_be_short_branch()) {
 #ifdef ASSERT
         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
         int j;
         // Find the branch; ignore trailing NOPs.
         for (j = block->number_of_nodes()-1; j>=0; j--) {
           Node* n = block->get_node(j);
           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
             break;
         }
         assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 #endif

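The substantive change in both hunks is the spelling of the "address not seen yet" sentinel: the constant max_uint is replaced by the equivalent expression (uint) -1. A minimal standalone sketch (plain C++, independent of the HotSpot headers) of why the two spellings denote the same value:

    #include <cassert>
    #include <climits>

    int main() {
      // Converting -1 to an unsigned type is defined in C++ as reduction
      // modulo 2^N, so the result is always the type's maximum value.
      unsigned int sentinel = (unsigned int) -1;
      assert(sentinel == UINT_MAX); // the value max_uint named
      return 0;
    }

Since no recorded code offset reaches this value in practice, a comparison like last_call_adr == blk_starts[i]+blk_size cannot match before a real call address has been stored.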

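The comment in the first hunk about storing each jump offset relative to blk_starts is what keeps the later passes cheap: when short-branch substitution moves blocks, only blk_starts changes, and an absolute position can be rebuilt on demand as blk_starts[i] + jmp_offset[i]. A toy illustration with made-up numbers (not HotSpot code):

    #include <cstdio>

    int main() {
      // Block 1 starts at byte 24; its jump sits 10 bytes into the block.
      unsigned blk_start = 24;
      unsigned jmp_offset = 10;
      printf("jump at %u\n", blk_start + jmp_offset); // 34

      // A preceding block shrinks by 6 bytes: the block start moves,
      // but the stored relative offset stays valid as-is.
      blk_start -= 6;
      printf("jump at %u\n", blk_start + jmp_offset); // 28
      return 0;
    }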
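The "three passes" comment describes the overall shape of the pass: a pessimistic sizing pass, then short-branch substitution iterated to a fixed point (the has_short_branch_candidate && progress loop of step two), then nop insertion. Below is a minimal, self-contained sketch of that relaxation idea with made-up sizes and ranges; distances are measured between block starts for simplicity, and alignment padding, per-node sizing, and the real pass's safety margins are all omitted:

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    // Hypothetical branch record: the block it ends, the block it targets,
    // and its currently chosen encoding size in bytes.
    struct Branch { int from_blk; int to_blk; int size; };

    int main() {
      const int kLong = 8;        // pessimistic (long) branch encoding
      const int kShort = 2;       // short branch encoding
      const int kShortRange = 60; // reach of the short form, in bytes

      // Per-block code sizes; blocks 0 and 2 end in a long branch that is
      // already counted into their sizes (all numbers invented).
      std::vector<int> blk_bytes = { 24, 16, 24, 16 };
      std::vector<Branch> branches = { { 0, 3, kLong }, { 2, 1, kLong } };

      // blk_starts[i] = byte offset of block i; last slot is the method end.
      std::vector<int> blk_starts(blk_bytes.size() + 1, 0);

      // Iterate to a fixed point: shrinking one branch can bring another
      // branch's target into short range, so rescan until nothing changes.
      bool progress = true;
      while (progress) {
        progress = false;
        for (size_t i = 0; i < blk_bytes.size(); i++) {
          blk_starts[i + 1] = blk_starts[i] + blk_bytes[i];
        }
        for (size_t b = 0; b < branches.size(); b++) {
          Branch& br = branches[b];
          int dist = blk_starts[br.to_blk] - blk_starts[br.from_blk];
          if (br.size == kLong && std::abs(dist) <= kShortRange) {
            blk_bytes[br.from_blk] -= (kLong - kShort); // long -> short
            br.size = kShort;
            progress = true;
          }
        }
      }
      printf("relaxed method size: %d bytes\n", blk_starts.back());
      return 0;
    }

The loop terminates because branches only ever shrink. With the sizes above, shrinking the branch in block 2 pulls block 3 close enough for the branch in block 0 to go short on the next iteration, which is exactly the cascading effect the progress flag exists to catch.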