src/cpu/x86/vm/x86_32.ad

rev 6114 : 8037821: Account for trampoline stubs when estimating code buffer sizes
Summary: Correct calculation of stubs section size when trampoline stubs are used.
Contributed-by: Lutz.Schmidt@sap.com


1280 #endif
1281   masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
1282   masm.jump_cc(Assembler::notEqual,
1283                RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1284   /* WARNING: these NOPs are critical so that the verified entry point is
1285      properly aligned for patching by NativeJump::patch_verified_entry() */
1286   int nops_cnt = 2;
1287   if( !OptoBreakpoint ) // Leave space for int3
1288      nops_cnt += 1;
1289   masm.nop(nops_cnt);
1290 
1291   assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
1292 }
1293 
1294 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1295   return OptoBreakpoint ? 11 : 12;
1296 }
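
As a quick sanity check on the 11/12 returned above, here is my own byte
accounting for the unverified entry point; the instruction encodings are
standard IA-32 and are not spelled out anywhere in the webrev itself:

    // cmp eax, [ecx + klass_offset]   3B 41 04      -> 3 bytes
    // jne <ic_miss_stub>              0F 85 rel32   -> 6 bytes
    // nop padding                                   -> 2 or 3 bytes
    const int cmp_bytes = 3;
    const int jcc_bytes = 6;
    const int nop_bytes = OptoBreakpoint ? 2 : 3;  // int3 takes the third byte
    // 3 + 6 + 2 = 11 with OptoBreakpoint, 3 + 6 + 3 = 12 without,
    // which matches MachUEPNode::size(ra_).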
1297 
1298 
1299 //=============================================================================
1300 uint size_exception_handler() {
1301   // NativeCall instruction size is the same as NativeJump.
1302   // The exception handler starts out as a jump and can be patched to
1303   // a call by deoptimization.  (4932387)
1304   // Note that this value is also credited (in output.cpp) to
1305   // the size of the code section.
1306   return NativeJump::instruction_size;
1307 }
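
The first comment leans on the fact that a direct jump and a direct call have
the same length on IA-32, which is what makes patching the handler in place
possible. A minimal sketch of that invariant; the assert is my own
illustration, only the NativeJump/NativeCall size constants come from the
existing HotSpot headers:

    // jmp  rel32 -> E9 xx xx xx xx   (5 bytes, NativeJump::instruction_size)
    // call rel32 -> E8 xx xx xx xx   (5 bytes, NativeCall::instruction_size)
    assert(NativeCall::instruction_size == NativeJump::instruction_size,
           "handler emitted as a jump must be patchable into a call in place");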
1308 
1309 // Emit exception handler code.  Stuff framesize into a register
1310 // and call a VM stub routine.
1311 int emit_exception_handler(CodeBuffer& cbuf) {
1312 
1313   // Note that the code buffer's insts_mark is always relative to insts.
1314   // That's why we must use the macroassembler to generate a handler.
1315   MacroAssembler _masm(&cbuf);
1316   address base =
1317   __ start_a_stub(size_exception_handler());
1318   if (base == NULL)  return 0;  // CodeBuffer::expand failed
1319   int offset = __ offset();
1320   __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
1321   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
1322   __ end_a_stub();
1323   return offset;
1324 }
1325 
1326 uint size_deopt_handler() {
1327   // NativeCall instruction size is the same as NativeJump.
1328   // The handler starts out as a jump and can be patched to
1329   // a call by deoptimization.  (4932387)
1330   // Note that this value is also credited (in output.cpp) to
1331   // the size of the code section.
1332   return 5 + NativeJump::instruction_size; // pushl(); jmp;
1333 }
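
Breaking that sum down (my own accounting, and it assumes pushptr() of an
internal address boils down to a push imm32 on IA-32, which is what the
literal 5 stands for):

    // push imm32 -> 68 xx xx xx xx   (5 bytes, the "pushl()" above)
    // jmp  rel32 -> E9 xx xx xx xx   (5 bytes, NativeJump::instruction_size)
    const int deopt_stub_bytes = 5 + NativeJump::instruction_size;  // == 10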
1334 
1335 // Emit deopt handler code.
1336 int emit_deopt_handler(CodeBuffer& cbuf) {
1337 
1338   // Note that the code buffer's insts_mark is always relative to insts.
1339   // That's why we must use the macroassembler to generate a handler.
1340   MacroAssembler _masm(&cbuf);
1341   address base =
1342   __ start_a_stub(size_deopt_handler());
1343   if (base == NULL)  return 0;  // CodeBuffer::expand failed
1344   int offset = __ offset();
1345   InternalAddress here(__ pc());
1346   __ pushptr(here.addr());
1347 
1348   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
1349   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
1350   __ end_a_stub();
1351   return offset;
1352 }
1353 
1354 int Matcher::regnum_to_fpu_offset(int regnum) {
1355   return regnum - 32; // The FP registers are in the second chunk
1356 }
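
A worked example of the mapping (the register numbers are made up purely for
illustration): the first chunk evidently spans register numbers 0..31, so the
floating-point chunk starts at 32 and the subtraction turns a global register
number into an offset within that chunk:

    int off0 = Matcher::regnum_to_fpu_offset(32);  // first FP register  -> 0
    int off1 = Matcher::regnum_to_fpu_offset(33);  // second FP register -> 1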
1357 
1358 // This is UltraSparc-specific; true just means we have a fast l2f conversion
1359 const bool Matcher::convL2FSupported(void) {
1360   return true;
1361 }
1362 
1363 // Is this branch offset short enough that a short branch can be used?
1364 //
1365 // NOTE: If the platform does not provide any short branch variants, then
1366 //       this method should return false for offset 0.
1367 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1368   // The passed offset is relative to the address of the branch.
1369   // On x86 a branch displacement is calculated relative to the address
1370   // of the next instruction.
1371   offset -= br_size;
1372 
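
A worked example of the adjustment just above (all numbers are hypothetical):
a short conditional branch on x86 is two bytes long, so if the matcher reports
a target 100 bytes past the branch's own address, the displacement that
actually gets encoded is 100 - 2 = 98, which still fits in the signed 8-bit
range a short branch can reach:

    int br_size = 2;      // short jcc: opcode byte + rel8
    int offset  = 100;    // target relative to the branch's own address
    offset -= br_size;    // 98: now relative to the next instruction
    bool fits_short = (-128 <= offset && offset <= 127);  // true here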




1280 #endif
1281   masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
1282   masm.jump_cc(Assembler::notEqual,
1283                RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1284   /* WARNING: these NOPs are critical so that the verified entry point is
1285      properly aligned for patching by NativeJump::patch_verified_entry() */
1286   int nops_cnt = 2;
1287   if( !OptoBreakpoint ) // Leave space for int3
1288      nops_cnt += 1;
1289   masm.nop(nops_cnt);
1290 
1291   assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
1292 }
1293 
1294 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1295   return OptoBreakpoint ? 11 : 12;
1296 }
1297 
1298 
1299 //=============================================================================
1300 
1301 int Matcher::regnum_to_fpu_offset(int regnum) {
1302   return regnum - 32; // The FP registers are in the second chunk
1303 }
1304 
1305 // This is UltraSparc-specific; true just means we have a fast l2f conversion
1306 const bool Matcher::convL2FSupported(void) {
1307   return true;
1308 }
1309 
1310 // Is this branch offset short enough that a short branch can be used?
1311 //
1312 // NOTE: If the platform does not provide any short branch variants, then
1313 //       this method should return false for offset 0.
1314 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1315   // The passed offset is relative to the address of the branch.
1316   // On x86 a branch displacement is calculated relative to the address
1317   // of the next instruction.
1318   offset -= br_size;
1319