src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp

Old:





 136               mo = frame_map()->address_for_monitor_object(i);
 137       assert(ml.index() == noreg && mo.index() == noreg, "sanity");
 138       __ ld(R0, slot_offset + 0, OSR_buf);
 139       __ std(R0, ml.disp(), ml.base());
 140       __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 141       __ std(R0, mo.disp(), mo.base());
 142     }
 143   }
 144 }
 145 
 146 
 147 int LIR_Assembler::emit_exception_handler() {
 148   // If the last instruction is a call (typically to do a throw which
 149   // is coming at the end after block reordering) the return address
 150   // must still point into the code area in order to avoid assertion
 151   // failures when searching for the corresponding bci => add a nop
 152   // (was bug 5/14/1999 - gri).
 153   __ nop();
 154 
 155   // Generate code for the exception handler.
 156   address handler_base = __ start_a_stub(exception_handler_size);
 157 
 158   if (handler_base == NULL) {
 159     // Not enough space left for the handler.
 160     bailout("exception handler overflow");
 161     return -1;
 162   }
 163 
 164   int offset = code_offset();
 165   address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
 166   //__ load_const_optimized(R0, entry_point);
 167   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
 168   __ mtctr(R0);
 169   __ bctr();
 170 
 171   guarantee(code_offset() - offset <= exception_handler_size, "overflow");
 172   __ end_a_stub();
 173 
 174   return offset;
 175 }
 176 
 177 
 178 // Emit the code to remove the frame from the stack in the exception
 179 // unwind path.
 180 int LIR_Assembler::emit_unwind_handler() {
 181   _masm->block_comment("Unwind handler");
 182 
 183   int offset = code_offset();
 184   bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
 185   const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;
 186 
 187   // Fetch the exception from TLS and clear out exception related thread state.
 188   __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 189   __ li(R0, 0);
 190   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 191   __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);


 216   __ bctr();
 217 
 218   // Emit the slow path assembly.
 219   if (stub != NULL) {
 220     stub->emit_code(this);
 221   }
 222 
 223   return offset;
 224 }
 225 
 226 
 227 int LIR_Assembler::emit_deopt_handler() {
 228   // If the last instruction is a call (typically to do a throw which
 229   // is coming at the end after block reordering) the return address
 230   // must still point into the code area in order to avoid assertion
 231   // failures when searching for the corresponding bci => add a nop
 232   // (was bug 5/14/1999 - gri).
 233   __ nop();
 234 
 235   // Generate code for deopt handler.
 236   address handler_base = __ start_a_stub(deopt_handler_size);
 237 
 238   if (handler_base == NULL) {
 239     // Not enough space left for the handler.
 240     bailout("deopt handler overflow");
 241     return -1;
 242   }
 243 
 244   int offset = code_offset();
 245   __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
 246 
 247   guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
 248   __ end_a_stub();
 249 
 250   return offset;
 251 }
 252 
 253 
 254 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 255   if (o == NULL) {
 256     __ li(reg, 0);
 257   } else {
 258     AddressLiteral addrlit = __ constant_oop_address(o);
 259     __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
 260   }
 261 }
 262 
 263 
 264 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 265   // Allocate a new index in table to hold the object once it's been patched.
 266   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
 267   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);


1290     int offset = __ offset();
1291     add_debug_info_for_branch(info);
1292     __ load_from_polling_page(poll_addr);
1293     return offset;
1294   }
1295 
1296   __ load_const_optimized(tmp->as_register(), (intptr_t)os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
1297   if (info != NULL) {
1298     add_debug_info_for_branch(info);
1299   }
1300   int offset = __ offset();
1301   __ relocate(relocInfo::poll_type);
1302   __ load_from_polling_page(tmp->as_register());
1303 
1304   return offset;
1305 }
1306 
1307 
1308 void LIR_Assembler::emit_static_call_stub() {
1309   address call_pc = __ pc();
1310   address stub = __ start_a_stub(max_static_call_stub_size);
1311   if (stub == NULL) {
1312     bailout("static call stub overflow");
1313     return;
1314   }
1315 
1316   // For java_to_interp stubs we use R11_scratch1 as scratch register
1317   // and in call trampoline stubs we use R12_scratch2. This way we
1318   // can distinguish them (see is_NativeCallTrampolineStub_at()).
1319   const Register reg_scratch = R11_scratch1;
1320 
1321   // Create a static stub relocation which relates this stub
1322   // with the call instruction at insts_call_instruction_offset in the
1323   // instructions code-section.
1324   int start = __ offset();
1325   __ relocate(static_stub_Relocation::spec(call_pc));
1326 
1327   // Now, create the stub's code:
1328   // - load the TOC
1329   // - load the inline cache oop from the constant pool
1330   // - load the call target from the constant pool
1331   // - call
1332   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
1333   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
1334   bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);
1335 
1336   if (ReoptimizeCallSequences) {
1337     __ b64_patchable((address)-1, relocInfo::none);
1338   } else {
1339     AddressLiteral a((address)-1);
1340     success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
1341     __ mtctr(reg_scratch);
1342     __ bctr();
1343   }
1344   if (!success) {
1345     bailout("const section overflow");
1346     return;
1347   }
1348 
1349   assert(__ offset() - start <= max_static_call_stub_size, "stub too big");
1350   __ end_a_stub();
1351 }
1352 
1353 
1354 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1355   bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
1356   if (opr1->is_single_fpu()) {
1357     __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
1358   } else if (opr1->is_double_fpu()) {
1359     __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
1360   } else if (opr1->is_single_cpu()) {
1361     if (opr2->is_constant()) {
1362       switch (opr2->as_constant_ptr()->type()) {
1363         case T_INT:
1364           {
1365             jint con = opr2->as_constant_ptr()->as_jint();
1366             if (unsigned_comp) {
1367               if (Assembler::is_uimm(con, 16)) {
1368                 __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
1369               } else {

New:

 136               mo = frame_map()->address_for_monitor_object(i);
 137       assert(ml.index() == noreg && mo.index() == noreg, "sanity");
 138       __ ld(R0, slot_offset + 0, OSR_buf);
 139       __ std(R0, ml.disp(), ml.base());
 140       __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 141       __ std(R0, mo.disp(), mo.base());
 142     }
 143   }
 144 }
 145 
 146 
 147 int LIR_Assembler::emit_exception_handler() {
 148   // If the last instruction is a call (typically to do a throw which
 149   // is coming at the end after block reordering) the return address
 150   // must still point into the code area in order to avoid assertion
 151   // failures when searching for the corresponding bci => add a nop
 152   // (was bug 5/14/1999 - gri).
 153   __ nop();
 154 
 155   // Generate code for the exception handler.
 156   address handler_base = __ start_a_stub(exception_handler_size());
 157 
 158   if (handler_base == NULL) {
 159     // Not enough space left for the handler.
 160     bailout("exception handler overflow");
 161     return -1;
 162   }
 163 
 164   int offset = code_offset();
 165   address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
 166   //__ load_const_optimized(R0, entry_point);
 167   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
 168   __ mtctr(R0);
 169   __ bctr();
 170 
 171   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 172   __ end_a_stub();
 173 
 174   return offset;
 175 }
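The handler above branches to the Runtime1 entry point TOC-relative: R0 is formed as R29_TOC plus MacroAssembler::offset_to_global_toc(entry_point) instead of materializing the full 64-bit address. A minimal, self-contained sketch of that addressing idea (the offset_to_toc helper, addresses, and all other names below are invented for illustration, not HotSpot code):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Express a target address as "TOC base + signed 32-bit offset"; this only works
// while the target stays within +/- 2 GB of the TOC base.
static intptr_t offset_to_toc(intptr_t toc_base, intptr_t target) {
  intptr_t off = target - toc_base;
  assert(off == (intptr_t)(int32_t)off && "target not reachable TOC-relative");
  return off;
}

int main() {
  const intptr_t toc_base = 0x10000000;        // invented TOC base
  const intptr_t entry    = toc_base + 0x42a8; // invented runtime entry point
  const intptr_t off      = offset_to_toc(toc_base, entry);
  // What an add_const_optimized-style sequence reconstructs at run time:
  const intptr_t rebuilt  = toc_base + off;
  std::printf("offset=%ld, rebuilt == entry: %d\n", (long)off, rebuilt == entry);
  return 0;
}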
 176 
 177 
 178 // Emit the code to remove the frame from the stack in the exception
 179 // unwind path.
 180 int LIR_Assembler::emit_unwind_handler() {
 181   _masm->block_comment("Unwind handler");
 182 
 183   int offset = code_offset();
 184   bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
 185   const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;
 186 
 187   // Fetch the exception from TLS and clear out exception related thread state.
 188   __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 189   __ li(R0, 0);
 190   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 191   __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);


 216   __ bctr();
 217 
 218   // Emit the slow path assembly.
 219   if (stub != NULL) {
 220     stub->emit_code(this);
 221   }
 222 
 223   return offset;
 224 }
 225 
 226 
 227 int LIR_Assembler::emit_deopt_handler() {
 228   // If the last instruction is a call (typically to do a throw which
 229   // is coming at the end after block reordering) the return address
 230   // must still point into the code area in order to avoid assertion
 231   // failures when searching for the corresponding bci => add a nop
 232   // (was bug 5/14/1999 - gri).
 233   __ nop();
 234 
 235   // Generate code for deopt handler.
 236   address handler_base = __ start_a_stub(deopt_handler_size());
 237 
 238   if (handler_base == NULL) {
 239     // Not enough space left for the handler.
 240     bailout("deopt handler overflow");
 241     return -1;
 242   }
 243 
 244   int offset = code_offset();
 245   __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
 246 
 247   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 248   __ end_a_stub();
 249 
 250   return offset;
 251 }
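emit_exception_handler() and emit_deopt_handler() above share one bracket: start_a_stub(size) reserves space and the emitter bails out with -1 if the code buffer cannot hold the stub, the code is emitted, and a guarantee checks that the declared size was not exceeded before end_a_stub(). A toy, self-contained sketch of that pattern (ToyCodeBuffer and emit_toy_handler are invented names, not the HotSpot CodeBuffer API):

#include <cassert>
#include <cstddef>
#include <vector>

struct ToyCodeBuffer {
  std::vector<unsigned char> code;
  std::size_t limit = 64;                             // pretend capacity of the stub section
  // Returns a start address if 'needed' more bytes fit, otherwise nullptr.
  unsigned char* start_a_stub(std::size_t needed) {
    return (code.size() + needed <= limit) ? code.data() + code.size() : nullptr;
  }
  std::size_t offset() const { return code.size(); }
  void emit(unsigned char b) { code.push_back(b); }
  void end_a_stub() { /* nothing to release in the toy model */ }
};

// Mirrors the shape of the emitters above; returning -1 plays the role of bailout(...).
static int emit_toy_handler(ToyCodeBuffer& cb, std::size_t handler_size) {
  if (cb.start_a_stub(handler_size) == nullptr) return -1;     // "handler overflow"
  std::size_t offset = cb.offset();
  for (int i = 0; i < 4; i++) cb.emit(0x60);                   // stand-ins for real instructions
  assert(cb.offset() - offset <= handler_size && "overflow");  // guarantee(...) in the real code
  cb.end_a_stub();
  return (int)offset;
}

int main() {
  ToyCodeBuffer cb;
  return emit_toy_handler(cb, 16) >= 0 ? 0 : 1;
}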
 252 
 253 
 254 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 255   if (o == NULL) {
 256     __ li(reg, 0);
 257   } else {
 258     AddressLiteral addrlit = __ constant_oop_address(o);
 259     __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
 260   }
 261 }
 262 
 263 
 264 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 265   // Allocate a new index in table to hold the object once it's been patched.
 266   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
 267   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);


1290     int offset = __ offset();
1291     add_debug_info_for_branch(info);
1292     __ load_from_polling_page(poll_addr);
1293     return offset;
1294   }
1295 
1296   __ load_const_optimized(tmp->as_register(), (intptr_t)os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
1297   if (info != NULL) {
1298     add_debug_info_for_branch(info);
1299   }
1300   int offset = __ offset();
1301   __ relocate(relocInfo::poll_type);
1302   __ load_from_polling_page(tmp->as_register());
1303 
1304   return offset;
1305 }
1306 
1307 
1308 void LIR_Assembler::emit_static_call_stub() {
1309   address call_pc = __ pc();
1310   address stub = __ start_a_stub(static_call_stub_size());
1311   if (stub == NULL) {
1312     bailout("static call stub overflow");
1313     return;
1314   }
1315 
1316   // For java_to_interp stubs we use R11_scratch1 as scratch register
1317   // and in call trampoline stubs we use R12_scratch2. This way we
1318   // can distinguish them (see is_NativeCallTrampolineStub_at()).
1319   const Register reg_scratch = R11_scratch1;
1320 
1321   // Create a static stub relocation which relates this stub
1322   // with the call instruction at insts_call_instruction_offset in the
1323   // instructions code-section.
1324   int start = __ offset();
1325   __ relocate(static_stub_Relocation::spec(call_pc));
1326 
1327   // Now, create the stub's code:
1328   // - load the TOC
1329   // - load the inline cache oop from the constant pool
1330   // - load the call target from the constant pool
1331   // - call
1332   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
1333   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
1334   bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);
1335 
1336   if (ReoptimizeCallSequences) {
1337     __ b64_patchable((address)-1, relocInfo::none);
1338   } else {
1339     AddressLiteral a((address)-1);
1340     success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
1341     __ mtctr(reg_scratch);
1342     __ bctr();
1343   }
1344   if (!success) {
1345     bailout("const section overflow");
1346     return;
1347   }
1348 
1349   assert(__ offset() - start <= static_call_stub_size(), "stub too big");
1350   __ end_a_stub();
1351 }
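The stub above keeps both the inline-cache metadata and the call target in the method's TOC, initialized to placeholders (a NULL metadata slot and (address)-1) that are patched once the callee is resolved. A deliberately simplified, self-contained model of that indirection (ToyStubConstants and friends are invented; real patching rewrites the constant section in memory, not a C++ struct):

#include <cstdio>

using ToyEntry = void (*)();

struct ToyStubConstants {
  const void* ic_metadata = nullptr;   // analogue of allocate_metadata_address((Metadata*)NULL)
  ToyEntry    call_target = nullptr;   // stands in for the (address)-1 placeholder
};

static void resolved_callee() { std::puts("callee reached through the patched stub"); }

// "Executing" the stub: fetch the target from the constant area and branch to it,
// much like the mtctr/bctr sequence above.
static void run_toy_stub(const ToyStubConstants& c) {
  if (c.call_target == nullptr) { std::puts("stub not patched yet"); return; }
  c.call_target();
}

int main() {
  ToyStubConstants c;
  run_toy_stub(c);                      // placeholder still in place
  c.call_target = &resolved_callee;     // the later resolution/patching step
  run_toy_stub(c);
  return 0;
}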
1352 
1353 
1354 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1355   bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
1356   if (opr1->is_single_fpu()) {
1357     __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
1358   } else if (opr1->is_double_fpu()) {
1359     __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
1360   } else if (opr1->is_single_cpu()) {
1361     if (opr2->is_constant()) {
1362       switch (opr2->as_constant_ptr()->type()) {
1363         case T_INT:
1364           {
1365             jint con = opr2->as_constant_ptr()->as_jint();
1366             if (unsigned_comp) {
1367               if (Assembler::is_uimm(con, 16)) {
1368                 __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
1369               } else {


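The integer case of comp_op() above only emits cmplwi when Assembler::is_uimm(con, 16) holds, i.e. when the constant fits in the instruction's 16-bit unsigned immediate field; otherwise the constant is first materialized in a register and compared register-to-register. A standalone sketch of that range check (fits_uimm is an invented helper, not the HotSpot Assembler API):

#include <cstdint>
#include <cstdio>

// True if 'value' can be encoded as an nbits-wide unsigned immediate.
static bool fits_uimm(int64_t value, unsigned nbits) {
  return value >= 0 && value < (int64_t(1) << nbits);
}

int main() {
  const int32_t candidates[] = { 0, 42, 0xFFFF, 0x10000, -1 };
  for (int32_t con : candidates) {
    std::printf("%d -> %s\n", con,
                fits_uimm(con, 16) ? "cmplwi immediate" : "load constant, then cmplw");
  }
  return 0;
}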