src/cpu/s390/vm/c1_LIRAssembler_s390.cpp

Old version:

 136 // --------------------------------------------------------------------------------------------
 137 
 138 address LIR_Assembler::emit_call_c(address a) {
 139   __ align_call_far_patchable(__ pc());
 140   address call_addr = __ call_c_opt(a);
 141   if (call_addr == NULL) {
 142     bailout("const section overflow");
 143   }
 144   return call_addr;
 145 }
 146 
 147 int LIR_Assembler::emit_exception_handler() {
 148   // If the last instruction is a call (typically to do a throw which
 149   // is coming at the end after block reordering) the return address
 150   // must still point into the code area in order to avoid assertion
 151   // failures when searching for the corresponding bci. => Add a nop.
 152   // (was bug 5/14/1999 - gri)
 153   __ nop();
 154 
 155   // Generate code for exception handler.
 156   address handler_base = __ start_a_stub(exception_handler_size);
 157   if (handler_base == NULL) {
 158     // Not enough space left for the handler.
 159     bailout("exception handler overflow");
 160     return -1;
 161   }
 162 
 163   int offset = code_offset();
 164 
 165   address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
 166   address call_addr = emit_call_c(a);
 167   CHECK_BAILOUT_(-1);
 168   __ should_not_reach_here();
 169   guarantee(code_offset() - offset <= exception_handler_size, "overflow");
 170   __ end_a_stub();
 171 
 172   return offset;
 173 }
 174 
 175 // Emit the code to remove the frame from the stack in the exception
 176 // unwind path.
 177 int LIR_Assembler::emit_unwind_handler() {
 178 #ifndef PRODUCT
 179   if (CommentedAssembly) {
 180     _masm->block_comment("Unwind handler");
 181   }
 182 #endif
 183 
 184   int offset = code_offset();
 185   Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
 186   Register Rtmp1                      = Z_R11;
 187   Register Rtmp2                      = Z_R12;
 188 
 189   // Fetch the exception from TLS and clear out exception related thread state.


 234   __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
 235   __ z_br(Z_R5);
 236 
 237   // Emit the slow path assembly.
 238   if (stub != NULL) {
 239     stub->emit_code(this);
 240   }
 241 
 242   return offset;
 243 }
 244 
 245 int LIR_Assembler::emit_deopt_handler() {
 246   // If the last instruction is a call (typically to do a throw which
 247   // is coming at the end after block reordering) the return address
 248   // must still point into the code area in order to avoid assertion
 249   // failures when searching for the corresponding bci. => Add a nop.
 250   // (was bug 5/14/1999 - gri)
 251   __ nop();
 252 
 253   // Generate code for deopt handler.
 254   address handler_base = __ start_a_stub(deopt_handler_size);
 255   if (handler_base == NULL) {
 256     // Not enough space left for the handler.
 257     bailout("deopt handler overflow");
 258     return -1;
 259   }
 259   int offset = code_offset();
 260   // Size must be constant (see HandlerImpl::emit_deopt_handler).
 261   __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
 262   __ call(Z_R1_scratch);
 263   guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
 264   __ end_a_stub();
 265 
 266   return offset;
 267 }
 268 
 269 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 270   if (o == NULL) {
 271     __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
 272   } else {
 273     AddressLiteral a = __ allocate_oop_address(o);
 274     bool success = __ load_oop_from_toc(reg, a, reg);
 275     if (!success) {
 276       bailout("const section overflow");
 277     }
 278   }
 279 }
 280 
 281 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 282   // Allocate a new index in table to hold the object once it's been patched.
 283   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);


1141   __ z_br(Z_R14); // Return to caller.
1142 }
1143 
1144 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1145   AddressLiteral pp(os::get_polling_page());
1146   __ load_const_optimized(tmp->as_register_lo(), pp);
1147   guarantee(info != NULL, "Shouldn't be NULL");
1148   add_debug_info_for_branch(info);
1149   int offset = __ offset();
1150   __ relocate(relocInfo::poll_type);
1151   __ load_from_polling_page(tmp->as_register_lo());
1152   return offset;
1153 }
1154 
1155 void LIR_Assembler::emit_static_call_stub() {
1156 
1157   // Stub is fixed up when the corresponding call is converted from calling
1158   // compiled code to calling interpreted code.
1159 
1160   address call_pc = __ pc();
1161   address stub = __ start_a_stub(call_stub_size);
1162   if (stub == NULL) {
1163     bailout("static call stub overflow");
1164     return;
1165   }
1166 
1167   int start = __ offset();
1168 
1169   __ relocate(static_stub_Relocation::spec(call_pc));
1170 
1171   // See also Matcher::interpreter_method_oop_reg().
1172   AddressLiteral meta = __ allocate_metadata_address(NULL);
1173   bool success = __ load_const_from_toc(Z_method, meta);
1174 
1175   __ set_inst_mark();
1176   AddressLiteral a((address)-1);
1177   success = success && __ load_const_from_toc(Z_R1, a);
1178   if (!success) {
1179     bailout("const section overflow");
1180     return;
1181   }
1182 
1183   __ z_br(Z_R1);
1184   assert(__ offset() - start <= call_stub_size, "stub too big");
1185   __ end_a_stub(); // Update current stubs pointer and restore insts_end.
1186 }
1187 
1188 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1189   bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
1190   if (opr1->is_single_cpu()) {
1191     Register reg1 = opr1->as_register();
1192     if (opr2->is_single_cpu()) {
1193       // cpu register - cpu register
1194       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1195         __ z_clgr(reg1, opr2->as_register());
1196       } else {
1197         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1198         if (unsigned_comp) {
1199           __ z_clr(reg1, opr2->as_register());
1200         } else {
1201           __ z_cr(reg1, opr2->as_register());
1202         }
1203       }
1204     } else if (opr2->is_stack()) {


New version:

 136 // --------------------------------------------------------------------------------------------
 137 
 138 address LIR_Assembler::emit_call_c(address a) {
 139   __ align_call_far_patchable(__ pc());
 140   address call_addr = __ call_c_opt(a);
 141   if (call_addr == NULL) {
 142     bailout("const section overflow");
 143   }
 144   return call_addr;
 145 }
 146 
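Note on emit_call_c: the emit position is first aligned (align_call_far_patchable) so the far-call sequence can later be patched safely, and call_c_opt reports failure by returning NULL when the constant it needs no longer fits in the constant section, which the caller turns into a compilation bailout. Below is a minimal standalone sketch of the round-up step, assuming a simple power-of-two alignment (the real s390 alignment rules are more involved); align_up is a hypothetical helper, not HotSpot's.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: round 'pc' up to the next 'alignment' boundary so a
    // multi-word call sequence never straddles a patching boundary.
    static uint8_t* align_up(uint8_t* pc, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      uintptr_t p = reinterpret_cast<uintptr_t>(pc);
      return reinterpret_cast<uint8_t*>((p + alignment - 1) & ~(alignment - 1));
    }

    int main() {
      uint8_t buffer[64];
      uint8_t* pc = buffer + 3;            // Unaligned emit position.
      uint8_t* aligned = align_up(pc, 8);  // The gap would be padded with nops.
      std::printf("padding bytes: %td\n", aligned - pc);
      return 0;
    }
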
 147 int LIR_Assembler::emit_exception_handler() {
 148   // If the last instruction is a call (typically to do a throw which
 149   // is coming at the end after block reordering) the return address
 150   // must still point into the code area in order to avoid assertion
 151   // failures when searching for the corresponding bci. => Add a nop.
 152   // (was bug 5/14/1999 - gri)
 153   __ nop();
 154 
 155   // Generate code for exception handler.
 156   address handler_base = __ start_a_stub(exception_handler_size());
 157   if (handler_base == NULL) {
 158     // Not enough space left for the handler.
 159     bailout("exception handler overflow");
 160     return -1;
 161   }
 162 
 163   int offset = code_offset();
 164 
 165   address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
 166   address call_addr = emit_call_c(a);
 167   CHECK_BAILOUT_(-1);
 168   __ should_not_reach_here();
 169   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 170   __ end_a_stub();
 171 
 172   return offset;
 173 }
 174 
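Note on emit_exception_handler: the handler is emitted into a stub area reserved up front; if the reservation fails the compile bails out, and afterwards a guarantee checks that the emitted code stayed within the reserved budget (the budget itself is what this webrev changes, from the constant exception_handler_size to the accessor exception_handler_size()). A standalone toy model of that reserve/emit/check discipline follows; CodeBuffer and its members are illustrative, not HotSpot's API.

    #include <cstdio>
    #include <vector>

    // Toy code buffer: reserve() fails when remaining space is too small,
    // mirroring start_a_stub() returning NULL and forcing a bailout.
    struct CodeBuffer {
      std::vector<unsigned char> bytes;
      size_t capacity = 32;
      bool reserve(size_t stub_size) { return bytes.size() + stub_size <= capacity; }
      void emit(unsigned char b) { bytes.push_back(b); }
    };

    int main() {
      const size_t handler_size = 8;  // Fixed budget, like exception_handler_size().
      CodeBuffer cb;
      if (!cb.reserve(handler_size)) {
        std::puts("bailout: exception handler overflow");
        return 1;
      }
      size_t offset = cb.bytes.size();            // code_offset() before emission.
      for (int i = 0; i < 5; i++) cb.emit(0x07);  // Emit the handler body.
      if (cb.bytes.size() - offset > handler_size) {
        std::puts("guarantee failed: overflow");  // Mirrors the guarantee(...) check.
        return 1;
      }
      std::printf("handler at offset %zu, %zu bytes\n", offset, cb.bytes.size() - offset);
      return 0;
    }
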
 175 // Emit the code to remove the frame from the stack in the exception
 176 // unwind path.
 177 int LIR_Assembler::emit_unwind_handler() {
 178 #ifndef PRODUCT
 179   if (CommentedAssembly) {
 180     _masm->block_comment("Unwind handler");
 181   }
 182 #endif
 183 
 184   int offset = code_offset();
 185   Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
 186   Register Rtmp1                      = Z_R11;
 187   Register Rtmp2                      = Z_R12;
 188 
 189   // Fetch the exception from TLS and clear out exception related thread state.


 234   __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
 235   __ z_br(Z_R5);
 236 
 237   // Emit the slow path assembly.
 238   if (stub != NULL) {
 239     stub->emit_code(this);
 240   }
 241 
 242   return offset;
 243 }
 244 
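Note on emit_unwind_handler: the handler branches (rather than calls) to the unwind_exception_id runtime stub, and any slow path recorded during the elided body ('stub') is emitted out of line afterwards via stub->emit_code(this). A standalone sketch of that deferred slow-path emission pattern, with hypothetical names (Emitter and friends are illustrative only):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Toy emitter: fast-path code is emitted inline; slow paths are recorded
    // and emitted after the main body, as with stub->emit_code(this) above.
    struct Emitter {
      std::vector<std::function<void()>> slow_paths;
      void inline_code(const char* what) { std::printf("inline: %s\n", what); }
      void add_slow_path(std::function<void()> f) { slow_paths.push_back(std::move(f)); }
      void flush_slow_paths() { for (auto& f : slow_paths) f(); }
    };

    int main() {
      Emitter e;
      e.inline_code("restore locks / fetch exception oop from TLS");
      e.add_slow_path([] { std::puts("slow path: monitorexit stub"); });
      e.inline_code("branch to unwind_exception entry");
      e.flush_slow_paths();  // Out-of-line code lands after the handler body.
      return 0;
    }
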
 245 int LIR_Assembler::emit_deopt_handler() {
 246   // If the last instruction is a call (typically to do a throw which
 247   // is coming at the end after block reordering) the return address
 248   // must still point into the code area in order to avoid assertion
 249   // failures when searching for the corresponding bci. => Add a nop.
 250   // (was bug 5/14/1999 - gri)
 251   __ nop();
 252 
 253   // Generate code for deopt handler.
 254   address handler_base = __ start_a_stub(deopt_handler_size());
 255   if (handler_base == NULL) {
 256     // Not enough space left for the handler.
 257     bailout("deopt handler overflow");
 258     return -1;
 259   }
 259   int offset = code_offset();
 260   // Size must be constant (see HandlerImpl::emit_deopt_handler).
 261   __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
 262   __ call(Z_R1_scratch);
 263   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 264   __ end_a_stub();
 265 
 266   return offset;
 267 }
 268 
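Note on emit_deopt_handler: the visible change in this hunk is deopt_handler_size becoming the accessor deopt_handler_size(), while the emitted sequence itself must stay constant-size (hence the load-into-scratch plus indirect call, whose encoding does not vary with the target address). A minimal sketch of the constant-to-accessor move, with illustrative sizes that are assumptions, not the real s390 values:

    #include <cstdio>

    // Before: a plain constant, e.g. enum { deopt_handler_size = 12 };
    // After: an accessor, so the value can be computed per platform or
    // configuration without touching call sites (illustrative only).
    static int deopt_handler_size() {
      const int instruction_size = 6;  // Assumed size of load_const and call.
      return 2 * instruction_size;     // load_const + indirect call.
    }

    int main() {
      std::printf("budget: %d bytes\n", deopt_handler_size());
      return 0;
    }
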
 269 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 270   if (o == NULL) {
 271     __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
 272   } else {
 273     AddressLiteral a = __ allocate_oop_address(o);
 274     bool success = __ load_oop_from_toc(reg, a, reg);
 275     if (!success) {
 276       bailout("const section overflow");
 277     }
 278   }
 279 }
 280 
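Note on jobject2reg: a non-NULL oop is materialized by reserving a slot in the TOC (the constant section) and loading from it; when the constant section is full, the load helper reports failure and the compile bails out rather than emitting wrong code. A toy model of a bounded constant pool with that failure mode (ConstPool and its members are hypothetical):

    #include <cstdio>
    #include <vector>

    struct ConstPool {
      std::vector<const void*> slots;
      size_t capacity = 2;  // Tiny on purpose, to show the overflow path.
      // Returns false when no slot is left, like load_oop_from_toc failing.
      bool add(const void* oop, size_t* index_out) {
        if (slots.size() >= capacity) return false;
        *index_out = slots.size();
        slots.push_back(oop);
        return true;
      }
    };

    int main() {
      ConstPool toc;
      int a = 0, b = 0, c = 0;
      const void* oops[] = { &a, &b, &c };
      size_t idx;
      for (const void* oop : oops) {
        if (!toc.add(oop, &idx)) {
          std::puts("bailout: const section overflow");
          return 1;
        }
        std::printf("oop placed in TOC slot %zu\n", idx);
      }
      return 0;
    }
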
 281 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 282   // Allocate a new index in table to hold the object once it's been patched.
 283   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
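Note on jobject2reg_with_patching: allocate_oop_index(NULL) reserves an oop-table index for an object that does not exist yet; code is emitted against the placeholder and patched once the object is resolved. A standalone sketch of that allocate-now, patch-later idiom (OopTable is illustrative, not HotSpot's OopRecorder API):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct OopTable {
      std::vector<const void*> entries;
      // Reserve an index for an object we don't have yet (placeholder NULL).
      int allocate_index() { entries.push_back(nullptr); return (int)entries.size() - 1; }
      // Later, patching fills the slot in place; emitted code used the index.
      void patch(int index, const void* resolved) { entries[index] = resolved; }
    };

    int main() {
      OopTable table;
      int idx = table.allocate_index();  // Emit code against this index now.
      static int the_object = 42;
      table.patch(idx, &the_object);     // Resolution happens later.
      assert(table.entries[idx] == &the_object);
      std::printf("index %d patched\n", idx);
      return 0;
    }
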


1141   __ z_br(Z_R14); // Return to caller.
1142 }
1143 
1144 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1145   AddressLiteral pp(os::get_polling_page());
1146   __ load_const_optimized(tmp->as_register_lo(), pp);
1147   guarantee(info != NULL, "Shouldn't be NULL");
1148   add_debug_info_for_branch(info);
1149   int offset = __ offset();
1150   __ relocate(relocInfo::poll_type);
1151   __ load_from_polling_page(tmp->as_register_lo());
1152   return offset;
1153 }
1154 
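Note on safepoint_poll: the poll is just a load from a dedicated polling page, preceded by a relocation record so the VM can locate the instruction; to stop compiled threads, the VM protects the page so the load traps, diverting execution into the safepoint machinery. Below is a POSIX-only conceptual demo of that protect-and-trap mechanism; it is not HotSpot code, and real signal handling is far more careful than this sketch.

    #include <csetjmp>
    #include <csignal>
    #include <cstdio>
    #include <sys/mman.h>
    #include <unistd.h>

    static sigjmp_buf jump_buffer;

    static void segv_handler(int) {
      siglongjmp(jump_buffer, 1);  // Unwind out of the faulting poll.
    }

    int main() {
      long page = sysconf(_SC_PAGESIZE);
      char* poll_page = (char*)mmap(nullptr, page, PROT_READ,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (poll_page == MAP_FAILED) return 1;
      std::signal(SIGSEGV, segv_handler);

      if (sigsetjmp(jump_buffer, 1) == 0) {
        volatile char c = *poll_page;          // Page readable: poll falls through.
        (void)c;
        std::puts("no safepoint pending");
        mprotect(poll_page, page, PROT_NONE);  // VM "arms" the poll.
        volatile char d = *poll_page;          // Faults -> handler -> safepoint path.
        (void)d;
      } else {
        std::puts("poll faulted: take safepoint");
      }
      munmap(poll_page, page);
      return 0;
    }
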
1155 void LIR_Assembler::emit_static_call_stub() {
1156 
1157   // Stub is fixed up when the corresponding call is converted from calling
1158   // compiled code to calling interpreted code.
1159 
1160   address call_pc = __ pc();
1161   address stub = __ start_a_stub(call_stub_size());
1162   if (stub == NULL) {
1163     bailout("static call stub overflow");
1164     return;
1165   }
1166 
1167   int start = __ offset();
1168 
1169   __ relocate(static_stub_Relocation::spec(call_pc));
1170 
1171   // See also Matcher::interpreter_method_oop_reg().
1172   AddressLiteral meta = __ allocate_metadata_address(NULL);
1173   bool success = __ load_const_from_toc(Z_method, meta);
1174 
1175   __ set_inst_mark();
1176   AddressLiteral a((address)-1);
1177   success = success && __ load_const_from_toc(Z_R1, a);
1178   if (!success) {
1179     bailout("const section overflow");
1180     return;
1181   }
1182 
1183   __ z_br(Z_R1);
1184   assert(__ offset() - start <= call_stub_size(), "stub too big");
1185   __ end_a_stub(); // Update current stubs pointer and restore insts_end.
1186 }
1187 
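Note on emit_static_call_stub: the stub carries a placeholder metadata load (NULL) and a branch to a placeholder address ((address)-1); when the call site is resolved, both are patched so the site can dispatch to the interpreter with Z_method set. A toy model of such a patchable trampoline (CallStub and interpreter_entry are illustrative names):

    #include <cstdio>

    // Toy static-call stub: both fields start as placeholders and are patched
    // at resolution time, like the NULL metadata and (address)-1 above.
    struct CallStub {
      const void* method_metadata = nullptr;  // Patched: which method.
      void (*target)(const void*) = nullptr;  // Patched: where to jump.
      void invoke() const { target(method_metadata); }
    };

    static void interpreter_entry(const void* metadata) {
      std::printf("interpreter entry, metadata=%p\n", metadata);
    }

    int main() {
      CallStub stub;
      static int method_token = 0;       // Stand-in for a Method*.
      stub.method_metadata = &method_token;  // Resolution patches the stub...
      stub.target = interpreter_entry;       // ...then calls go through it.
      stub.invoke();
      return 0;
    }
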
1188 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1189   bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
1190   if (opr1->is_single_cpu()) {
1191     Register reg1 = opr1->as_register();
1192     if (opr2->is_single_cpu()) {
1193       // cpu register - cpu register
1194       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1195         __ z_clgr(reg1, opr2->as_register());
1196       } else {
1197         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1198         if (unsigned_comp) {
1199           __ z_clr(reg1, opr2->as_register());
1200         } else {
1201           __ z_cr(reg1, opr2->as_register());
1202         }
1203       }
1204     } else if (opr2->is_stack()) {
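Note on comp_op: oop and array operands are always compared unsigned (z_clgr) because they are addresses, while integer operands select signed z_cr or unsigned z_clr from the LIR condition (belowEqual/aboveEqual imply an unsigned comparison). A small standalone demo of why the distinction matters once the high bit is set:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t bits = 0x80000000u;       // High bit set: "negative" if signed.
      int32_t  as_signed   = (int32_t)bits;
      uint32_t as_unsigned = bits;

      // Signed compare (like z_cr): 0x80000000 is INT32_MIN, so less than 1.
      std::printf("signed:   %d\n", as_signed < 1);     // Prints 1 (true).
      // Unsigned compare (like z_clr/z_clgr): a large value, not less than 1.
      std::printf("unsigned: %d\n", as_unsigned < 1u);  // Prints 0 (false).
      return 0;
    }
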

