/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciValueKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += index->as_jint() << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        // add the displacement to the (shifted) index; adding it to the
        // freshly allocated tmp would read an undefined value
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(tmp, LIR_OprFact::intptrConst(large_disp));
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL;  // unreachable
  }
  return r;
}



void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch (addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}
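// The rewrites above rely on the identities x * (2^k + 1) == (x << k) + x
// and x * (2^k - 1) == (x << k) - x; e.g. x * 5 becomes (x << 2) + x and
// x * 7 becomes (x << 3) - x.  Any other constant falls back to a real
// multiply.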

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking || x->maybe_valuetype()) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeStub* throw_imse_stub =
      x->maybe_valuetype() ?
      new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
      NULL;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_imse_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);

}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    // (note the naming: 'right' holds the dividend x->x() and 'left' the
    // divisor x->y(); the dividend ends up in the first C argument below)
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2_long(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
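    // AArch64 add/sub immediates are (roughly) 12-bit values, optionally
    // shifted left by 12; operand_valid_for_add_sub_immediate below checks
    // whether the constant can be encoded that way.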
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
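        // Only multipliers of the form 2^n, 2^n - 1 or 2^n + 1 can be
        // strength-reduced to shift/add/sub sequences (see
        // strength_reduce_multiply above); anything else needs the
        // constant in a register.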
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
  case floatTag:
  case doubleTag:  do_ArithmeticOp_FPU(x);  return;
  case longTag:    do_ArithmeticOp_Long(x); return;
  case intTag:     do_ArithmeticOp_Int(x);  return;
  default: ShouldNotReachHere(); return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
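// AArch64 logical instructions accept only "bitmask immediates" (rotated,
// replicated runs of contiguous bits), not arbitrary constants, so
// do_LogicOp below keeps a constant right-hand side un-loaded only when
// operand_valid_for_logical_immediate accepts it.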
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (type == T_OBJECT || type == T_ARRAY) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
    Unimplemented();
  }
  // the cas_* operations above leave their status in r8; this xor with 1
  // appears to assume r8 is 0 on success, producing a Java boolean result
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }
  switch (x->id()) {
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt: {
    assert(x->number_of_arguments() == 1, "wrong type");
    LIRItem value(x->argument_at(0), this);
    value.load_item();
    LIR_Opr dst = rlock_result(x);

    switch (x->id()) {
    case vmIntrinsics::_dsqrt: {
      __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dabs: {
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    break;
  }
  default:
    ShouldNotReachHere();
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

  // each intrinsic calls the tuned stub when one was generated, and
  // otherwise falls back to the shared C runtime implementation
  switch (x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateCRC32: {
    LIRItem crc(x->argument_at(0), this);
    LIRItem val(x->argument_at(1), this);
    // val is destroyed by update_crc32
    val.set_destroys_register();
    crc.load_item();
    val.load_item();
    __ update_crc32(crc.result(), val.result(), result);
    break;
  }
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem len(x->argument_at(3), this);
    buf.load_item();
    off.load_nonconstant();

    LIR_Opr index = off.result();
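    // For updateBytes the buffer is a Java byte[] and the array header must
    // be skipped; for updateByteBufferCRC32 'buf' is already a raw memory
    // address, so no header adjustment is applied.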
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (is_updateBytes) {
      base_op = access_resolve(ACCESS_READ, base_op);
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    len.load_item_force(cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem end(x->argument_at(3), this);

    buf.load_item();
    off.load_nonconstant();
    end.load_nonconstant();

    // len = end - off
    LIR_Opr len  = end.result();
    LIR_Opr tmpA = new_register(T_INT);
    LIR_Opr tmpB = new_register(T_INT);
    __ move(end.result(), tmpA);
    __ move(off.result(), tmpB);
    __ sub(tmpA, tmpB, tmpA);
    len = tmpA;

    LIR_Opr index = off.result();
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (is_updateBytes) {
      base_op = access_resolve(ACCESS_READ, base_op);
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    __ move(len, cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                    ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  // no ConversionStub is needed here: AArch64's fcvtzs/fcvtzd saturate
  // out-of-range values and map NaN to zero, matching Java semantics
  ConversionStub* stub = NULL;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewValueTypeInstance(NewValueTypeInstance* x) {
  // Mapping to do_NewInstance (same code)
  CodeEmitInfo* info = state_for(x, x->state());
  x->set_to_object_type();
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);

}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  ciKlass* obj = (ciKlass*) x->exact_type();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_never_null());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }

  klass2reg_with_patching(klass_reg, obj, patching_info);
  if (x->is_never_null()) {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path);
  } else {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
  }

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));
  if (x->is_never_null()) {
    __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
  }

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }


  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_never_null());

}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
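    // (the counter is bumped based on the outcome of the condition, so only
    // taken backedges are counted; backedge counters ultimately drive the
    // tiered-compilation / OSR thresholds)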
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
   return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}
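
// To illustrate the 8179954 comment above -- a minimal sketch, not code in
// this file: a Dekker-style test where each thread writes one volatile
// field and then reads the other.
//
//   // Java:  volatile int a = 0, b = 0;
//   // Thread 1:  a = 1;  r1 = b;
//   // Thread 2:  b = 1;  r2 = a;
//
// Sequential consistency forbids r1 == 0 && r2 == 0.  If the stores are
// compiled to STLR (e.g. by C2) while the loads are compiled to LDR;DMB
// (by C1) without the leading membar emitted above, that forbidden
// outcome can be observed.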