/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_arm.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

void LIRItem::load_byte_item() {
  load_item();
}

void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() {
  return FrameMap::Exception_oop_opr;
}

LIR_Opr LIRGenerator::exceptionPcOpr() {
  return FrameMap::Exception_pc_opr;
}

LIR_Opr LIRGenerator::syncLockOpr() {
  return new_register(T_INT);
}

LIR_Opr LIRGenerator::syncTempOpr() {
  return new_register(T_OBJECT);
}

LIR_Opr LIRGenerator::getThreadTemp() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::atomicLockOpr() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::Int_result_opr;    break;
    case objectTag:  opr = FrameMap::Object_result_opr; break;
    case longTag:    opr = FrameMap::Long_result_opr;   break;
    case floatTag:   opr = FrameMap::Float_result_opr;  break;
    case doubleTag:  opr = FrameMap::Double_result_opr; break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
#ifdef AARCH64
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return jint_cast(v->type()->as_FloatConstant()->value()) == 0;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0;
  }
#endif // AARCH64
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
#ifdef AARCH64
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_LongConstant()->value());
#else
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
#endif // AARCH64
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


#ifdef AARCH64

static bool can_inline_as_constant_in_cmp(Value v) {
  jlong constant;
  if (v->type()->as_IntConstant() != NULL) {
    constant = v->type()->as_IntConstant()->value();
  } else if (v->type()->as_LongConstant() != NULL) {
    constant = v->type()->as_LongConstant()->value();
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  } else {
    return false;
  }

  return Assembler::is_arith_imm_in_range(constant) || Assembler::is_arith_imm_in_range(-constant);
}


static bool can_inline_as_constant_in_logic(Value v) {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::LogicalImmediate(v->type()->as_IntConstant()->value(), true).is_encoded();
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::LogicalImmediate(v->type()->as_LongConstant()->value(), false).is_encoded();
  }
  return false;
}


#endif // AARCH64


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


static LIR_Opr make_constant(BasicType type, jlong c) {
  switch (type) {
    case T_ADDRESS:
    case T_OBJECT:  return LIR_OprFact::intptrConst(c);
    case T_LONG:    return LIR_OprFact::longConst(c);
    case T_INT:     return LIR_OprFact::intConst(c);
    default: ShouldNotReachHere();
    return LIR_OprFact::intConst(-1);
  }
}

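// AArch64-only helper (see below): computes dest = src + c. Constants that fit in
// 24 bits are folded into one or two add/sub immediates (12 bits at a time, subtracting
// when c is negative); larger constants are first materialized into dest and then added.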
#ifdef AARCH64

void LIRGenerator::add_constant(LIR_Opr src, jlong c, LIR_Opr dest) {
  if (c == 0) {
    __ move(src, dest);
    return;
  }

  BasicType type = src->type();
  bool is_neg = (c < 0);
  c = ABS(c);

  if ((c >> 24) == 0) {
    for (int shift = 0; shift <= 12; shift += 12) {
      int part = ((int)c) & (right_n_bits(12) << shift);
      if (part != 0) {
        if (is_neg) {
          __ sub(src, make_constant(type, part), dest);
        } else {
          __ add(src, make_constant(type, part), dest);
        }
        src = dest;
      }
    }
  } else {
    __ move(make_constant(type, c), dest);
    if (is_neg) {
      __ sub(src, dest, dest);
    } else {
      __ add(src, dest, dest);
    }
  }
}

#endif // AARCH64


void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
  assert(c != 0, "must be");
#ifdef AARCH64
  add_constant(src, c, dest);
#else
  // Find first non-zero bit
  int shift = 0;
  while ((c & (3 << shift)) == 0) {
    shift += 2;
  }
  // Add the least significant part of the constant
  int mask = 0xff << shift;
  __ add(src, LIR_OprFact::intConst(c & mask), dest);
  // Add up to 3 other parts of the constant;
  // each of them can be represented as rotated_imm
  if (c & (mask << 8)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 8)), dest);
  }
  if (c & (mask << 16)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 16)), dest);
  }
  if (c & (mask << 24)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest);
  }
#endif // AARCH64
}

static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) {
  return new LIR_Address(base, index, scale, 0, type);
}

LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

#ifndef AARCH64
  if (base->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base, tmp);
    base = tmp;
  }
  if (index != LIR_OprFact::illegalOpr && index->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, index, tmp);
    index = tmp;
  }
  // At this point base and index should be all ints and not constants
  assert(base->is_single_cpu() && !base->is_constant(), "base should be a non-constant int");
  assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be a non-constant int");
#endif

  int max_disp;
  bool disp_is_in_range;
  bool embedded_shift;

#ifdef AARCH64
  int align = exact_log2(type2aelembytes(type, true));
  assert((disp & right_n_bits(align)) == 0, "displacement is not aligned");
  assert(shift == 0 || shift == align, "shift should be zero or equal to embedded align");
  max_disp = (1 << 12) << align;

  if (disp >= 0) {
    disp_is_in_range = Assembler::is_unsigned_imm_in_range(disp, 12, align);
  } else {
    disp_is_in_range = Assembler::is_imm_in_range(disp, 9, 0);
  }

  embedded_shift = true;
#else
  switch (type) {
    case T_BYTE:
    case T_SHORT:
    case T_CHAR:
      max_disp = 256;          // ldrh, ldrsb encoding has 8-bit offset
      embedded_shift = false;
      break;
    case T_FLOAT:
    case T_DOUBLE:
      max_disp = 1024;         // flds, fldd have 8-bit offset multiplied by 4
      embedded_shift = false;
      break;
    case T_LONG:
      max_disp = 4096;
      embedded_shift = false;
      break;
    default:
      max_disp = 4096;         // ldr, ldrb allow 12-bit offset
      embedded_shift = true;
  }

  disp_is_in_range = (-max_disp < disp && disp < max_disp);
#endif // !AARCH64

  if (index->is_register()) {
    LIR_Opr tmp = new_pointer_register();
    if (!disp_is_in_range) {
      add_large_constant(base, disp, tmp);
      base = tmp;
      disp = 0;
    }
    LIR_Address* addr = make_address(base, index, (LIR_Address::Scale)shift, type);
    if (disp == 0 && embedded_shift) {
      // can use ldr/str instruction with register index
      return addr;
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::address(addr), tmp); // add with shifted/extended register
      return new LIR_Address(tmp, disp, type);
    }
  }

  // If the displacement is too large to be inlined into LDR instruction,
  // generate large constant with additional sequence of ADD instructions
  int excess_disp = disp & ~(max_disp - 1);
  if (excess_disp != 0) {
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(base, excess_disp, tmp);
    base = tmp;
  }
  return new LIR_Address(base, disp & (max_disp - 1), type);
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type) {
  int base_offset = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);

  if (index_opr->is_constant()) {
    int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
    return generate_address(array_opr, offset, type);
  } else {
    assert(index_opr->is_register(), "must be");
    int scale = exact_log2(elem_size);
    return generate_address(array_opr, index_opr, scale, base_offset, type);
  }
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  assert(type == T_LONG || type == T_INT, "should be");
  LIR_Opr r = make_constant(type, x);
#ifdef AARCH64
  bool imm_in_range = Assembler::LogicalImmediate(x, type == T_INT).is_encoded();
#else
  bool imm_in_range = AsmOperand::is_rotated_imm(x);
#endif // AARCH64
  if (!imm_in_range) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, make_constant(addr->type(), step), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info);
  __ cmp(condition, FrameMap::LR_opr, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info);
  __ cmp(condition, reg, FrameMap::LR_opr);
}

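// Strength-reduce 'left * c' when c + 1 or c - 1 is a power of two:
// the multiply becomes a single shift-and-subtract (rsb with shifted register on ARM32)
// or shift-and-add, avoiding a mul instruction.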
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
#ifdef AARCH64
    __ shift_left(left, log2_intptr(c + 1), result);
    __ sub(result, left, result);
#else
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c + 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register
#endif // AARCH64
    return true;
  } else if (is_power_of_2(c - 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c - 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ add(left, LIR_OprFact::address(addr), result); // add with shifted register
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  assert(item->type() == T_INT, "other types are not expected");
  __ store(item, new LIR_Address(FrameMap::SP_opr, in_bytes(offset_from_sp), item->type()));
}

void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
  assert(CardTable::dirty_card_val() == 0,
         "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
#ifdef AARCH64
  // AARCH64 has a register that is constant zero. We can use that one to set the
  // value in the card table to dirty.
  __ move(FrameMap::ZR_opr, card_addr);
#else // AARCH64
  if ((ci_card_table_address_as<intx>() & 0xff) == 0) {
    // If the card table base address is aligned to 256 bytes, we can use the register
    // that contains the card_table_base_address.
    __ move(value, card_addr);
  } else {
    // Otherwise we need to create a register containing that value.
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#endif // AARCH64
}

void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
  assert(addr->is_register(), "must be a register at this point");

  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  CardTable* ct = ctbs->card_table();

  LIR_Opr tmp = FrameMap::LR_ptr_opr;

  // TODO-AARCH64: check performance
  bool load_card_table_base_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw());
  if (load_card_table_base_const) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

#ifdef AARCH64
  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
  LIR_Opr tmp2 = tmp;
  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
  LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
#else
  // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
  // byte instruction does not support the addressing mode we need.
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
#endif
  if (UseCondCardMark) {
    if (ct->scanned_concurrently()) {
      __ membar_storeload();
    }
    LIR_Opr cur_value = new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
    __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
    set_card(tmp, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    if (ct->scanned_concurrently()) {
      __ membar_storestore();
    }
    set_card(tmp, card_addr);
  }
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);

  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  // Need a scratch register for biased locking on arm
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_pointer_register();
  } else {
    scratch = atomicLockOpr();
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();
  set_no_result(x);

  LIR_Opr obj_temp = new_pointer_register();
  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  monitor_exit(obj_temp, lock, hdr, atomicLockOpr(), x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
  address runtime_func = NULL;
  ValueTag tag = x->type()->tag();
  if (tag == floatTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
  } else if (tag == doubleTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
  }
  if (runtime_func != NULL) {
    set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL));
    return;
  }
#endif // __SOFTFP__
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_frem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
#ifdef __SOFTFP__
    // Call function compiled with -msoft-float.

    // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.

    case Bytecodes::_fadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc);
      break;
    case Bytecodes::_fmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fmul);
      break;
    case Bytecodes::_fsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc);
      break;
    case Bytecodes::_fdiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fdiv);
      break;
    case Bytecodes::_dadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc);
      break;
    case Bytecodes::_dmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dmul);
      break;
    case Bytecodes::_dsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc);
      break;
    case Bytecodes::_ddiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_ddiv);
      break;
    default:
      ShouldNotReachHere();
#else // __SOFTFP__
    default: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
      return;
    }
#endif // __SOFTFP__
  }

  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
}


void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) {
  assert(right_arg->is_register(), "must be");
  __ cmp(lir_cond_equal, right_arg, make_constant(type, 0));
  __ branch(lir_cond_equal, type, new DivByZeroStub(info));
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  CodeEmitInfo* info = NULL;
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    info = state_for(x);
  }

#ifdef AARCH64
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();
  switch (x->op()) {
    case Bytecodes::_ldiv:
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_LONG, info);
      __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
      break;

    case Bytecodes::_lrem: {
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_LONG, info);
      // a % b is implemented with 2 instructions:
      // tmp = a/b       (sdiv)
      // res = a - b*tmp (msub)
      LIR_Opr tmp = FrameMap::as_long_opr(Rtemp);
      __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
      break;
    }

    case Bytecodes::_lmul:
      if (right_arg->is_constant() && is_power_of_2_long(right_arg->get_jlong_constant())) {
        right_arg->dont_load_item();
        __ shift_left(left_arg->result(), exact_log2_long(right_arg->get_jlong_constant()), rlock_result(x));
      } else {
        right_arg->load_item();
        __ mul(left_arg->result(), right_arg->result(), rlock_result(x));
      }
      break;

    case Bytecodes::_ladd:
    case Bytecodes::_lsub:
      if (right_arg->is_constant()) {
        jlong c = right_arg->get_jlong_constant();
        add_constant(left_arg->result(), (x->op() == Bytecodes::_ladd) ? c : -c, rlock_result(x));
      } else {
        right_arg->load_item();
        arithmetic_op_long(x->op(), rlock_result(x), left_arg->result(), right_arg->result(), NULL);
      }
      break;

    default:
      ShouldNotReachHere();
      return;
  }
#else
  switch (x->op()) {
    case Bytecodes::_ldiv:
    case Bytecodes::_lrem: {
      LIRItem right(x->y(), this);
      right.load_item();
      make_div_by_zero_check(right.result(), T_LONG, info);
    }
    // Fall through
    case Bytecodes::_lmul: {
      address entry;
      switch (x->op()) {
        case Bytecodes::_lrem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
          break;
        case Bytecodes::_ldiv:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
          break;
        case Bytecodes::_lmul:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
          break;
        default:
          ShouldNotReachHere();
          return;
      }
      LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case Bytecodes::_ladd:
    case Bytecodes::_lsub: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
      break;
    }
    default:
      ShouldNotReachHere();
  }
#endif // AARCH64
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    if (x->op() == Bytecodes::_idiv && right_arg->is_constant() && is_power_of_2(right_arg->get_jint_constant())) {
      left_arg->load_item();
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      LIR_Opr result = rlock_result(x);
      __ idiv(left_arg->result(), right_arg->result(), result, tmp, info);
    } else {
#ifdef AARCH64
      left_arg->load_item();
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_INT, info);
      if (x->op() == Bytecodes::_idiv) {
        __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
      } else {
        // a % b is implemented with 2 instructions:
        // tmp = a/b       (sdiv)
        // res = a - b*tmp (msub)
        LIR_Opr tmp = FrameMap::as_opr(Rtemp);
        __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
      }
#else
      left_arg->load_item_force(FrameMap::R0_opr);
      right_arg->load_item_force(FrameMap::R2_opr);
      LIR_Opr tmp = FrameMap::R1_opr;
      LIR_Opr result = rlock_result(x);
      LIR_Opr out_reg;
      if (x->op() == Bytecodes::_irem) {
        out_reg = FrameMap::R0_opr;
        __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      } else { // (x->op() == Bytecodes::_idiv)
        out_reg = FrameMap::R1_opr;
        __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      }
      __ move(out_reg, result);
#endif // AARCH64
    }

#ifdef AARCH64
  } else if (((x->op() == Bytecodes::_iadd) || (x->op() == Bytecodes::_isub)) && right_arg->is_constant()) {
    left_arg->load_item();
    jint c = right_arg->get_jint_constant();
    right_arg->dont_load_item();
    add_constant(left_arg->result(), (x->op() == Bytecodes::_iadd) ? c : -c, rlock_result(x));
#endif // AARCH64

  } else {
    left_arg->load_item();
    if (x->op() == Bytecodes::_imul && right_arg->is_constant()) {
      jint c = right_arg->get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      AARCH64_ONLY(assert(!right_arg->is_constant(), "constant right_arg is already handled by this moment");)
      right_arg->load_nonconstant();
    }
    rlock_result(x);
    assert(right_arg->is_constant() || right_arg->is_register(), "wrong state of right");
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), NULL);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

#ifndef AARCH64
  if (value.type()->is_long()) {
    count.set_destroys_register();
  }
#endif // !AARCH64

  if (count.is_constant()) {
    assert(count.type()->as_IntConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }
  value.load_item();

  LIR_Opr res = rlock_result(x);
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

#ifdef AARCH64
  if (right.is_constant() && can_inline_as_constant_in_logic(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  logic_op(x->op(), rlock_result(x), left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
#ifdef __SOFTFP__
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_fcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
      break;
    case Bytecodes::_fcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
      break;
    case Bytecodes::_dcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
      break;
    case Bytecodes::_dcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
      break;
    case Bytecodes::_lcmp: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_nonconstant();
      LIR_Opr reg = rlock_result(x);
      __ lcmp2int(left.result(), right.result(), reg);
      return;
    }
    default:
      ShouldNotReachHere();
  }
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
#else // __SOFTFP__
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();

#ifdef AARCH64
  if (right.is_constant() && can_inline_as_constant_in_cmp(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
#endif // __SOFTFP__
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (type == T_OBJECT || type == T_ARRAY) {
#ifdef AARCH64
    if (UseCompressedOops) {
      tmp1 = new_pointer_register();
      tmp2 = new_pointer_register();
    }
#endif
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
  } else if (type == T_LONG) {
#ifndef AARCH64
    tmp1 = new_register(T_LONG);
#endif // !AARCH64
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
  } else {
    ShouldNotReachHere();
  }
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
  LIR_Opr tmp = new_register(type);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  address runtime_func;
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ abs(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsqrt: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ sqrt(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsin:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case vmIntrinsics::_dpow:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      return;
  }

  LIR_Opr result;
  if (x->number_of_arguments() == 1) {
    result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
  } else {
    assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
    result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
  }
  set_result(x, result);
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // We put arguments into the same registers which are used for a Java call.
  // Note: we used fixed registers for all arguments because all registers
  // are caller-saved, so register allocator treats them all as used.
  src.load_item_force    (FrameMap::R0_oop_opr);
  src_pos.load_item_force(FrameMap::R1_opr);
  dst.load_item_force    (FrameMap::R2_oop_opr);
  dst_pos.load_item_force(FrameMap::R3_opr);
  length.load_item_force (FrameMap::R4_opr);
  LIR_Opr tmp = (FrameMap::R5_opr);
  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(),
               tmp, expected_type, flags, info);
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

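// On ARM32, conversions between long and float/double go through SharedRuntime helpers;
// with __SOFTFP__, the remaining int/float conversions call the AEABI routines
// (d2i goes to SharedRuntime::d2i). All other cases are emitted as a single LIR convert op.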
void LIRGenerator::do_Convert(Convert* x) {
  address runtime_func;
  switch (x->op()) {
#ifndef AARCH64
    case Bytecodes::_l2f:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    case Bytecodes::_l2d:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_f2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_d2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
#ifdef __SOFTFP__
    case Bytecodes::_f2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2d);
      break;
    case Bytecodes::_d2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_d2f);
      break;
    case Bytecodes::_i2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2f);
      break;
    case Bytecodes::_i2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2d);
      break;
    case Bytecodes::_f2i:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2iz);
      break;
    case Bytecodes::_d2i:
      // This is implemented in hard float in assembler on arm but a call
      // on other platforms.
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
#endif // __SOFTFP__
#endif // !AARCH64
    default: {
      LIRItem value(x->value(), this);
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, NULL);
      return;
    }
  }

  LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
  set_result(x, result);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;

  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3,
               LIR_OprFact::illegalOpr, klass_reg, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr len = length.result();

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewTypeArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr len = length.result();

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewObjectArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    LIR_Opr sz = size->result();
    assert(sz->type() == T_INT, "should be");
    store_stack_parameter(sz, in_ByteSize(i * BytesPerInt));
  }

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::SP_opr;
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr, reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();

  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  }

  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(),
               info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


#ifdef __SOFTFP__
// Turn operator if (f <op> g) into runtime call:
//     call _aeabi_fcmp<op>(f, g)
//     cmp(eq, 1)
//     branch(eq, true path).
void LIRGenerator::do_soft_float_compare(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  address runtime_func;
  // unordered comparison gets the wrong answer because aeabi functions
  // return false.
  bool unordered_is_true = x->unordered_is_true();
  // reverse of condition for ne
  bool compare_to_zero = false;
  switch (lir_cond(cond)) {
    case lir_cond_notEqual:
      compare_to_zero = true;  // fall through
    case lir_cond_equal:
      runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpeq):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpeq);
      break;
    case lir_cond_less:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmplt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmplt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmplt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmplt);
      }
      break;
    case lir_cond_lessEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmple):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmple);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmple):
            CAST_FROM_FN_PTR(address, __aeabi_dcmple);
      }
      break;
    case lir_cond_greaterEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpge):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpge);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpge):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpge);
      }
      break;
    case lir_cond_greater:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpgt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpgt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpgt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpgt);
      }
      break;
    case lir_cond_aboveEqual:
    case lir_cond_belowEqual:
      ShouldNotReachHere();  // We're not going to get these.
    default:
      assert(lir_cond(cond) == lir_cond_always, "must be");
      ShouldNotReachHere();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  // Call float compare function, returns (1,0) if true or false.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
  __ cmp(lir_cond_equal, result,
         compare_to_zero ?
           LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
  profile_branch(x, cond);
  move_to_phi(x->state());
  __ branch(lir_cond_equal, T_INT, x->tsux());
}
#endif // __SOFTFP__

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

#ifdef __SOFTFP__
  if (tag == floatTag || tag == doubleTag) {
    do_soft_float_compare(x);
    assert(x->default_sux() == x->fsux(), "wrong destination above");
    __ jump(x->default_sux());
    return;
  }
#endif // __SOFTFP__

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

#ifndef AARCH64
  if (tag == longTag) {
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
#endif // !AARCH64

  xin->load_item();
  LIR_Opr left = xin->result();
  LIR_Opr right;

#ifdef AARCH64
  if (yin->is_constant() && can_inline_as_constant_in_cmp(yin->value())) {
    yin->dont_load_item();
  } else {
    yin->load_item();
  }
  right = yin->result();
#else
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_nonconstant();
    right = yin->result();
  }
#endif // AARCH64

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::Rthread_opr;
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::R0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::R0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, getThreadTemp(), LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifndef AARCH64
  if (value->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with stlr instruction
  __ store(value, address, info, lir_patch_none);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifndef AARCH64
  if (result->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with ldar instruction
  __ load(address, result, info, lir_patch_none);
}