/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_arm.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

void LIRItem::load_byte_item() {
  load_item();
}

void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}
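// Note: load_nonconstant() leaves the value as an inlined constant operand
// when this platform can encode it directly in an instruction (see
// can_inline_as_constant() below); otherwise the value is forced into a
// register, as for load_item().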
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() {
  return FrameMap::Exception_oop_opr;
}

LIR_Opr LIRGenerator::exceptionPcOpr() {
  return FrameMap::Exception_pc_opr;
}

LIR_Opr LIRGenerator::syncLockOpr() {
  return new_register(T_INT);
}

LIR_Opr LIRGenerator::syncTempOpr() {
  return new_register(T_OBJECT);
}

LIR_Opr LIRGenerator::getThreadTemp() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::atomicLockOpr() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::Int_result_opr;    break;
    case objectTag: opr = FrameMap::Object_result_opr; break;
    case longTag:   opr = FrameMap::Long_result_opr;   break;
    case floatTag:  opr = FrameMap::Float_result_opr;  break;
    case doubleTag: opr = FrameMap::Double_result_opr; break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
#ifdef AARCH64
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return jint_cast(v->type()->as_FloatConstant()->value()) == 0;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0;
  }
#endif // AARCH64
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
#ifdef AARCH64
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_LongConstant()->value());
#else
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
#endif // AARCH64
  }
  return false;
}
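// Background (informal): on 32-bit ARM an arithmetic immediate is an 8-bit
// value rotated right by an even amount, so e.g. 0xff and 0x3fc are encodable
// while 0x101 (9 significant bits) is not; is_arith_imm_in_range() checks
// this encoding. On AArch64, arithmetic immediates are 12-bit values,
// optionally shifted left by 12.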
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


#ifdef AARCH64

static bool can_inline_as_constant_in_cmp(Value v) {
  jlong constant;
  if (v->type()->as_IntConstant() != NULL) {
    constant = v->type()->as_IntConstant()->value();
  } else if (v->type()->as_LongConstant() != NULL) {
    constant = v->type()->as_LongConstant()->value();
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  } else {
    return false;
  }

  return Assembler::is_arith_imm_in_range(constant) || Assembler::is_arith_imm_in_range(-constant);
}


static bool can_inline_as_constant_in_logic(Value v) {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::LogicalImmediate(v->type()->as_IntConstant()->value(), true).is_encoded();
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::LogicalImmediate(v->type()->as_LongConstant()->value(), false).is_encoded();
  }
  return false;
}


#endif // AARCH64


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


static LIR_Opr make_constant(BasicType type, jlong c) {
  switch (type) {
    case T_ADDRESS:
    case T_OBJECT:  return LIR_OprFact::intptrConst(c);
    case T_LONG:    return LIR_OprFact::longConst(c);
    case T_INT:     return LIR_OprFact::intConst(c);
    default: ShouldNotReachHere();
    return LIR_OprFact::intConst(-1);
  }
}

#ifdef AARCH64

void LIRGenerator::add_constant(LIR_Opr src, jlong c, LIR_Opr dest) {
  if (c == 0) {
    __ move(src, dest);
    return;
  }

  BasicType type = src->type();
  bool is_neg = (c < 0);
  c = ABS(c);

  if ((c >> 24) == 0) {
    for (int shift = 0; shift <= 12; shift += 12) {
      int part = ((int)c) & (right_n_bits(12) << shift);
      if (part != 0) {
        if (is_neg) {
          __ sub(src, make_constant(type, part), dest);
        } else {
          __ add(src, make_constant(type, part), dest);
        }
        src = dest;
      }
    }
  } else {
    __ move(make_constant(type, c), dest);
    if (is_neg) {
      __ sub(src, dest, dest);
    } else {
      __ add(src, dest, dest);
    }
  }
}

#endif // AARCH64


void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
  assert(c != 0, "must be");
#ifdef AARCH64
  add_constant(src, c, dest);
#else
  // Find first non-zero bit
  int shift = 0;
  while ((c & (3 << shift)) == 0) {
    shift += 2;
  }
  // Add the least significant part of the constant
  int mask = 0xff << shift;
  __ add(src, LIR_OprFact::intConst(c & mask), dest);
  // Add up to 3 other parts of the constant;
  // each of them can be represented as rotated_imm
  if (c & (mask << 8)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 8)), dest);
  }
  if (c & (mask << 16)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 16)), dest);
  }
  if (c & (mask << 24)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest);
  }
#endif // AARCH64
}
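// Illustration (not from the original source): on 32-bit ARM the code above
// splits the constant into at most four byte-wide slices, each individually
// encodable as a rotated immediate. For c = 0x12345678 the shift settles at 2
// (the lowest set bit is bit 3), mask = 0x3fc, and the emitted sequence is:
//   add(src,  0x00000278, dest)   // c & mask
//   add(dest, 0x00005400, dest)   // c & (mask << 8)
//   add(dest, 0x02340000, dest)   // c & (mask << 16)
//   add(dest, 0x10000000, dest)   // c & (mask << 24)
// which sums back to 0x12345678.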
static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) {
  return new LIR_Address(base, index, scale, 0, type);
}

LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

#ifndef AARCH64
  if (base->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base, tmp);
    base = tmp;
  }
  if (index != LIR_OprFact::illegalOpr && index->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, index, tmp);
    index = tmp;
  }
  // At this point base and index should be all ints and not constants
  assert(base->is_single_cpu() && !base->is_constant(), "base should be a non-constant int");
  assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be a non-constant int");
#endif

  int max_disp;
  bool disp_is_in_range;
  bool embedded_shift;

#ifdef AARCH64
  int align = exact_log2(type2aelembytes(type, true));
  assert((disp & right_n_bits(align)) == 0, "displacement is not aligned");
  assert(shift == 0 || shift == align, "shift should be zero or equal to embedded align");
  max_disp = (1 << 12) << align;

  if (disp >= 0) {
    disp_is_in_range = Assembler::is_unsigned_imm_in_range(disp, 12, align);
  } else {
    disp_is_in_range = Assembler::is_imm_in_range(disp, 9, 0);
  }

  embedded_shift = true;
#else
  switch (type) {
    case T_BYTE:
    case T_SHORT:
    case T_CHAR:
      max_disp = 256;          // ldrh, ldrsb encoding has 8-bit offset
      embedded_shift = false;
      break;
    case T_FLOAT:
    case T_DOUBLE:
      max_disp = 1024;         // flds, fldd have 8-bit offset multiplied by 4
      embedded_shift = false;
      break;
    case T_LONG:
      max_disp = 4096;
      embedded_shift = false;
      break;
    default:
      max_disp = 4096;         // ldr, ldrb allow 12-bit offset
      embedded_shift = true;
  }

  disp_is_in_range = (-max_disp < disp && disp < max_disp);
#endif // !AARCH64

  if (index->is_register()) {
    LIR_Opr tmp = new_pointer_register();
    if (!disp_is_in_range) {
      add_large_constant(base, disp, tmp);
      base = tmp;
      disp = 0;
    }
    LIR_Address* addr = make_address(base, index, (LIR_Address::Scale)shift, type);
    if (disp == 0 && embedded_shift) {
      // can use ldr/str instruction with register index
      return addr;
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::address(addr), tmp); // add with shifted/extended register
      return new LIR_Address(tmp, disp, type);
    }
  }

  // If the displacement is too large to be inlined into the LDR instruction,
  // generate a large constant with an additional sequence of ADD instructions
  int excess_disp = disp & ~(max_disp - 1);
  if (excess_disp != 0) {
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(base, excess_disp, tmp);
    base = tmp;
  }
  return new LIR_Address(base, disp & (max_disp - 1), type);
}
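// Worked example (illustrative): loading a T_INT at base + 0x12340 exceeds
// the 12-bit ldr range (max_disp = 4096), so the code above splits the
// displacement as excess_disp = 0x12340 & ~0xfff = 0x12000 (materialized via
// add_large_constant into a temp) and keeps the residual 0x340, which fits
// in the instruction's offset field.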
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int base_offset = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);

  if (index_opr->is_constant()) {
    int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
    if (needs_card_mark) {
      LIR_Opr base_opr = new_pointer_register();
      add_large_constant(array_opr, offset, base_opr);
      return new LIR_Address(base_opr, (intx)0, type);
    } else {
      return generate_address(array_opr, offset, type);
    }
  } else {
    assert(index_opr->is_register(), "must be");
    int scale = exact_log2(elem_size);
    if (needs_card_mark) {
      LIR_Opr base_opr = new_pointer_register();
      LIR_Address* addr = make_address(base_opr, index_opr, (LIR_Address::Scale)scale, type);
      __ add(array_opr, LIR_OprFact::intptrConst(base_offset), base_opr);
      __ add(base_opr, LIR_OprFact::address(addr), base_opr); // add with shifted/extended register
      return new LIR_Address(base_opr, type);
    } else {
      return generate_address(array_opr, index_opr, scale, base_offset, type);
    }
  }
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  assert(type == T_LONG || type == T_INT, "should be");
  LIR_Opr r = make_constant(type, x);
#ifdef AARCH64
  bool imm_in_range = Assembler::LogicalImmediate(x, type == T_INT).is_encoded();
#else
  bool imm_in_range = AsmOperand::is_rotated_imm(x);
#endif // AARCH64
  if (!imm_in_range) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, make_constant(addr->type(), step), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info);
  __ cmp(condition, FrameMap::LR_opr, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info);
  __ cmp(condition, reg, FrameMap::LR_opr);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
#ifdef AARCH64
    __ shift_left(left, log2_intptr(c + 1), result);
    __ sub(result, left, result);
#else
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c + 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register
#endif // AARCH64
    return true;
  } else if (is_power_of_2(c - 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c - 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ add(left, LIR_OprFact::address(addr), result); // add with shifted register
    return true;
  }
  return false;
}
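// Examples (illustrative): c == 7 takes the first branch of the function
// above, computing (left << 3) - left; c == 9 takes the second, computing
// left + (left << 3). Constants that are neither one more nor one less than
// a power of two fall through and are multiplied the ordinary way.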
void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  assert(item->type() == T_INT, "other types are not expected");
  __ store(item, new LIR_Address(FrameMap::SP_opr, in_bytes(offset_from_sp), item->type()));
}

void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
  assert(CardTable::dirty_card_val() == 0,
         "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
#ifdef AARCH64
  // AARCH64 has a register that is constant zero. We can use that one to set the
  // value in the card table to dirty.
  __ move(FrameMap::ZR_opr, card_addr);
#else // AARCH64
  if ((ci_card_table_address_as<intx>() & 0xff) == 0) {
    // If the card table base address is aligned to 256 bytes, we can use the register
    // that contains the card_table_base_address.
    __ move(value, card_addr);
  } else {
    // Otherwise we need to create a register containing that value.
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#endif // AARCH64
}
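// Note: only the low byte of the stored register reaches the card (the store
// above is byte-sized), so when the 256-byte-aligned card table base is
// reused as the source its low byte is 0x00 == dirty_card_val(), saving the
// extra register and move on aarch32.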
void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = FrameMap::LR_ptr_opr;

  // TODO-AARCH64: check performance
  bool load_card_table_base_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw());
  if (load_card_table_base_const) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

#ifdef AARCH64
  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
  LIR_Opr tmp2 = tmp;
  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
  LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
#else
  // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
  // byte instruction does not support the addressing mode we need.
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
#endif
  if (UseCondCardMark) {
    if (UseConcMarkSweepGC) {
      __ membar_storeload();
    }
    LIR_Opr cur_value = new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
    __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
    set_card(tmp, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
#if INCLUDE_ALL_GCS
    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
      __ membar_storestore();
    }
#endif
    set_card(tmp, card_addr);
  }
}
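// Note: the card for a heap address lives at
//   card_table_base + (addr >> CardTable::card_shift)
// and a negative LIR_Address scale encodes that right shift, so the shifted
// register forms above compute the card address in a single add/load.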
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------


void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = FrameMap::R0_oop_opr;
    LIR_Opr tmp2 = FrameMap::R1_oop_opr;
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2,
                   LIR_OprFact::illegalOpr, store_check_info,
                   x->profiled_method(), x->profiled_bci());
  }

#if INCLUDE_ALL_GCS
  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }
#endif // INCLUDE_ALL_GCS

  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
  __ move(result, array_addr, null_check_info);
  if (obj_store) {
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}
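// Note: obj_store is passed to emit_array_address() as needs_card_mark, so
// for oop stores the element address is fully computed into a register; the
// same precise address can then be handed to pre_barrier() and post_barrier()
// above without being re-derived.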
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);

  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  // Need a scratch register for biased locking on arm
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_pointer_register();
  } else {
    scratch = atomicLockOpr();
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();
  set_no_result(x);

  LIR_Opr obj_temp = new_pointer_register();
  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  monitor_exit(obj_temp, lock, hdr, atomicLockOpr(), x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
  address runtime_func = NULL;
  ValueTag tag = x->type()->tag();
  if (tag == floatTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
  } else if (tag == doubleTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
  }
  if (runtime_func != NULL) {
    set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL));
    return;
  }
#endif // __SOFTFP__
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_frem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
#ifdef __SOFTFP__
    // Call function compiled with -msoft-float.

    // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.

    case Bytecodes::_fadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc);
      break;
    case Bytecodes::_fmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fmul);
      break;
    case Bytecodes::_fsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc);
      break;
    case Bytecodes::_fdiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fdiv);
      break;
    case Bytecodes::_dadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc);
      break;
    case Bytecodes::_dmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dmul);
      break;
    case Bytecodes::_dsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc);
      break;
    case Bytecodes::_ddiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_ddiv);
      break;
    default:
      ShouldNotReachHere();
#else // __SOFTFP__
    default: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
      return;
    }
#endif // __SOFTFP__
  }

  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
}
void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) {
  assert(right_arg->is_register(), "must be");
  __ cmp(lir_cond_equal, right_arg, make_constant(type, 0));
  __ branch(lir_cond_equal, type, new DivByZeroStub(info));
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  CodeEmitInfo* info = NULL;
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    info = state_for(x);
  }

#ifdef AARCH64
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();
  switch (x->op()) {
    case Bytecodes::_ldiv:
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_LONG, info);
      __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
      break;

    case Bytecodes::_lrem: {
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_LONG, info);
      // a % b is implemented with 2 instructions:
      //    tmp = a/b       (sdiv)
      //    res = a - b*tmp (msub)
      LIR_Opr tmp = FrameMap::as_long_opr(Rtemp);
      __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
      break;
    }

    case Bytecodes::_lmul:
      if (right_arg->is_constant() && is_power_of_2_long(right_arg->get_jlong_constant())) {
        right_arg->dont_load_item();
        __ shift_left(left_arg->result(), exact_log2_long(right_arg->get_jlong_constant()), rlock_result(x));
      } else {
        right_arg->load_item();
        __ mul(left_arg->result(), right_arg->result(), rlock_result(x));
      }
      break;

    case Bytecodes::_ladd:
    case Bytecodes::_lsub:
      if (right_arg->is_constant()) {
        jlong c = right_arg->get_jlong_constant();
        add_constant(left_arg->result(), (x->op() == Bytecodes::_ladd) ? c : -c, rlock_result(x));
      } else {
        right_arg->load_item();
        arithmetic_op_long(x->op(), rlock_result(x), left_arg->result(), right_arg->result(), NULL);
      }
      break;

    default:
      ShouldNotReachHere();
  }
#else
  switch (x->op()) {
    case Bytecodes::_ldiv:
    case Bytecodes::_lrem: {
      LIRItem right(x->y(), this);
      right.load_item();
      make_div_by_zero_check(right.result(), T_LONG, info);
    }
    // Fall through
    case Bytecodes::_lmul: {
      address entry;
      switch (x->op()) {
        case Bytecodes::_lrem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
          break;
        case Bytecodes::_ldiv:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
          break;
        case Bytecodes::_lmul:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
          break;
        default:
          ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case Bytecodes::_ladd:
    case Bytecodes::_lsub: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
      break;
    }
    default:
      ShouldNotReachHere();
  }
#endif // AARCH64
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    if (x->op() == Bytecodes::_idiv && right_arg->is_constant() && is_power_of_2(right_arg->get_jint_constant())) {
      left_arg->load_item();
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      LIR_Opr result = rlock_result(x);
      __ idiv(left_arg->result(), right_arg->result(), result, tmp, info);
    } else {
#ifdef AARCH64
      left_arg->load_item();
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_INT, info);
      if (x->op() == Bytecodes::_idiv) {
        __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
      } else {
        // a % b is implemented with 2 instructions:
        //    tmp = a/b       (sdiv)
        //    res = a - b*tmp (msub)
        LIR_Opr tmp = FrameMap::as_opr(Rtemp);
        __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
      }
#else
      left_arg->load_item_force(FrameMap::R0_opr);
      right_arg->load_item_force(FrameMap::R2_opr);
      LIR_Opr tmp = FrameMap::R1_opr;
      LIR_Opr result = rlock_result(x);
      LIR_Opr out_reg;
      if (x->op() == Bytecodes::_irem) {
        out_reg = FrameMap::R0_opr;
        __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      } else if (x->op() == Bytecodes::_idiv) {
        out_reg = FrameMap::R1_opr;
        __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      }
      __ move(out_reg, result);
#endif // AARCH64
    }

#ifdef AARCH64
  } else if (((x->op() == Bytecodes::_iadd) || (x->op() == Bytecodes::_isub)) && right_arg->is_constant()) {
    left_arg->load_item();
    jint c = right_arg->get_jint_constant();
    right_arg->dont_load_item();
    add_constant(left_arg->result(), (x->op() == Bytecodes::_iadd) ? c : -c, rlock_result(x));
#endif // AARCH64

  } else {
    left_arg->load_item();
    if (x->op() == Bytecodes::_imul && right_arg->is_constant()) {
      jint c = right_arg->get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      AARCH64_ONLY(assert(!right_arg->is_constant(), "constant right_arg is already handled by this moment");)
      right_arg->load_nonconstant();
    }
    rlock_result(x);
    assert(right_arg->is_constant() || right_arg->is_register(), "wrong state of right");
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), NULL);
  }
}
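// Note: in the general idiv/irem case above, 32-bit ARM pins the operands to
// R0 and R2 with R1 as a temp; judging from the out_reg selection, the
// underlying division helper follows a fixed register convention that leaves
// the quotient in R1 and the remainder in R0, hence the final move into the
// allocated result.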
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

#ifndef AARCH64
  if (value.type()->is_long()) {
    count.set_destroys_register();
  }
#endif // !AARCH64

  if (count.is_constant()) {
    assert(count.type()->as_IntConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }
  value.load_item();

  LIR_Opr res = rlock_result(x);
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}
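// Note: set_destroys_register() for long shifts on 32-bit ARM marks the count
// as clobbered; the two-word shift sequence that is emitted later modifies
// the count register while combining the high and low halves of the value.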
// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

#ifdef AARCH64
  if (right.is_constant() && can_inline_as_constant_in_logic(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  logic_op(x->op(), rlock_result(x), left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
#ifdef __SOFTFP__
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_fcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
      break;
    case Bytecodes::_fcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
      break;
    case Bytecodes::_dcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
      break;
    case Bytecodes::_dcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
      break;
    case Bytecodes::_lcmp: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_nonconstant();
      LIR_Opr reg = rlock_result(x);
      __ lcmp2int(left.result(), right.result(), reg);
      return;
    }
    default:
      ShouldNotReachHere();
  }
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
#else // __SOFTFP__
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();

#ifdef AARCH64
  if (right.is_constant() && can_inline_as_constant_in_cmp(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
#endif // __SOFTFP__
}


void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  LIR_Opr addr = new_pointer_register();
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;

  // get address of field
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);
  LIR_Opr result = rlock_result(x);

  if (type == objectType) {
#if INCLUDE_ALL_GCS
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
#endif // INCLUDE_ALL_GCS
#ifdef AARCH64
    if (UseCompressedOops) {
      tmp1 = new_pointer_register();
      tmp2 = new_pointer_register();
    }
#endif // AARCH64
    __ cas_obj(addr, cmp.result(), val.result(), tmp1, tmp2, result);
    post_barrier(addr, val.result());
  }
  else if (type == intType) {
    __ cas_int(addr, cmp.result(), val.result(), tmp1, tmp1, result);
  }
  else if (type == longType) {
#ifndef AARCH64
    tmp1 = new_register(T_LONG);
#endif // !AARCH64
    __ cas_long(addr, cmp.result(), val.result(), tmp1, tmp2, result);
  }
  else {
    ShouldNotReachHere();
  }
}
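// Note: for the object case the write barriers bracket the CAS itself,
// pre_barrier() before cas_obj and post_barrier() after it, mirroring a plain
// oop store. On AArch64 with compressed oops two temps are reserved, giving
// cas_obj scratch registers for working with the encoded oop forms.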
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  address runtime_func;
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ abs(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsqrt: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ sqrt(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsin:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case vmIntrinsics::_dpow:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      return;
  }

  LIR_Opr result;
  if (x->number_of_arguments() == 1) {
    result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
  } else {
    assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
    result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
  }
  set_result(x, result);
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // We put arguments into the same registers which are used for a Java call.
  // Note: we used fixed registers for all arguments because all registers
  // are caller-saved, so register allocator treats them all as used.
  src.load_item_force    (FrameMap::R0_oop_opr);
  src_pos.load_item_force(FrameMap::R1_opr);
  dst.load_item_force    (FrameMap::R2_oop_opr);
  dst_pos.load_item_force(FrameMap::R3_opr);
  length.load_item_force (FrameMap::R4_opr);
  LIR_Opr tmp =          (FrameMap::R5_opr);
  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(),
               tmp, expected_type, flags, info);
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_Convert(Convert* x) {
  address runtime_func;
  switch (x->op()) {
#ifndef AARCH64
    case Bytecodes::_l2f:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    case Bytecodes::_l2d:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_f2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_d2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
#ifdef __SOFTFP__
    case Bytecodes::_f2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2d);
      break;
    case Bytecodes::_d2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_d2f);
      break;
    case Bytecodes::_i2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2f);
      break;
    case Bytecodes::_i2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2d);
      break;
    case Bytecodes::_f2i:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2iz);
      break;
    case Bytecodes::_d2i:
      // This is implemented in hard float in assembler on arm but a call
      // on other platforms.
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
#endif // __SOFTFP__
#endif // !AARCH64
    default: {
      LIRItem value(x->value(), this);
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, NULL);
      return;
    }
  }

  LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
  set_result(x, result);
}
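// Note: on 32-bit ARM the long<->float conversions (_l2f, _l2d, _f2l, _d2l)
// always go through the runtime, and under __SOFTFP__ the remaining float
// conversions are routed to __aeabi_* helpers as well. Everything else falls
// through to an inline LIR convert.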
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;

  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3,
               LIR_OprFact::illegalOpr, klass_reg, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr len = length.result();

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewTypeArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr len = length.result();

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewObjectArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    LIR_Opr sz = size->result();
    assert(sz->type() == T_INT, "should be");
    store_stack_parameter(sz, in_ByteSize(i * BytesPerInt));
  }

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::SP_opr;
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr, reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
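// Note: do_NewMultiArray() passes the dimension sizes as varargs: each size
// is stored to an SP-relative slot via store_stack_parameter(), and SP itself
// is handed to Runtime1::new_multi_array_id as the pointer to that array of
// dimensions, alongside the klass and the rank.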
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();

  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  }

  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(),
               info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
#ifdef __SOFTFP__
// Turn operator if (f <op> g) into runtime call:
//     call _aeabi_fcmp<op>(f, g)
//     cmp(eq, 1)
//     branch(eq, true path).
void LIRGenerator::do_soft_float_compare(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  address runtime_func;
  // Unordered comparisons would get the wrong answer here because the aeabi
  // functions return false for unordered operands.
  bool unordered_is_true = x->unordered_is_true();
  // reverse of condition for ne
  bool compare_to_zero = false;
  switch (lir_cond(cond)) {
    case lir_cond_notEqual:
      compare_to_zero = true;  // fall through
    case lir_cond_equal:
      runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpeq):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpeq);
      break;
    case lir_cond_less:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmplt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmplt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmplt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmplt);
      }
      break;
    case lir_cond_lessEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmple):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmple);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmple):
            CAST_FROM_FN_PTR(address, __aeabi_dcmple);
      }
      break;
    case lir_cond_greaterEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpge):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpge);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpge):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpge);
      }
      break;
    case lir_cond_greater:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpgt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpgt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpgt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpgt);
      }
      break;
    case lir_cond_aboveEqual:
    case lir_cond_belowEqual:
      ShouldNotReachHere();  // We're not going to get these.
    default:
      assert(lir_cond(cond) == lir_cond_always, "must be");
      ShouldNotReachHere();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  // Call float compare function, returns (1,0) if true or false.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
  __ cmp(lir_cond_equal, result,
         compare_to_zero ?
           LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
  profile_branch(x, cond);
  move_to_phi(x->state());
  __ branch(lir_cond_equal, T_INT, x->tsux());
}
#endif // __SOFTFP__

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

#ifdef __SOFTFP__
  if (tag == floatTag || tag == doubleTag) {
    do_soft_float_compare(x);
    assert(x->default_sux() == x->fsux(), "wrong destination above");
    __ jump(x->default_sux());
    return;
  }
#endif // __SOFTFP__

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

#ifndef AARCH64
  if (tag == longTag) {
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
#endif // !AARCH64

  xin->load_item();
  LIR_Opr left = xin->result();
  LIR_Opr right;

#ifdef AARCH64
  if (yin->is_constant() && can_inline_as_constant_in_cmp(yin->value())) {
    yin->dont_load_item();
  } else {
    yin->load_item();
  }
  right = yin->result();
#else
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_nonconstant();
    right = yin->result();
  }
#endif // AARCH64

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
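// Note: for long compares on 32-bit ARM, gtr and leq are mirrored above into
// their swapped-operand equivalents before code generation, so the two-word
// compare sequence only ever has to materialize less/greaterEqual style
// conditions (plus eq/ne).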
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::Rthread_opr;
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::R0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::R0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, getThreadTemp(), LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifndef AARCH64
  if (value->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with stlr instruction
  __ store(value, address, info, lir_patch_none);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifndef AARCH64
  if (result->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with ldar instruction
  __ load(address, result, info, lir_patch_none);
}
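// Note: 64-bit volatile accesses on 32-bit ARM must be single-copy atomic;
// volatile_load_mem_reg/volatile_store_mem_reg provide that via a
// doubleword-atomic sequence that accepts only a plain base register, which
// is why the displacement is folded into a temp with add_large_constant first.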
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
#ifdef AARCH64
  __ load(new LIR_Address(src, offset, type), dst);
#else
  assert(offset->is_single_cpu(), "must be");
  if (is_volatile && dst->is_double_cpu()) {
    LIR_Opr tmp = new_pointer_register();
    __ add(src, offset, tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, type), dst, NULL);
  } else if (type == T_FLOAT || type == T_DOUBLE) {
    // fld doesn't have indexed addressing mode
    LIR_Opr tmp = new_register(T_INT);
    __ add(src, offset, tmp);
    __ load(new LIR_Address(tmp, (intx)0, type), dst);
  } else {
    __ load(new LIR_Address(src, offset, type), dst);
  }
#endif // AARCH64
}

void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
#ifdef AARCH64
  LIR_Address* addr = new LIR_Address(src, offset, type);
  if (type == T_ARRAY || type == T_OBJECT) {
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
#else
  assert(offset->is_single_cpu(), "must be");
  if (is_volatile && data->is_double_cpu()) {
    LIR_Opr tmp = new_register(T_INT);
    __ add(src, offset, tmp);
    __ volatile_store_mem_reg(data, new LIR_Address(tmp, (intx)0, type), NULL);
  } else if (type == T_FLOAT || type == T_DOUBLE) {
    // fst doesn't have indexed addressing mode
    LIR_Opr tmp = new_register(T_INT);
    __ add(src, offset, tmp);
    __ move(data, new LIR_Address(tmp, (intx)0, type));
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#if INCLUDE_ALL_GCS
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
#endif // INCLUDE_ALL_GCS
    __ move(data, addr);
    if (is_obj) {
      assert(src->is_register(), "must be register");
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
#endif // AARCH64
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  if (x->is_add()) {
    value.load_nonconstant();
  } else {
    value.load_item();
  }
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);

  assert (type == T_INT || type == T_LONG || (!x->is_add() && is_obj), "unexpected type");
  LIR_Opr addr_ptr = new_pointer_register();

  __ add(src.result(), off.result(), addr_ptr);

  LIR_Address* addr = new LIR_Address(addr_ptr, (intx)0, type);

  if (x->is_add()) {
    LIR_Opr tmp = new_register(type);
    __ xadd(addr_ptr, data, dst, tmp);
  } else {
    LIR_Opr tmp = (UseCompressedOops && is_obj) ? new_pointer_register() : LIR_OprFact::illegalOpr;
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(addr_ptr, data, dst, tmp);
    if (is_obj) {
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}