/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_Compilation.hpp" 27 #include "c1/c1_FrameMap.hpp" 28 #include "c1/c1_Instruction.hpp" 29 #include "c1/c1_LIRAssembler.hpp" 30 #include "c1/c1_LIRGenerator.hpp" 31 #include "c1/c1_Runtime1.hpp" 32 #include "c1/c1_ValueStack.hpp" 33 #include "ci/ciArray.hpp" 34 #include "ci/ciObjArrayKlass.hpp" 35 #include "ci/ciTypeArrayKlass.hpp" 36 #include "ci/ciUtilities.hpp" 37 #include "gc/shared/cardTable.hpp" 38 #include "gc/shared/cardTableBarrierSet.hpp" 39 #include "runtime/sharedRuntime.hpp" 40 #include "runtime/stubRoutines.hpp" 41 #include "vmreg_arm.inline.hpp" 42 43 #ifdef ASSERT 44 #define __ gen()->lir(__FILE__, __LINE__)-> 45 #else 46 #define __ gen()->lir()-> 47 #endif 48 49 void LIRItem::load_byte_item() { 50 load_item(); 51 } 52 53 void LIRItem::load_nonconstant() { 54 LIR_Opr r = value()->operand(); 55 if (_gen->can_inline_as_constant(value())) { 56 if (!r->is_constant()) { 57 r = LIR_OprFact::value_type(value()->type()); 58 } 59 _result = r; 60 } else { 61 load_item(); 62 } 63 } 64 65 //-------------------------------------------------------------- 66 // LIRGenerator 67 //-------------------------------------------------------------- 68 69 70 LIR_Opr LIRGenerator::exceptionOopOpr() { 71 return FrameMap::Exception_oop_opr; 72 } 73 74 LIR_Opr LIRGenerator::exceptionPcOpr() { 75 return FrameMap::Exception_pc_opr; 76 } 77 78 LIR_Opr LIRGenerator::syncLockOpr() { 79 return new_register(T_INT); 80 } 81 82 LIR_Opr LIRGenerator::syncTempOpr() { 83 return new_register(T_OBJECT); 84 } 85 86 LIR_Opr LIRGenerator::getThreadTemp() { 87 return LIR_OprFact::illegalOpr; 88 } 89 90 LIR_Opr LIRGenerator::atomicLockOpr() { 91 return LIR_OprFact::illegalOpr; 92 } 93 94 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { 95 LIR_Opr opr; 96 switch (type->tag()) { 97 case intTag: opr = FrameMap::Int_result_opr; break; 98 case objectTag: opr = FrameMap::Object_result_opr; break; 99 case longTag: opr 
= FrameMap::Long_result_opr; break; 100 case floatTag: opr = FrameMap::Float_result_opr; break; 101 case doubleTag: opr = FrameMap::Double_result_opr; break; 102 case addressTag: 103 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; 104 } 105 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch"); 106 return opr; 107 } 108 109 110 LIR_Opr LIRGenerator::rlock_byte(BasicType type) { 111 return new_register(T_INT); 112 } 113 114 115 //--------- loading items into registers -------------------------------- 116 117 118 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { 119 #ifdef AARCH64 120 if (v->type()->as_IntConstant() != NULL) { 121 return v->type()->as_IntConstant()->value() == 0; 122 } else if (v->type()->as_LongConstant() != NULL) { 123 return v->type()->as_LongConstant()->value() == 0; 124 } else if (v->type()->as_ObjectConstant() != NULL) { 125 return v->type()->as_ObjectConstant()->value()->is_null_object(); 126 } else if (v->type()->as_FloatConstant() != NULL) { 127 return jint_cast(v->type()->as_FloatConstant()->value()) == 0; 128 } else if (v->type()->as_DoubleConstant() != NULL) { 129 return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0; 130 } 131 #endif // AARCH64 132 return false; 133 } 134 135 136 bool LIRGenerator::can_inline_as_constant(Value v) const { 137 if (v->type()->as_IntConstant() != NULL) { 138 return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value()); 139 } else if (v->type()->as_ObjectConstant() != NULL) { 140 return v->type()->as_ObjectConstant()->value()->is_null_object(); 141 #ifdef AARCH64 142 } else if (v->type()->as_LongConstant() != NULL) { 143 return Assembler::is_arith_imm_in_range(v->type()->as_LongConstant()->value()); 144 #else 145 } else if (v->type()->as_FloatConstant() != NULL) { 146 return v->type()->as_FloatConstant()->value() == 0.0f; 147 } else if (v->type()->as_DoubleConstant() != NULL) { 148 return 
v->type()->as_DoubleConstant()->value() == 0.0; 149 #endif // AARCH64 150 } 151 return false; 152 } 153 154 155 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { 156 ShouldNotCallThis(); // Not used on ARM 157 return false; 158 } 159 160 161 #ifdef AARCH64 162 163 static bool can_inline_as_constant_in_cmp(Value v) { 164 jlong constant; 165 if (v->type()->as_IntConstant() != NULL) { 166 constant = v->type()->as_IntConstant()->value(); 167 } else if (v->type()->as_LongConstant() != NULL) { 168 constant = v->type()->as_LongConstant()->value(); 169 } else if (v->type()->as_ObjectConstant() != NULL) { 170 return v->type()->as_ObjectConstant()->value()->is_null_object(); 171 } else if (v->type()->as_FloatConstant() != NULL) { 172 return v->type()->as_FloatConstant()->value() == 0.0f; 173 } else if (v->type()->as_DoubleConstant() != NULL) { 174 return v->type()->as_DoubleConstant()->value() == 0.0; 175 } else { 176 return false; 177 } 178 179 return Assembler::is_arith_imm_in_range(constant) || Assembler::is_arith_imm_in_range(-constant); 180 } 181 182 183 static bool can_inline_as_constant_in_logic(Value v) { 184 if (v->type()->as_IntConstant() != NULL) { 185 return Assembler::LogicalImmediate(v->type()->as_IntConstant()->value(), true).is_encoded(); 186 } else if (v->type()->as_LongConstant() != NULL) { 187 return Assembler::LogicalImmediate(v->type()->as_LongConstant()->value(), false).is_encoded(); 188 } 189 return false; 190 } 191 192 193 #endif // AARCH64 194 195 196 LIR_Opr LIRGenerator::safepoint_poll_register() { 197 return LIR_OprFact::illegalOpr; 198 } 199 200 201 static LIR_Opr make_constant(BasicType type, jlong c) { 202 switch (type) { 203 case T_ADDRESS: 204 case T_OBJECT: return LIR_OprFact::intptrConst(c); 205 case T_LONG: return LIR_OprFact::longConst(c); 206 case T_INT: return LIR_OprFact::intConst(c); 207 default: ShouldNotReachHere(); 208 return LIR_OprFact::intConst(-1); 209 } 210 } 211 212 #ifdef AARCH64 213 214 void 
LIRGenerator::add_constant(LIR_Opr src, jlong c, LIR_Opr dest) { 215 if (c == 0) { 216 __ move(src, dest); 217 return; 218 } 219 220 BasicType type = src->type(); 221 bool is_neg = (c < 0); 222 c = ABS(c); 223 224 if ((c >> 24) == 0) { 225 for (int shift = 0; shift <= 12; shift += 12) { 226 int part = ((int)c) & (right_n_bits(12) << shift); 227 if (part != 0) { 228 if (is_neg) { 229 __ sub(src, make_constant(type, part), dest); 230 } else { 231 __ add(src, make_constant(type, part), dest); 232 } 233 src = dest; 234 } 235 } 236 } else { 237 __ move(make_constant(type, c), dest); 238 if (is_neg) { 239 __ sub(src, dest, dest); 240 } else { 241 __ add(src, dest, dest); 242 } 243 } 244 } 245 246 #endif // AARCH64 247 248 249 void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) { 250 assert(c != 0, "must be"); 251 #ifdef AARCH64 252 add_constant(src, c, dest); 253 #else 254 // Find first non-zero bit 255 int shift = 0; 256 while ((c & (3 << shift)) == 0) { 257 shift += 2; 258 } 259 // Add the least significant part of the constant 260 int mask = 0xff << shift; 261 __ add(src, LIR_OprFact::intConst(c & mask), dest); 262 // Add up to 3 other parts of the constant; 263 // each of them can be represented as rotated_imm 264 if (c & (mask << 8)) { 265 __ add(dest, LIR_OprFact::intConst(c & (mask << 8)), dest); 266 } 267 if (c & (mask << 16)) { 268 __ add(dest, LIR_OprFact::intConst(c & (mask << 16)), dest); 269 } 270 if (c & (mask << 24)) { 271 __ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest); 272 } 273 #endif // AARCH64 274 } 275 276 static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) { 277 return new LIR_Address(base, index, scale, 0, type); 278 } 279 280 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, 281 int shift, int disp, BasicType type) { 282 assert(base->is_register(), "must be"); 283 284 if (index->is_constant()) { 285 disp += index->as_constant_ptr()->as_jint() 
<< shift; 286 index = LIR_OprFact::illegalOpr; 287 } 288 289 #ifndef AARCH64 290 if (base->type() == T_LONG) { 291 LIR_Opr tmp = new_register(T_INT); 292 __ convert(Bytecodes::_l2i, base, tmp); 293 base = tmp; 294 } 295 if (index != LIR_OprFact::illegalOpr && index->type() == T_LONG) { 296 LIR_Opr tmp = new_register(T_INT); 297 __ convert(Bytecodes::_l2i, index, tmp); 298 index = tmp; 299 } 300 // At this point base and index should be all ints and not constants 301 assert(base->is_single_cpu() && !base->is_constant(), "base should be an non-constant int"); 302 assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be an non-constant int"); 303 #endif 304 305 int max_disp; 306 bool disp_is_in_range; 307 bool embedded_shift; 308 309 #ifdef AARCH64 310 int align = exact_log2(type2aelembytes(type, true)); 311 assert((disp & right_n_bits(align)) == 0, "displacement is not aligned"); 312 assert(shift == 0 || shift == align, "shift should be zero or equal to embedded align"); 313 max_disp = (1 << 12) << align; 314 315 if (disp >= 0) { 316 disp_is_in_range = Assembler::is_unsigned_imm_in_range(disp, 12, align); 317 } else { 318 disp_is_in_range = Assembler::is_imm_in_range(disp, 9, 0); 319 } 320 321 embedded_shift = true; 322 #else 323 switch (type) { 324 case T_BYTE: 325 case T_SHORT: 326 case T_CHAR: 327 max_disp = 256; // ldrh, ldrsb encoding has 8-bit offset 328 embedded_shift = false; 329 break; 330 case T_FLOAT: 331 case T_DOUBLE: 332 max_disp = 1024; // flds, fldd have 8-bit offset multiplied by 4 333 embedded_shift = false; 334 break; 335 case T_LONG: 336 max_disp = 4096; 337 embedded_shift = false; 338 break; 339 default: 340 max_disp = 4096; // ldr, ldrb allow 12-bit offset 341 embedded_shift = true; 342 } 343 344 disp_is_in_range = (-max_disp < disp && disp < max_disp); 345 #endif // !AARCH64 346 347 if (index->is_register()) { 348 LIR_Opr tmp = new_pointer_register(); 349 if (!disp_is_in_range) { 350 
add_large_constant(base, disp, tmp); 351 base = tmp; 352 disp = 0; 353 } 354 LIR_Address* addr = make_address(base, index, (LIR_Address::Scale)shift, type); 355 if (disp == 0 && embedded_shift) { 356 // can use ldr/str instruction with register index 357 return addr; 358 } else { 359 LIR_Opr tmp = new_pointer_register(); 360 __ add(base, LIR_OprFact::address(addr), tmp); // add with shifted/extended register 361 return new LIR_Address(tmp, disp, type); 362 } 363 } 364 365 // If the displacement is too large to be inlined into LDR instruction, 366 // generate large constant with additional sequence of ADD instructions 367 int excess_disp = disp & ~(max_disp - 1); 368 if (excess_disp != 0) { 369 LIR_Opr tmp = new_pointer_register(); 370 add_large_constant(base, excess_disp, tmp); 371 base = tmp; 372 } 373 return new LIR_Address(base, disp & (max_disp - 1), type); 374 } 375 376 377 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, 378 BasicType type, bool needs_card_mark) { 379 int base_offset = arrayOopDesc::base_offset_in_bytes(type); 380 int elem_size = type2aelembytes(type); 381 382 if (index_opr->is_constant()) { 383 int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size; 384 if (needs_card_mark) { 385 LIR_Opr base_opr = new_pointer_register(); 386 add_large_constant(array_opr, offset, base_opr); 387 return new LIR_Address(base_opr, (intx)0, type); 388 } else { 389 return generate_address(array_opr, offset, type); 390 } 391 } else { 392 assert(index_opr->is_register(), "must be"); 393 int scale = exact_log2(elem_size); 394 if (needs_card_mark) { 395 LIR_Opr base_opr = new_pointer_register(); 396 LIR_Address* addr = make_address(base_opr, index_opr, (LIR_Address::Scale)scale, type); 397 __ add(array_opr, LIR_OprFact::intptrConst(base_offset), base_opr); 398 __ add(base_opr, LIR_OprFact::address(addr), base_opr); // add with shifted/extended register 399 return new LIR_Address(base_opr, type); 400 } else { 
401 return generate_address(array_opr, index_opr, scale, base_offset, type); 402 } 403 } 404 } 405 406 407 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) { 408 assert(type == T_LONG || type == T_INT, "should be"); 409 LIR_Opr r = make_constant(type, x); 410 #ifdef AARCH64 411 bool imm_in_range = Assembler::LogicalImmediate(x, type == T_INT).is_encoded(); 412 #else 413 bool imm_in_range = AsmOperand::is_rotated_imm(x); 414 #endif // AARCH64 415 if (!imm_in_range) { 416 LIR_Opr tmp = new_register(type); 417 __ move(r, tmp); 418 return tmp; 419 } 420 return r; 421 } 422 423 424 void LIRGenerator::increment_counter(address counter, BasicType type, int step) { 425 LIR_Opr pointer = new_pointer_register(); 426 __ move(LIR_OprFact::intptrConst(counter), pointer); 427 LIR_Address* addr = new LIR_Address(pointer, type); 428 increment_counter(addr, step); 429 } 430 431 432 void LIRGenerator::increment_counter(LIR_Address* addr, int step) { 433 LIR_Opr temp = new_register(addr->type()); 434 __ move(addr, temp); 435 __ add(temp, make_constant(addr->type(), step), temp); 436 __ move(temp, addr); 437 } 438 439 440 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { 441 __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info); 442 __ cmp(condition, FrameMap::LR_opr, c); 443 } 444 445 446 void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { 447 __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info); 448 __ cmp(condition, reg, FrameMap::LR_opr); 449 } 450 451 452 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) { 453 assert(left != result, "should be different registers"); 454 if (is_power_of_2(c + 1)) { 455 #ifdef AARCH64 456 __ shift_left(left, log2_intptr(c + 1), result); 457 __ sub(result, left, result); 458 #else 459 LIR_Address::Scale scale = (LIR_Address::Scale) 
log2_intptr(c + 1); 460 LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT); 461 __ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register 462 #endif // AARCH64 463 return true; 464 } else if (is_power_of_2(c - 1)) { 465 LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c - 1); 466 LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT); 467 __ add(left, LIR_OprFact::address(addr), result); // add with shifted register 468 return true; 469 } 470 return false; 471 } 472 473 474 void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) { 475 assert(item->type() == T_INT, "other types are not expected"); 476 __ store(item, new LIR_Address(FrameMap::SP_opr, in_bytes(offset_from_sp), item->type())); 477 } 478 479 void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) { 480 assert(CardTable::dirty_card_val() == 0, 481 "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise"); 482 #ifdef AARCH64 483 // AARCH64 has a register that is constant zero. We can use that one to set the 484 // value in the card table to dirty. 485 __ move(FrameMap::ZR_opr, card_addr); 486 #else // AARCH64 487 if((ci_card_table_address_as<intx>() & 0xff) == 0) { 488 // If the card table base address is aligned to 256 bytes, we can use the register 489 // that contains the card_table_base_address. 490 __ move(value, card_addr); 491 } else { 492 // Otherwise we need to create a register containing that value. 
493 LIR_Opr tmp_zero = new_register(T_INT); 494 __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero); 495 __ move(tmp_zero, card_addr); 496 } 497 #endif // AARCH64 498 } 499 500 void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) { 501 assert(addr->is_register(), "must be a register at this point"); 502 503 LIR_Opr tmp = FrameMap::LR_ptr_opr; 504 505 // TODO-AARCH64: check performance 506 bool load_card_table_base_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw()); 507 if (load_card_table_base_const) { 508 __ move((LIR_Opr)card_table_base, tmp); 509 } else { 510 __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp); 511 } 512 513 #ifdef AARCH64 514 LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE); 515 LIR_Opr tmp2 = tmp; 516 __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift) 517 LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE); 518 #else 519 // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load 520 // byte instruction does not support the addressing mode we need. 
521 LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN); 522 #endif 523 if (UseCondCardMark) { 524 if (UseConcMarkSweepGC) { 525 __ membar_storeload(); 526 } 527 LIR_Opr cur_value = new_register(T_INT); 528 __ move(card_addr, cur_value); 529 530 LabelObj* L_already_dirty = new LabelObj(); 531 __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val())); 532 __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label()); 533 set_card(tmp, card_addr); 534 __ branch_destination(L_already_dirty->label()); 535 } else { 536 if (UseConcMarkSweepGC && CMSPrecleaningEnabled) { 537 __ membar_storestore(); 538 } 539 set_card(tmp, card_addr); 540 } 541 } 542 543 //---------------------------------------------------------------------- 544 // visitor functions 545 //---------------------------------------------------------------------- 546 547 548 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { 549 assert(x->is_pinned(),""); 550 bool needs_range_check = x->compute_needs_range_check(); 551 bool use_length = x->length() != NULL; 552 bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; 553 bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || 554 !get_jobject_constant(x->value())->is_null_object() || 555 x->should_profile()); 556 557 LIRItem array(x->array(), this); 558 LIRItem index(x->index(), this); 559 LIRItem value(x->value(), this); 560 LIRItem length(this); 561 562 array.load_item(); 563 index.load_nonconstant(); 564 565 if (use_length && needs_range_check) { 566 length.set_instruction(x->length()); 567 length.load_item(); 568 } 569 if (needs_store_check || x->check_boolean()) { 570 value.load_item(); 571 } else { 572 value.load_for_store(x->elt_type()); 573 } 574 575 set_no_result(x); 576 577 // the CodeEmitInfo must be duplicated for each different 578 // LIR-instruction because spilling can occur anywhere between two 579 // instructions and so 
the debug information must be different 580 CodeEmitInfo* range_check_info = state_for(x); 581 CodeEmitInfo* null_check_info = NULL; 582 if (x->needs_null_check()) { 583 null_check_info = new CodeEmitInfo(range_check_info); 584 } 585 586 // emit array address setup early so it schedules better 587 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store); 588 589 if (GenerateRangeChecks && needs_range_check) { 590 if (use_length) { 591 __ cmp(lir_cond_belowEqual, length.result(), index.result()); 592 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); 593 } else { 594 array_range_check(array.result(), index.result(), null_check_info, range_check_info); 595 // range_check also does the null check 596 null_check_info = NULL; 597 } 598 } 599 600 if (GenerateArrayStoreCheck && needs_store_check) { 601 LIR_Opr tmp1 = FrameMap::R0_oop_opr; 602 LIR_Opr tmp2 = FrameMap::R1_oop_opr; 603 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info); 604 __ store_check(value.result(), array.result(), tmp1, tmp2, 605 LIR_OprFact::illegalOpr, store_check_info, 606 x->profiled_method(), x->profiled_bci()); 607 } 608 609 #if INCLUDE_ALL_GCS 610 if (obj_store) { 611 // Needs GC write barriers. 
612 pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */, 613 true /* do_load */, false /* patch */, NULL); 614 } 615 #endif // INCLUDE_ALL_GCS 616 617 LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info); 618 __ move(result, array_addr, null_check_info); 619 if (obj_store) { 620 post_barrier(LIR_OprFact::address(array_addr), value.result()); 621 } 622 } 623 624 625 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { 626 assert(x->is_pinned(),""); 627 LIRItem obj(x->obj(), this); 628 obj.load_item(); 629 set_no_result(x); 630 631 LIR_Opr lock = new_pointer_register(); 632 LIR_Opr hdr = new_pointer_register(); 633 634 // Need a scratch register for biased locking on arm 635 LIR_Opr scratch = LIR_OprFact::illegalOpr; 636 if(UseBiasedLocking) { 637 scratch = new_pointer_register(); 638 } else { 639 scratch = atomicLockOpr(); 640 } 641 642 CodeEmitInfo* info_for_exception = NULL; 643 if (x->needs_null_check()) { 644 info_for_exception = state_for(x); 645 } 646 647 CodeEmitInfo* info = state_for(x, x->state(), true); 648 monitor_enter(obj.result(), lock, hdr, scratch, 649 x->monitor_no(), info_for_exception, info); 650 } 651 652 653 void LIRGenerator::do_MonitorExit(MonitorExit* x) { 654 assert(x->is_pinned(),""); 655 LIRItem obj(x->obj(), this); 656 obj.dont_load_item(); 657 set_no_result(x); 658 659 LIR_Opr obj_temp = new_pointer_register(); 660 LIR_Opr lock = new_pointer_register(); 661 LIR_Opr hdr = new_pointer_register(); 662 663 monitor_exit(obj_temp, lock, hdr, atomicLockOpr(), x->monitor_no()); 664 } 665 666 667 // _ineg, _lneg, _fneg, _dneg 668 void LIRGenerator::do_NegateOp(NegateOp* x) { 669 #ifdef __SOFTFP__ 670 address runtime_func = NULL; 671 ValueTag tag = x->type()->tag(); 672 if (tag == floatTag) { 673 runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg); 674 } else if (tag == doubleTag) { 675 runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg); 676 } 677 if 
(runtime_func != NULL) { 678 set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL)); 679 return; 680 } 681 #endif // __SOFTFP__ 682 LIRItem value(x->x(), this); 683 value.load_item(); 684 LIR_Opr reg = rlock_result(x); 685 __ negate(value.result(), reg); 686 } 687 688 689 // for _fadd, _fmul, _fsub, _fdiv, _frem 690 // _dadd, _dmul, _dsub, _ddiv, _drem 691 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { 692 address runtime_func; 693 switch (x->op()) { 694 case Bytecodes::_frem: 695 runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::frem); 696 break; 697 case Bytecodes::_drem: 698 runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::drem); 699 break; 700 #ifdef __SOFTFP__ 701 // Call function compiled with -msoft-float. 702 703 // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269. 704 705 case Bytecodes::_fadd: 706 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc); 707 break; 708 case Bytecodes::_fmul: 709 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fmul); 710 break; 711 case Bytecodes::_fsub: 712 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc); 713 break; 714 case Bytecodes::_fdiv: 715 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fdiv); 716 break; 717 case Bytecodes::_dadd: 718 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc); 719 break; 720 case Bytecodes::_dmul: 721 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dmul); 722 break; 723 case Bytecodes::_dsub: 724 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc); 725 break; 726 case Bytecodes::_ddiv: 727 runtime_func = CAST_FROM_FN_PTR(address, __aeabi_ddiv); 728 break; 729 default: 730 ShouldNotReachHere(); 731 #else // __SOFTFP__ 732 default: { 733 LIRItem left(x->x(), this); 734 LIRItem right(x->y(), this); 735 left.load_item(); 736 right.load_item(); 737 rlock_result(x); 738 arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp()); 
739 return; 740 } 741 #endif // __SOFTFP__ 742 } 743 744 LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL); 745 set_result(x, result); 746 } 747 748 749 void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) { 750 assert(right_arg->is_register(), "must be"); 751 __ cmp(lir_cond_equal, right_arg, make_constant(type, 0)); 752 __ branch(lir_cond_equal, type, new DivByZeroStub(info)); 753 } 754 755 756 // for _ladd, _lmul, _lsub, _ldiv, _lrem 757 void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { 758 CodeEmitInfo* info = NULL; 759 if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) { 760 info = state_for(x); 761 } 762 763 #ifdef AARCH64 764 LIRItem left(x->x(), this); 765 LIRItem right(x->y(), this); 766 LIRItem* left_arg = &left; 767 LIRItem* right_arg = &right; 768 769 // Test if instr is commutative and if we should swap 770 if (x->is_commutative() && left.is_constant()) { 771 left_arg = &right; 772 right_arg = &left; 773 } 774 775 left_arg->load_item(); 776 switch (x->op()) { 777 case Bytecodes::_ldiv: 778 right_arg->load_item(); 779 make_div_by_zero_check(right_arg->result(), T_LONG, info); 780 __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL); 781 break; 782 783 case Bytecodes::_lrem: { 784 right_arg->load_item(); 785 make_div_by_zero_check(right_arg->result(), T_LONG, info); 786 // a % b is implemented with 2 instructions: 787 // tmp = a/b (sdiv) 788 // res = a - b*tmp (msub) 789 LIR_Opr tmp = FrameMap::as_long_opr(Rtemp); 790 __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL); 791 break; 792 } 793 794 case Bytecodes::_lmul: 795 if (right_arg->is_constant() && is_power_of_2_long(right_arg->get_jlong_constant())) { 796 right_arg->dont_load_item(); 797 __ shift_left(left_arg->result(), exact_log2_long(right_arg->get_jlong_constant()), rlock_result(x)); 798 } else { 799 right_arg->load_item(); 800 __ 
mul(left_arg->result(), right_arg->result(), rlock_result(x)); 801 } 802 break; 803 804 case Bytecodes::_ladd: 805 case Bytecodes::_lsub: 806 if (right_arg->is_constant()) { 807 jlong c = right_arg->get_jlong_constant(); 808 add_constant(left_arg->result(), (x->op() == Bytecodes::_ladd) ? c : -c, rlock_result(x)); 809 } else { 810 right_arg->load_item(); 811 arithmetic_op_long(x->op(), rlock_result(x), left_arg->result(), right_arg->result(), NULL); 812 } 813 break; 814 815 default: 816 ShouldNotReachHere(); 817 } 818 #else 819 switch (x->op()) { 820 case Bytecodes::_ldiv: 821 case Bytecodes::_lrem: { 822 LIRItem right(x->y(), this); 823 right.load_item(); 824 make_div_by_zero_check(right.result(), T_LONG, info); 825 } 826 // Fall through 827 case Bytecodes::_lmul: { 828 address entry; 829 switch (x->op()) { 830 case Bytecodes::_lrem: 831 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem); 832 break; 833 case Bytecodes::_ldiv: 834 entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv); 835 break; 836 case Bytecodes::_lmul: 837 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul); 838 break; 839 default: 840 ShouldNotReachHere(); 841 } 842 LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL); 843 set_result(x, result); 844 break; 845 } 846 case Bytecodes::_ladd: 847 case Bytecodes::_lsub: { 848 LIRItem left(x->x(), this); 849 LIRItem right(x->y(), this); 850 left.load_item(); 851 right.load_item(); 852 rlock_result(x); 853 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); 854 break; 855 } 856 default: 857 ShouldNotReachHere(); 858 } 859 #endif // AARCH64 860 } 861 862 863 // for: _iadd, _imul, _isub, _idiv, _irem 864 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { 865 bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem; 866 LIRItem left(x->x(), this); 867 LIRItem right(x->y(), this); 868 LIRItem* left_arg = &left; 869 LIRItem* right_arg = &right; 870 871 // Test if instr is 
commutative and if we should swap 872 if (x->is_commutative() && left.is_constant()) { 873 left_arg = &right; 874 right_arg = &left; 875 } 876 877 if (is_div_rem) { 878 CodeEmitInfo* info = state_for(x); 879 if (x->op() == Bytecodes::_idiv && right_arg->is_constant() && is_power_of_2(right_arg->get_jint_constant())) { 880 left_arg->load_item(); 881 right_arg->dont_load_item(); 882 LIR_Opr tmp = LIR_OprFact::illegalOpr; 883 LIR_Opr result = rlock_result(x); 884 __ idiv(left_arg->result(), right_arg->result(), result, tmp, info); 885 } else { 886 #ifdef AARCH64 887 left_arg->load_item(); 888 right_arg->load_item(); 889 make_div_by_zero_check(right_arg->result(), T_INT, info); 890 if (x->op() == Bytecodes::_idiv) { 891 __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL); 892 } else { 893 // a % b is implemented with 2 instructions: 894 // tmp = a/b (sdiv) 895 // res = a - b*tmp (msub) 896 LIR_Opr tmp = FrameMap::as_opr(Rtemp); 897 __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL); 898 } 899 #else 900 left_arg->load_item_force(FrameMap::R0_opr); 901 right_arg->load_item_force(FrameMap::R2_opr); 902 LIR_Opr tmp = FrameMap::R1_opr; 903 LIR_Opr result = rlock_result(x); 904 LIR_Opr out_reg; 905 if (x->op() == Bytecodes::_irem) { 906 out_reg = FrameMap::R0_opr; 907 __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info); 908 } else if (x->op() == Bytecodes::_idiv) { 909 out_reg = FrameMap::R1_opr; 910 __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info); 911 } 912 __ move(out_reg, result); 913 #endif // AARCH64 914 } 915 916 #ifdef AARCH64 917 } else if (((x->op() == Bytecodes::_iadd) || (x->op() == Bytecodes::_isub)) && right_arg->is_constant()) { 918 left_arg->load_item(); 919 jint c = right_arg->get_jint_constant(); 920 right_arg->dont_load_item(); 921 add_constant(left_arg->result(), (x->op() == Bytecodes::_iadd) ? 
c : -c, rlock_result(x)); 922 #endif // AARCH64 923 924 } else { 925 left_arg->load_item(); 926 if (x->op() == Bytecodes::_imul && right_arg->is_constant()) { 927 jint c = right_arg->get_jint_constant(); 928 if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) { 929 right_arg->dont_load_item(); 930 } else { 931 right_arg->load_item(); 932 } 933 } else { 934 AARCH64_ONLY(assert(!right_arg->is_constant(), "constant right_arg is already handled by this moment");) 935 right_arg->load_nonconstant(); 936 } 937 rlock_result(x); 938 assert(right_arg->is_constant() || right_arg->is_register(), "wrong state of right"); 939 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), NULL); 940 } 941 } 942 943 944 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { 945 ValueTag tag = x->type()->tag(); 946 assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters"); 947 switch (tag) { 948 case floatTag: 949 case doubleTag: do_ArithmeticOp_FPU(x); return; 950 case longTag: do_ArithmeticOp_Long(x); return; 951 case intTag: do_ArithmeticOp_Int(x); return; 952 } 953 ShouldNotReachHere(); 954 } 955 956 957 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr 958 void LIRGenerator::do_ShiftOp(ShiftOp* x) { 959 LIRItem value(x->x(), this); 960 LIRItem count(x->y(), this); 961 962 #ifndef AARCH64 963 if (value.type()->is_long()) { 964 count.set_destroys_register(); 965 } 966 #endif // !AARCH64 967 968 if (count.is_constant()) { 969 assert(count.type()->as_IntConstant() != NULL, "should be"); 970 count.dont_load_item(); 971 } else { 972 count.load_item(); 973 } 974 value.load_item(); 975 976 LIR_Opr res = rlock_result(x); 977 shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr); 978 } 979 980 981 // _iand, _land, _ior, _lor, _ixor, _lxor 982 void LIRGenerator::do_LogicOp(LogicOp* x) { 983 LIRItem left(x->x(), this); 984 LIRItem right(x->y(), this); 985 986 
  left.load_item();

#ifdef AARCH64
  // AArch64: constants that fit the logical-immediate encoding can stay
  // unloaded; everything else goes in a register.
  if (right.is_constant() && can_inline_as_constant_in_logic(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  logic_op(x->op(), rlock_result(x), left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
#ifdef __SOFTFP__
  // Without FP hardware, float/double comparisons become calls into
  // SharedRuntime helpers; _lcmp is still generated inline below.
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_fcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
      break;
    case Bytecodes::_fcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
      break;
    case Bytecodes::_dcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
      break;
    case Bytecodes::_dcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
      break;
    case Bytecodes::_lcmp: {
      // Long compare does not involve FP, so it is emitted inline even
      // under soft-float.
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_nonconstant();
      LIR_Opr reg = rlock_result(x);
      __ lcmp2int(left.result(), right.result(), reg);
      return;
    }
    default:
      ShouldNotReachHere();
  }
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
#else // __SOFTFP__
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();

#ifdef AARCH64
  // AArch64 cmp can take suitable immediates directly.
  if (right.is_constant() && can_inline_as_constant_in_cmp(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    // The last argument selects the "less" flavor (_fcmpl/_dcmpl), which
    // determines the result produced for unordered operands.
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
#endif // __SOFTFP__
}


// Compare-and-swap intrinsic: computes the field address (obj + offset),
// then emits an atomic cas of the width selected by `type`; the outcome is
// placed in the result operand.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  LIR_Opr addr = new_pointer_register();
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;

  // get address of field
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);
  LIR_Opr result = rlock_result(x);

  if (type == objectType) {
#if INCLUDE_ALL_GCS
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
#endif // INCLUDE_ALL_GCS
#ifdef AARCH64
    // With compressed oops the object cas needs two pointer temps (used by
    // the backend for encode/decode around the exchange).
    if (UseCompressedOops) {
      tmp1 = new_pointer_register();
      tmp2 = new_pointer_register();
    }
#endif // AARCH64
    __ cas_obj(addr, cmp.result(), val.result(), tmp1, tmp2, result);
    // Card-mark (and G1 post) barrier for the newly stored reference.
    post_barrier(addr, val.result());
  }
  else if (type == intType) {
    // No temps needed here: tmp1 is still illegalOpr and is passed for
    // both temp slots.
    __ cas_int(addr, cmp.result(), val.result(), tmp1, tmp1, result);
  }
  else if (type == longType) {
#ifndef AARCH64
    // 32-bit ARM needs a T_LONG temp for the 64-bit exchange.
    tmp1 = new_register(T_LONG);
#endif // !AARCH64
    __ cas_long(addr, cmp.result(), val.result(), tmp1, tmp2, result);
  }
  else {
    ShouldNotReachHere();
  }
}


// Math intrinsics: abs/sqrt are emitted inline when FP hardware is
// available; everything else (and all soft-float cases) becomes a call
// into a SharedRuntime helper.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  address runtime_func;
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ abs(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsqrt: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ sqrt(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsin:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case vmIntrinsics::_dpow:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      return;
  }

  // _dpow is the only two-argument intrinsic in this set.
  LIR_Opr result;
  if (x->number_of_arguments() == 1) {
    result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
  } else {
    assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
    result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
  }
  set_result(x, result);
}

// FMA is not intrinsified on this platform; reaching here is a bug.
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}

// vectorizedMismatch is not intrinsified on this platform; reaching here is a bug.
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// System.arraycopy intrinsic: pins all five arguments into the Java-call
// argument registers and emits a LIR arraycopy; stub/inline selection is
// driven by the flags and expected_type computed by arraycopy_helper().
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // We put arguments into the same registers which are used for a Java call.
  // Note: we used fixed registers for all arguments because all registers
  // are caller-saved, so register allocator treats them all as used.
  src.load_item_force    (FrameMap::R0_oop_opr);
  src_pos.load_item_force(FrameMap::R1_opr);
  dst.load_item_force    (FrameMap::R2_oop_opr);
  dst_pos.load_item_force(FrameMap::R3_opr);
  length.load_item_force (FrameMap::R4_opr);
  LIR_Opr tmp =          (FrameMap::R5_opr);
  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(),
               tmp, expected_type, flags, info);
}

// CRC32 is not intrinsified on this platform; reaching here is a bug.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

// Primitive conversions. On 32-bit ARM (and under soft-float), the
// conversions listed below go through SharedRuntime / aeabi helpers;
// everything else falls through to a single inline LIR convert.
void LIRGenerator::do_Convert(Convert* x) {
  address runtime_func;
  switch (x->op()) {
#ifndef AARCH64
    case Bytecodes::_l2f:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    case Bytecodes::_l2d:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_f2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_d2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
#ifdef __SOFTFP__
    case Bytecodes::_f2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2d);
      break;
    case Bytecodes::_d2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_d2f);
      break;
    case Bytecodes::_i2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2f);
      break;
    case Bytecodes::_i2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2d);
      break;
    case Bytecodes::_f2i:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2iz);
      break;
    case Bytecodes::_d2i:
      // This is implemented in hard float in assembler on arm but a call
      // on other platforms.
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
#endif // __SOFTFP__
#endif // !AARCH64
    default: {
      // All remaining conversions are done inline.
      LIRItem value(x->value(), this);
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, NULL);
      return;
    }
  }

  LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
  set_result(x, result);
}


// new-instance bytecode: fast-path allocation with a slow-path runtime
// call; the fixed registers are dictated by NewInstanceStub (see comments).
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;

  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3,
               LIR_OprFact::illegalOpr, klass_reg, info);

  // Copy from the fixed result register into a freshly allocated one.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


// newarray bytecode (primitive element types).
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr len = length.result();

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewTypeArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4
 = LIR_OprFact::illegalOpr;

  // The element klass is a compile-time constant for primitive arrays.
  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


// anewarray bytecode (object element types); unlike do_NewTypeArray the
// element klass may need patching if it is not loaded yet.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);           // R2 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr len = length.result();

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIR_Opr reg = result_register_for(x->type());       // R0 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr;      // R1 is required by runtime call in NewObjectArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


// multianewarray bytecode: dimension sizes are passed on the stack (SP is
// handed to the runtime as the varargs pointer), klass in R0, rank in R2.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  // Store every dimension size into the stack area the runtime call reads.
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    LIR_Opr sz = size->result();
    assert(sz->type() == T_INT, "should be");
    store_stack_parameter(sz, in_ByteSize(i * BytesPerInt));
  }

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::SP_opr;
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr, reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


// checkcast bytecode: selects the appropriate exception/deopt stub and
// emits the LIR type check; R0/R1 serve as temps for the check code.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();

  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Pick the slow path: ICCE for interface-dispatch checks, a deopt for
  // invokespecial receiver checks, otherwise ClassCastException.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  }

  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(),
               info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}


// instanceof bytecode: same temp-register scheme as do_CheckCast, but
// produces a boolean-style result instead of throwing.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


#ifdef __SOFTFP__
// Turn operator if (f <op> g) into runtime call:
//   call _aeabi_fcmp<op>(f, g)
//   cmp(eq, 1)
//   branch(eq, true path).
void LIRGenerator::do_soft_float_compare(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  address runtime_func;
  // unordered comparison gets the wrong answer because aeabi functions
  //  return false.
  bool unordered_is_true = x->unordered_is_true();
  // reverse of condition for ne
  bool compare_to_zero = false;
  switch (lir_cond(cond)) {
    case lir_cond_notEqual:
      compare_to_zero = true;  // fall through
    case lir_cond_equal:
      // ne reuses the eq helper; compare_to_zero inverts the test below.
      runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpeq):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpeq);
      break;
    case lir_cond_less:
      // When unordered must take the true path, use the SharedRuntime
      // helpers instead of the aeabi ones (which treat unordered as false).
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmplt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmplt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmplt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmplt);
      }
      break;
    case lir_cond_lessEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmple):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmple);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmple):
            CAST_FROM_FN_PTR(address, __aeabi_dcmple);
      }
      break;
    case lir_cond_greaterEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpge):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpge);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpge):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpge);
      }
      break;
    case lir_cond_greater:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpgt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpgt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpgt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpgt);
      }
      break;
    case lir_cond_aboveEqual:
    case lir_cond_belowEqual:
      ShouldNotReachHere();  // We're not going to get these.
    default:
      assert(lir_cond(cond) == lir_cond_always, "must be");
      ShouldNotReachHere();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  // Call float compare function, returns (1,0) if true or false.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
  // For ne (compare_to_zero) the eq-helper result is tested against 0,
  // inverting the condition; otherwise test against 1.
  __ cmp(lir_cond_equal, result,
         compare_to_zero ? LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
  profile_branch(x, cond);
  move_to_phi(x->state());
  __ branch(lir_cond_equal, T_INT, x->tsux());
}
#endif // __SOFTFP__

// Generates compare + conditional branch for an If node (fall-through
// handled by the trailing jump to the default successor).
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

#ifdef __SOFTFP__
  if (tag == floatTag || tag == doubleTag) {
    do_soft_float_compare(x);
    assert(x->default_sux() == x->fsux(), "wrong destination above");
    __ jump(x->default_sux());
    return;
  }
#endif // __SOFTFP__

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

#ifndef AARCH64
  if (tag == longTag) {
    // Mirror gtr/leq into lt/ge forms by swapping the operands, so only
    // one set of long-compare sequences is needed.
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    // The 32-bit long compare destroys its left operand register.
    xin->set_destroys_register();
  }
#endif // !AARCH64

  xin->load_item();
  LIR_Opr left = xin->result();
  LIR_Opr right;

#ifdef AARCH64
  if (yin->is_constant() && can_inline_as_constant_in_cmp(yin->value())) {
    yin->dont_load_item();
  } else {
    yin->load_item();
  }
  right = yin->result();
#else
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_nonconstant();
    right = yin->result();
  }
#endif // AARCH64

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  // Float branches also need the unordered successor.
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


// The current thread is kept permanently in a dedicated register.
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::Rthread_opr;
}

// Debug hook: calls Runtime1::trace_block_entry with the block id in R0.
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::R0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::R0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, getThreadTemp(), LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifndef AARCH64
  // 32-bit ARM: a volatile 64-bit store goes through the dedicated
  // volatile_store_mem_reg path with a plain [base, #0] address (the
  // displacement is folded into the base register first).
  if (value->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with stlr instruction
  __ store(value, address, info, lir_patch_none);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifndef AARCH64
  // Mirror of volatile_field_store: 64-bit volatile loads use the special
  // volatile_load_mem_reg path with a [base, #0] address.
  if (result->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with ldar instruction
  __ load(address, result, info, lir_patch_none);
}

// Unsafe.get*: loads from src + offset; 32-bit ARM has to pre-compute the
// address when the addressing mode cannot express base+index for the access.
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
#ifdef AARCH64
  __ load(new LIR_Address(src, offset, type), dst);
#else
  assert(offset->is_single_cpu(), "must be");
  if (is_volatile && dst->is_double_cpu()) {
    // Volatile 64-bit load: compute the address first, then use the
    // dedicated volatile load path.
    LIR_Opr tmp = new_pointer_register();
    __ add(src, offset, tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, type), dst, NULL);
  } else if (type == T_FLOAT || type == T_DOUBLE) {
    // fld doesn't have indexed addressing mode
    LIR_Opr tmp = new_register(T_INT);
    __ add(src, offset, tmp);
    __ load(new LIR_Address(tmp, (intx)0, type), dst);
  } else {
    __ load(new LIR_Address(src, offset, type), dst);
  }
#endif // AARCH64
}

// Unsafe.put*: stores to src + offset; object stores get GC pre/post
// barriers around the move.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
#ifdef AARCH64
  LIR_Address* addr = new LIR_Address(src, offset, type);
  if (type == T_ARRAY || type == T_OBJECT) {
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
#else
  assert(offset->is_single_cpu(), "must be");
  if (is_volatile && data->is_double_cpu()) {
    // Volatile 64-bit store: compute the address, then use the dedicated
    // volatile store path.
    LIR_Opr tmp = new_register(T_INT);
    __ add(src, offset, tmp);
    __ volatile_store_mem_reg(data, new LIR_Address(tmp, (intx)0, type), NULL);
  } else if (type == T_FLOAT || type == T_DOUBLE) {
    // fst doesn't have indexed addressing mode
    LIR_Opr tmp = new_register(T_INT);
    __ add(src, offset, tmp);
    __ move(data, new LIR_Address(tmp, (intx)0, type));
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#if INCLUDE_ALL_GCS
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
#endif // INCLUDE_ALL_GCS
    __ move(data, addr);
    if (is_obj) {
      assert(src->is_register(), "must be register");
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
#endif // AARCH64
}

// Unsafe getAndSet / getAndAdd intrinsics: computes addr = obj + offset,
// then emits xadd (for add) or xchg; an object exchange is wrapped in GC
// pre/post barriers.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  if (x->is_add()) {
    value.load_nonconstant();
  } else {
    value.load_item();
  }
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);

  assert (type == T_INT || type == T_LONG || (!x->is_add() && is_obj), "unexpected type");
  LIR_Opr addr_ptr = new_pointer_register();

  __ add(src.result(), off.result(), addr_ptr);

  LIR_Address* addr = new LIR_Address(addr_ptr, (intx)0, type);

  if (x->is_add()) {
    LIR_Opr tmp = new_register(type);
    __ xadd(addr_ptr, data, dst, tmp);
  } else {
    // A pointer temp is only needed for compressed-oops encode/decode.
    LIR_Opr tmp = (UseCompressedOops && is_obj) ? new_pointer_register() : LIR_OprFact::illegalOpr;
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(addr_ptr, data, dst, tmp);
    if (is_obj) {
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}