/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_ppc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

void LIRItem::load_byte_item() {
  // Byte loads use same registers as other loads.
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}


inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
  LIR_Opr r = li.value()->operand();
  if (r->is_register()) {
    LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
    ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
  } else {
    // Constants or memory get loaded with sign extend on this platform.
    ll->move(li.result(), dst);
  }
}


//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::R3_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::R4_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; } // not needed

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::R3_opr;        break;
    case objectTag: opr = FrameMap::R3_oop_opr;    break;
    case longTag:   opr = FrameMap::R3_long_opr;   break;
    case floatTag:  opr = FrameMap::F1_opr;        break;
    case doubleTag: opr = FrameMap::F1_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

// PPC cannot inline all constants.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// Only simm16 constants can be inlined.
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm16(c->as_jint());
  }
  if (c->type() == T_LONG) {
    return Assembler::is_simm16(c->as_jlong());
  }
  if (c->type() == T_OBJECT) {
    return c->as_jobject() == NULL;
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // Accumulate fixed displacements.
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(disp)) {
        __ add(index, LIR_OprFact::intptrConst(disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (!Assembler::is_simm16(disp)) {
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // At this point we either have base + index or base + displacement.
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(disp), "must be");
    return new LIR_Address(base, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Opr base_opr;
  int offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    int i = index_opr->as_constant_ptr()->as_jint();
    int array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  if (needs_card_mark) {
    LIR_Opr ptr = new_pointer_register();
    __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
    return new LIR_Address(ptr, type);
  } else {
    return new LIR_Address(base_opr, offset, type);
  }
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm16(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}

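// Descriptive note (added for documentation, not in the upstream sources): try to
// strength-reduce a multiplication by the compile-time constant c into a shift plus
// add/sub when c + 1 or c - 1 is a power of two, e.g.
//   c == 7  ->  result = (left << 3) - left
//   c == 9  ->  result = (left << 3) + left
// Returns false if no such reduction applies and the caller must emit a multiply.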
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2_intptr(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2_intptr(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // The CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different.
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // Emit array address setup early so it schedules better.
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // Range_check also does the null check.
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // Following registers are used by slow_subtype_check:
    LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
    LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
    LIR_Opr tmp3 = FrameMap::R6_opr; // temp

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3,
                   store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }
  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
  __ move(result, array_addr, null_check_info);
  if (obj_store) {
    // Precise card mark.
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expects object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for _fadd, _fmul, _fsub, _fdiv, _frem
//     _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_fadd:
  case Bytecodes::_fmul:
  case Bytecodes::_fsub:
  case Bytecodes::_fdiv:
  case Bytecodes::_dadd:
  case Bytecodes::_dmul:
  case Bytecodes::_dsub:
  case Bytecodes::_ddiv: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
  }
  break;

  case Bytecodes::_frem:
  case Bytecodes::_drem: {
    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default: ShouldNotReachHere();
  }
}


// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing test if instr is commutative and if we should swap.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing test if instr is commutative and if we should swap.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
  case floatTag:
  case doubleTag: do_ArithmeticOp_FPU(x);  return;
  case longTag:   do_ArithmeticOp_Long(x); return;
  case intTag:    do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  LIR_Opr mcount;
  if (count.result()->is_register()) {
    mcount = FrameMap::R0_opr;
  } else {
    mcount = LIR_OprFact::illegalOpr;
  }
  shift_op(x->op(), reg, value.result(), count.result(), mcount);
}


inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2_long(int_or_long_const+1) ||
       is_power_of_2_long(int_or_long_const) ||
       is_power_of_2_long(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2_long(int_or_long_const+1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2_long(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2_long(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType *type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this); // object
  LIRItem offset(x->argument_at(1), this); // offset of field
  LIRItem cmp   (x->argument_at(2), this); // Value to compare with field.
  LIRItem val   (x->argument_at(3), this); // Replace field with val if matches cmp.

  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  LIR_Opr addr = new_pointer_register();

  // Get address of field.
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  if (type == objectType) { // Write-barrier needed for Object fields.
    // Only cmp value can get overwritten, no do_load required.
    pre_barrier(LIR_OprFact::illegalOpr /* addr */, cmp.result() /* pre_val */,
                false /* do_load */, false /* patch */, NULL);
  }

  if (type == objectType) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
  } else if (type == intType) {
    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
  } else if (type == longType) {
    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) { // Write-barrier needed for Object fields.
    // Precise card mark since could either be object or array.
    post_barrier(addr, val.result());
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
          break;
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src    (x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst    (x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length (x->argument_at(4), this);

  // Load all values in callee_save_registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force    (FrameMap::R14_oop_opr);
  src_pos.load_item_force(FrameMap::R15_opr);
  dst.load_item_force    (FrameMap::R17_oop_opr);
  dst_pos.load_item_force(FrameMap::R18_opr);
  length.load_item_force (FrameMap::R19_opr);
  LIR_Opr tmp = FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}


// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  switch (x->op()) {

    // int -> float: force spill
    case Bytecodes::_l2f: {
      if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // fcfid+frsp needs fixup code to avoid rounding incompatibility.
        address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
        LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
        set_result(x, result);
        break;
      } // else fallthru
    }
    case Bytecodes::_l2d: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.load_item();
      LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
      __ convert(x->op(), tmp, reg);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_i2d: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.load_item();
      // Convert i2l first.
      LIR_Opr tmp1 = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, value.result(), tmp1);
      LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
      __ convert(x->op(), tmp2, reg);
      break;
    }

    // float -> int: result will be stored
    case Bytecodes::_f2l:
    case Bytecodes::_d2l: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.set_destroys_register(); // USE_KILL
      value.load_item();
      set_vreg_flag(reg, must_start_in_memory);
      __ convert(x->op(), value.result(), reg);
      break;
    }
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.set_destroys_register(); // USE_KILL
      value.load_item();
      // Convert l2i afterwards.
      LIR_Opr tmp1 = new_register(T_LONG);
      set_vreg_flag(tmp1, must_start_in_memory);
      __ convert(x->op(), value.result(), tmp1);
      __ convert(Bytecodes::_l2i, tmp1, reg);
      break;
    }

    // Within same category: just register conversions.
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
    case Bytecodes::_i2l:
    case Bytecodes::_l2i:
    case Bytecodes::_f2d:
    case Bytecodes::_d2f: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.load_item();
      __ convert(x->op(), value.result(), reg);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the dimensions
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception = state_for(x);

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(R16_thread);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
  __ move(LIR_OprFact::intConst(block->block_id()), arg1);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(arg1);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  Unimplemented();
  // __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  Unimplemented();
  // __ volatile_load_mem_reg(address, result, info);
#endif
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Opr base_op = src;
  LIR_Opr index_op = offset;

  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
  } else
#endif
  {
    if (type == T_BOOLEAN) {
      type = T_BYTE;
    }
    LIR_Address* addr;
    if (type == T_ARRAY || type == T_OBJECT) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, index_op, tmp);
      addr = new LIR_Address(tmp, type);
    } else {
      addr = new LIR_Address(base_op, index_op, type);
    }

    if (is_obj) {
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
      // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
    }
    __ move(data, addr);
    if (is_obj) {
      // This address is precise.
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}


void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
  } else
#endif
  {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    __ load(addr, dst);
  }
}


void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);

  LIR_Opr tmp = FrameMap::R0_opr;
  LIR_Opr ptr = new_pointer_register();
  __ add(src.result(), off.result(), ptr);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  if (x->is_add()) {
    __ xadd(ptr, data, dst, tmp);
  } else {
    const bool can_move_barrier = true; // TODO: port GraphKit::can_move_pre_barrier() from C2
    if (!can_move_barrier && is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(ptr, data, dst, tmp);
    if (is_obj) {
      // Seems to be a precise address.
      post_barrier(ptr, data);
      if (can_move_barrier) {
        pre_barrier(LIR_OprFact::illegalOpr, dst /* pre_val */,
                    false /* do_load */, false /* patch */, NULL);
      }
    }
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
}


void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ?
          arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      // CCallingConventionRequiresIntsAsLongs
      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      load_int_as_long(gen()->lir(), len, arg3);

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}