/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(disp)) {
        __ add(tmp, tmp, LIR_OprFact::intptrConst(disp));
        index = tmp;
      } else {
        __ move(tmp, LIR_OprFact::intptrConst(disp));
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (disp != 0 && !Address::offset_ok_for_immed(disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(disp, 0), "must be");
    return new LIR_Address(base, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing it once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL;  // unreachable
  }
  return r;
}



void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------


void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    left.load_item();
    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      int c = right.get_jint_constant();
      if (! is_power_of_2(c) && ! is_power_of_2(c + 1) && ! is_power_of_2(c - 1)) {
        // Cannot use constant op.
        right.load_item();
      } else {
        right.dont_load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if(off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ volatile_load_mem_reg(address, result, info);
}

void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Address* addr;
  if (offset->is_constant()) {
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }
}