1 /* 2 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "c1/c1_Compilation.hpp" 28 #include "c1/c1_FrameMap.hpp" 29 #include "c1/c1_Instruction.hpp" 30 #include "c1/c1_LIRAssembler.hpp" 31 #include "c1/c1_LIRGenerator.hpp" 32 #include "c1/c1_Runtime1.hpp" 33 #include "c1/c1_ValueStack.hpp" 34 #include "ci/ciArray.hpp" 35 #include "ci/ciObjArrayKlass.hpp" 36 #include "ci/ciTypeArrayKlass.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubRoutines.hpp" 39 #include "vmreg_aarch64.inline.hpp" 40 41 #ifdef ASSERT 42 #define __ gen()->lir(__FILE__, __LINE__)-> 43 #else 44 #define __ gen()->lir()-> 45 #endif 46 47 // Item will be loaded into a byte register; Intel only 48 void LIRItem::load_byte_item() { 49 load_item(); 50 } 51 52 53 void LIRItem::load_nonconstant() { 54 LIR_Opr r = value()->operand(); 55 if (r->is_constant()) { 56 _result = r; 57 } else { 58 load_item(); 59 } 60 } 61 62 //-------------------------------------------------------------- 63 // LIRGenerator 64 //-------------------------------------------------------------- 65 66 67 LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; } 68 LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::r3_opr; } 69 LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } 70 LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } 71 LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } 72 LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } 73 LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); } 74 LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::r0_opr; } 75 LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; } 76 77 78 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { 79 LIR_Opr opr; 80 switch (type->tag()) { 81 case intTag: opr = FrameMap::r0_opr; break; 82 case objectTag: opr = FrameMap::r0_oop_opr; break; 83 case longTag: opr = FrameMap::long0_opr; break; 84 case floatTag: opr = FrameMap::fpu0_float_opr; break; 85 case doubleTag: opr = FrameMap::fpu0_double_opr; break; 86 87 case addressTag: 88 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; 89 } 90 91 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch"); 92 return opr; 93 } 94 95 96 LIR_Opr LIRGenerator::rlock_byte(BasicType type) { 97 LIR_Opr reg = new_register(T_INT); 98 set_vreg_flag(reg, LIRGenerator::byte_reg); 99 return reg; 100 } 101 102 103 //--------- loading items into registers -------------------------------- 104 105 106 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { 107 if (v->type()->as_IntConstant() != NULL) { 108 return v->type()->as_IntConstant()->value() == 0L; 109 } else if (v->type()->as_LongConstant() != NULL) { 110 return v->type()->as_LongConstant()->value() == 0L; 111 } else if (v->type()->as_ObjectConstant() != NULL) { 112 return v->type()->as_ObjectConstant()->value()->is_null_object(); 113 } else { 114 return false; 115 } 116 } 117 118 bool LIRGenerator::can_inline_as_constant(Value v) const { 119 // FIXME: Just a guess 120 if (v->type()->as_IntConstant() != NULL) { 121 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value()); 122 } else if (v->type()->as_LongConstant() != NULL) { 123 return v->type()->as_LongConstant()->value() == 0L; 124 } else if (v->type()->as_ObjectConstant() != NULL) { 125 return v->type()->as_ObjectConstant()->value()->is_null_object(); 126 } else { 127 return false; 128 } 129 } 130 131 132 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; } 133 134 135 LIR_Opr LIRGenerator::safepoint_poll_register() { 136 return LIR_OprFact::illegalOpr; 137 } 138 139 140 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, 141 int shift, int disp, BasicType type) { 142 assert(base->is_register(), "must be"); 143 intx large_disp = disp; 144 145 // accumulate fixed displacements 146 if (index->is_constant()) { 147 large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift; 148 index = LIR_OprFact::illegalOpr; 149 } 150 151 if (index->is_register()) { 152 // apply the shift and accumulate the displacement 153 if (shift > 0) { 154 LIR_Opr tmp = new_pointer_register(); 155 __ shift_left(index, shift, tmp); 156 index = tmp; 157 } 158 if (large_disp != 0) { 159 LIR_Opr tmp = new_pointer_register(); 160 if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) { 161 __ add(tmp, tmp, LIR_OprFact::intptrConst(large_disp)); 162 index = tmp; 163 } else { 164 __ move(tmp, LIR_OprFact::intptrConst(large_disp)); 165 __ add(tmp, index, tmp); 166 index = tmp; 167 } 168 large_disp = 0; 169 } 170 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) { 171 // index is illegal so replace it with the displacement loaded into a register 172 index = new_pointer_register(); 173 __ move(LIR_OprFact::intptrConst(large_disp), index); 174 large_disp = 0; 175 } 176 177 // at this point we either have base + index or base + displacement 178 if (large_disp == 0) { 179 return new LIR_Address(base, index, type); 180 } else { 181 assert(Address::offset_ok_for_immed(large_disp, 0), "must be"); 182 return new LIR_Address(base, large_disp, type); 183 } 184 } 185 186 187 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, 188 BasicType type, bool needs_card_mark) { 189 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type); 190 int elem_size = type2aelembytes(type); 191 int shift = exact_log2(elem_size); 192 193 LIR_Address* addr; 194 if (index_opr->is_constant()) { 195 addr = new LIR_Address(array_opr, 196 offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type); 197 } else { 198 if (offset_in_bytes) { 199 LIR_Opr tmp = new_pointer_register(); 200 __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp); 201 array_opr = tmp; 202 offset_in_bytes = 0; 203 } 204 addr = new LIR_Address(array_opr, 205 index_opr, 206 LIR_Address::scale(type), 207 offset_in_bytes, type); 208 } 209 if (needs_card_mark) { 210 // This store will need a precise card mark, so go ahead and 211 // compute the full adddres instead of computing once for the 212 // store and again for the card mark. 213 LIR_Opr tmp = new_pointer_register(); 214 __ leal(LIR_OprFact::address(addr), tmp); 215 return new LIR_Address(tmp, type); 216 } else { 217 return addr; 218 } 219 } 220 221 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) { 222 LIR_Opr r; 223 if (type == T_LONG) { 224 r = LIR_OprFact::longConst(x); 225 if (!Assembler::operand_valid_for_logical_immediate(false, x)) { 226 LIR_Opr tmp = new_register(type); 227 __ move(r, tmp); 228 return tmp; 229 } 230 } else if (type == T_INT) { 231 r = LIR_OprFact::intConst(x); 232 if (!Assembler::operand_valid_for_logical_immediate(true, x)) { 233 // This is all rather nasty. We don't know whether our constant 234 // is required for a logical or an arithmetic operation, wo we 235 // don't know what the range of valid values is!! 236 LIR_Opr tmp = new_register(type); 237 __ move(r, tmp); 238 return tmp; 239 } 240 } else { 241 ShouldNotReachHere(); 242 r = NULL; // unreachable 243 } 244 return r; 245 } 246 247 248 249 void LIRGenerator::increment_counter(address counter, BasicType type, int step) { 250 LIR_Opr pointer = new_pointer_register(); 251 __ move(LIR_OprFact::intptrConst(counter), pointer); 252 LIR_Address* addr = new LIR_Address(pointer, type); 253 increment_counter(addr, step); 254 } 255 256 257 void LIRGenerator::increment_counter(LIR_Address* addr, int step) { 258 LIR_Opr imm = NULL; 259 switch(addr->type()) { 260 case T_INT: 261 imm = LIR_OprFact::intConst(step); 262 break; 263 case T_LONG: 264 imm = LIR_OprFact::longConst(step); 265 break; 266 default: 267 ShouldNotReachHere(); 268 } 269 LIR_Opr reg = new_register(addr->type()); 270 __ load(addr, reg); 271 __ add(reg, imm, reg); 272 __ store(reg, addr); 273 } 274 275 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { 276 LIR_Opr reg = new_register(T_INT); 277 __ load(generate_address(base, disp, T_INT), reg, info); 278 __ cmp(condition, reg, LIR_OprFact::intConst(c)); 279 } 280 281 void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { 282 LIR_Opr reg1 = new_register(T_INT); 283 __ load(generate_address(base, disp, type), reg1, info); 284 __ cmp(condition, reg, reg1); 285 } 286 287 288 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) { 289 290 if (is_power_of_2(c - 1)) { 291 __ shift_left(left, exact_log2(c - 1), tmp); 292 __ add(tmp, left, result); 293 return true; 294 } else if (is_power_of_2(c + 1)) { 295 __ shift_left(left, exact_log2(c + 1), tmp); 296 __ sub(tmp, left, result); 297 return true; 298 } else { 299 return false; 300 } 301 } 302 303 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) { 304 BasicType type = item->type(); 305 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type)); 306 } 307 308 //---------------------------------------------------------------------- 309 // visitor functions 310 //---------------------------------------------------------------------- 311 312 313 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { 314 assert(x->is_pinned(),""); 315 bool needs_range_check = x->compute_needs_range_check(); 316 bool use_length = x->length() != NULL; 317 bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; 318 bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || 319 !get_jobject_constant(x->value())->is_null_object() || 320 x->should_profile()); 321 322 LIRItem array(x->array(), this); 323 LIRItem index(x->index(), this); 324 LIRItem value(x->value(), this); 325 LIRItem length(this); 326 327 array.load_item(); 328 index.load_nonconstant(); 329 330 if (use_length && needs_range_check) { 331 length.set_instruction(x->length()); 332 length.load_item(); 333 334 } 335 if (needs_store_check || x->check_boolean()) { 336 value.load_item(); 337 } else { 338 value.load_for_store(x->elt_type()); 339 } 340 341 set_no_result(x); 342 343 // the CodeEmitInfo must be duplicated for each different 344 // LIR-instruction because spilling can occur anywhere between two 345 // instructions and so the debug information must be different 346 CodeEmitInfo* range_check_info = state_for(x); 347 CodeEmitInfo* null_check_info = NULL; 348 if (x->needs_null_check()) { 349 null_check_info = new CodeEmitInfo(range_check_info); 350 } 351 352 // emit array address setup early so it schedules better 353 // FIXME? No harm in this on aarch64, and it might help 354 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store); 355 356 if (GenerateRangeChecks && needs_range_check) { 357 if (use_length) { 358 __ cmp(lir_cond_belowEqual, length.result(), index.result()); 359 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result())); 360 } else { 361 array_range_check(array.result(), index.result(), null_check_info, range_check_info); 362 // range_check also does the null check 363 null_check_info = NULL; 364 } 365 } 366 367 if (GenerateArrayStoreCheck && needs_store_check) { 368 LIR_Opr tmp1 = new_register(objectType); 369 LIR_Opr tmp2 = new_register(objectType); 370 LIR_Opr tmp3 = new_register(objectType); 371 372 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info); 373 __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci()); 374 } 375 376 if (obj_store) { 377 // Needs GC write barriers. 378 pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */, 379 true /* do_load */, false /* patch */, NULL); 380 __ move(value.result(), array_addr, null_check_info); 381 // Seems to be a precise 382 post_barrier(LIR_OprFact::address(array_addr), value.result()); 383 } else { 384 LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info); 385 __ move(result, array_addr, null_check_info); 386 } 387 } 388 389 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { 390 assert(x->is_pinned(),""); 391 LIRItem obj(x->obj(), this); 392 obj.load_item(); 393 394 set_no_result(x); 395 396 // "lock" stores the address of the monitor stack slot, so this is not an oop 397 LIR_Opr lock = new_register(T_INT); 398 // Need a scratch register for biased locking 399 LIR_Opr scratch = LIR_OprFact::illegalOpr; 400 if (UseBiasedLocking) { 401 scratch = new_register(T_INT); 402 } 403 404 CodeEmitInfo* info_for_exception = NULL; 405 if (x->needs_null_check()) { 406 info_for_exception = state_for(x); 407 } 408 // this CodeEmitInfo must not have the xhandlers because here the 409 // object is already locked (xhandlers expect object to be unlocked) 410 CodeEmitInfo* info = state_for(x, x->state(), true); 411 monitor_enter(obj.result(), lock, syncTempOpr(), scratch, 412 x->monitor_no(), info_for_exception, info); 413 } 414 415 416 void LIRGenerator::do_MonitorExit(MonitorExit* x) { 417 assert(x->is_pinned(),""); 418 419 LIRItem obj(x->obj(), this); 420 obj.dont_load_item(); 421 422 LIR_Opr lock = new_register(T_INT); 423 LIR_Opr obj_temp = new_register(T_INT); 424 set_no_result(x); 425 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no()); 426 } 427 428 429 void LIRGenerator::do_NegateOp(NegateOp* x) { 430 431 LIRItem from(x->x(), this); 432 from.load_item(); 433 LIR_Opr result = rlock_result(x); 434 __ negate (from.result(), result); 435 436 } 437 438 // for _fadd, _fmul, _fsub, _fdiv, _frem 439 // _dadd, _dmul, _dsub, _ddiv, _drem 440 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { 441 442 if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { 443 // float remainder is implemented as a direct call into the runtime 444 LIRItem right(x->x(), this); 445 LIRItem left(x->y(), this); 446 447 BasicTypeList signature(2); 448 if (x->op() == Bytecodes::_frem) { 449 signature.append(T_FLOAT); 450 signature.append(T_FLOAT); 451 } else { 452 signature.append(T_DOUBLE); 453 signature.append(T_DOUBLE); 454 } 455 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 456 457 const LIR_Opr result_reg = result_register_for(x->type()); 458 left.load_item_force(cc->at(1)); 459 right.load_item(); 460 461 __ move(right.result(), cc->at(0)); 462 463 address entry; 464 if (x->op() == Bytecodes::_frem) { 465 entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem); 466 } else { 467 entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem); 468 } 469 470 LIR_Opr result = rlock_result(x); 471 __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args()); 472 __ move(result_reg, result); 473 474 return; 475 } 476 477 LIRItem left(x->x(), this); 478 LIRItem right(x->y(), this); 479 LIRItem* left_arg = &left; 480 LIRItem* right_arg = &right; 481 482 // Always load right hand side. 483 right.load_item(); 484 485 if (!left.is_register()) 486 left.load_item(); 487 488 LIR_Opr reg = rlock(x); 489 LIR_Opr tmp = LIR_OprFact::illegalOpr; 490 if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) { 491 tmp = new_register(T_DOUBLE); 492 } 493 494 arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL); 495 496 set_result(x, round_item(reg)); 497 } 498 499 // for _ladd, _lmul, _lsub, _ldiv, _lrem 500 void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { 501 502 // missing test if instr is commutative and if we should swap 503 LIRItem left(x->x(), this); 504 LIRItem right(x->y(), this); 505 506 if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) { 507 508 // the check for division by zero destroys the right operand 509 right.set_destroys_register(); 510 511 // check for division by zero (destroys registers of right operand!) 512 CodeEmitInfo* info = state_for(x); 513 514 left.load_item(); 515 right.load_item(); 516 517 __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0)); 518 __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info)); 519 520 rlock_result(x); 521 switch (x->op()) { 522 case Bytecodes::_lrem: 523 __ rem (left.result(), right.result(), x->operand()); 524 break; 525 case Bytecodes::_ldiv: 526 __ div (left.result(), right.result(), x->operand()); 527 break; 528 default: 529 ShouldNotReachHere(); 530 break; 531 } 532 533 534 } else { 535 assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, 536 "expect lmul, ladd or lsub"); 537 // add, sub, mul 538 left.load_item(); 539 if (! right.is_register()) { 540 if (x->op() == Bytecodes::_lmul 541 || ! right.is_constant() 542 || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) { 543 right.load_item(); 544 } else { // add, sub 545 assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub"); 546 // don't load constants to save register 547 right.load_nonconstant(); 548 } 549 } 550 rlock_result(x); 551 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); 552 } 553 } 554 555 // for: _iadd, _imul, _isub, _idiv, _irem 556 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { 557 558 // Test if instr is commutative and if we should swap 559 LIRItem left(x->x(), this); 560 LIRItem right(x->y(), this); 561 LIRItem* left_arg = &left; 562 LIRItem* right_arg = &right; 563 if (x->is_commutative() && left.is_stack() && right.is_register()) { 564 // swap them if left is real stack (or cached) and right is real register(not cached) 565 left_arg = &right; 566 right_arg = &left; 567 } 568 569 left_arg->load_item(); 570 571 // do not need to load right, as we can handle stack and constants 572 if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) { 573 574 right_arg->load_item(); 575 rlock_result(x); 576 577 CodeEmitInfo* info = state_for(x); 578 LIR_Opr tmp = new_register(T_INT); 579 __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0)); 580 __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info)); 581 info = state_for(x); 582 583 if (x->op() == Bytecodes::_irem) { 584 __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL); 585 } else if (x->op() == Bytecodes::_idiv) { 586 __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL); 587 } 588 589 } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) { 590 if (right.is_constant() 591 && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) { 592 right.load_nonconstant(); 593 } else { 594 right.load_item(); 595 } 596 rlock_result(x); 597 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr); 598 } else { 599 assert (x->op() == Bytecodes::_imul, "expect imul"); 600 if (right.is_constant()) { 601 jint c = right.get_jint_constant(); 602 if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) { 603 right_arg->dont_load_item(); 604 } else { 605 // Cannot use constant op. 606 right_arg->load_item(); 607 } 608 } else { 609 right.load_item(); 610 } 611 rlock_result(x); 612 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT)); 613 } 614 } 615 616 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { 617 // when an operand with use count 1 is the left operand, then it is 618 // likely that no move for 2-operand-LIR-form is necessary 619 if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { 620 x->swap_operands(); 621 } 622 623 ValueTag tag = x->type()->tag(); 624 assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters"); 625 switch (tag) { 626 case floatTag: 627 case doubleTag: do_ArithmeticOp_FPU(x); return; 628 case longTag: do_ArithmeticOp_Long(x); return; 629 case intTag: do_ArithmeticOp_Int(x); return; 630 } 631 ShouldNotReachHere(); 632 } 633 634 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr 635 void LIRGenerator::do_ShiftOp(ShiftOp* x) { 636 637 LIRItem left(x->x(), this); 638 LIRItem right(x->y(), this); 639 640 left.load_item(); 641 642 rlock_result(x); 643 if (right.is_constant()) { 644 right.dont_load_item(); 645 646 switch (x->op()) { 647 case Bytecodes::_ishl: { 648 int c = right.get_jint_constant() & 0x1f; 649 __ shift_left(left.result(), c, x->operand()); 650 break; 651 } 652 case Bytecodes::_ishr: { 653 int c = right.get_jint_constant() & 0x1f; 654 __ shift_right(left.result(), c, x->operand()); 655 break; 656 } 657 case Bytecodes::_iushr: { 658 int c = right.get_jint_constant() & 0x1f; 659 __ unsigned_shift_right(left.result(), c, x->operand()); 660 break; 661 } 662 case Bytecodes::_lshl: { 663 int c = right.get_jint_constant() & 0x3f; 664 __ shift_left(left.result(), c, x->operand()); 665 break; 666 } 667 case Bytecodes::_lshr: { 668 int c = right.get_jint_constant() & 0x3f; 669 __ shift_right(left.result(), c, x->operand()); 670 break; 671 } 672 case Bytecodes::_lushr: { 673 int c = right.get_jint_constant() & 0x3f; 674 __ unsigned_shift_right(left.result(), c, x->operand()); 675 break; 676 } 677 default: 678 ShouldNotReachHere(); 679 } 680 } else { 681 right.load_item(); 682 LIR_Opr tmp = new_register(T_INT); 683 switch (x->op()) { 684 case Bytecodes::_ishl: { 685 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp); 686 __ shift_left(left.result(), tmp, x->operand(), tmp); 687 break; 688 } 689 case Bytecodes::_ishr: { 690 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp); 691 __ shift_right(left.result(), tmp, x->operand(), tmp); 692 break; 693 } 694 case Bytecodes::_iushr: { 695 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp); 696 __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp); 697 break; 698 } 699 case Bytecodes::_lshl: { 700 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp); 701 __ shift_left(left.result(), tmp, x->operand(), tmp); 702 break; 703 } 704 case Bytecodes::_lshr: { 705 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp); 706 __ shift_right(left.result(), tmp, x->operand(), tmp); 707 break; 708 } 709 case Bytecodes::_lushr: { 710 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp); 711 __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp); 712 break; 713 } 714 default: 715 ShouldNotReachHere(); 716 } 717 } 718 } 719 720 // _iand, _land, _ior, _lor, _ixor, _lxor 721 void LIRGenerator::do_LogicOp(LogicOp* x) { 722 723 LIRItem left(x->x(), this); 724 LIRItem right(x->y(), this); 725 726 left.load_item(); 727 728 rlock_result(x); 729 if (right.is_constant() 730 && ((right.type()->tag() == intTag 731 && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant())) 732 || (right.type()->tag() == longTag 733 && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) { 734 right.dont_load_item(); 735 } else { 736 right.load_item(); 737 } 738 switch (x->op()) { 739 case Bytecodes::_iand: 740 case Bytecodes::_land: 741 __ logical_and(left.result(), right.result(), x->operand()); break; 742 case Bytecodes::_ior: 743 case Bytecodes::_lor: 744 __ logical_or (left.result(), right.result(), x->operand()); break; 745 case Bytecodes::_ixor: 746 case Bytecodes::_lxor: 747 __ logical_xor(left.result(), right.result(), x->operand()); break; 748 default: Unimplemented(); 749 } 750 } 751 752 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg 753 void LIRGenerator::do_CompareOp(CompareOp* x) { 754 LIRItem left(x->x(), this); 755 LIRItem right(x->y(), this); 756 ValueTag tag = x->x()->type()->tag(); 757 if (tag == longTag) { 758 left.set_destroys_register(); 759 } 760 left.load_item(); 761 right.load_item(); 762 LIR_Opr reg = rlock_result(x); 763 764 if (x->x()->type()->is_float_kind()) { 765 Bytecodes::Code code = x->op(); 766 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl)); 767 } else if (x->x()->type()->tag() == longTag) { 768 __ lcmp2int(left.result(), right.result(), reg); 769 } else { 770 Unimplemented(); 771 } 772 } 773 774 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) { 775 assert(x->number_of_arguments() == 4, "wrong type"); 776 LIRItem obj (x->argument_at(0), this); // object 777 LIRItem offset(x->argument_at(1), this); // offset of field 778 LIRItem cmp (x->argument_at(2), this); // value to compare with field 779 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp 780 781 assert(obj.type()->tag() == objectTag, "invalid type"); 782 783 // In 64bit the type can be long, sparc doesn't have this assert 784 // assert(offset.type()->tag() == intTag, "invalid type"); 785 786 assert(cmp.type()->tag() == type->tag(), "invalid type"); 787 assert(val.type()->tag() == type->tag(), "invalid type"); 788 789 // get address of field 790 obj.load_item(); 791 offset.load_nonconstant(); 792 val.load_item(); 793 cmp.load_item(); 794 795 LIR_Address* a; 796 if(offset.result()->is_constant()) { 797 jlong c = offset.result()->as_jlong(); 798 if ((jlong)((jint)c) == c) { 799 a = new LIR_Address(obj.result(), 800 (jint)c, 801 as_BasicType(type)); 802 } else { 803 LIR_Opr tmp = new_register(T_LONG); 804 __ move(offset.result(), tmp); 805 a = new LIR_Address(obj.result(), 806 tmp, 807 as_BasicType(type)); 808 } 809 } else { 810 a = new LIR_Address(obj.result(), 811 offset.result(), 812 0, 813 as_BasicType(type)); 814 } 815 LIR_Opr addr = new_pointer_register(); 816 __ leal(LIR_OprFact::address(a), addr); 817 818 if (type == objectType) { // Write-barrier needed for Object fields. 819 // Do the pre-write barrier, if any. 820 pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */, 821 true /* do_load */, false /* patch */, NULL); 822 } 823 824 LIR_Opr result = rlock_result(x); 825 826 LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience 827 if (type == objectType) 828 __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT), 829 result); 830 else if (type == intType) 831 __ cas_int(addr, cmp.result(), val.result(), ill, ill); 832 else if (type == longType) 833 __ cas_long(addr, cmp.result(), val.result(), ill, ill); 834 else { 835 ShouldNotReachHere(); 836 } 837 838 __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result); 839 840 if (type == objectType) { // Write-barrier needed for Object fields. 841 // Seems to be precise 842 post_barrier(addr, val.result()); 843 } 844 } 845 846 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { 847 switch (x->id()) { 848 case vmIntrinsics::_dabs: 849 case vmIntrinsics::_dsqrt: { 850 assert(x->number_of_arguments() == 1, "wrong type"); 851 LIRItem value(x->argument_at(0), this); 852 value.load_item(); 853 LIR_Opr dst = rlock_result(x); 854 855 switch (x->id()) { 856 case vmIntrinsics::_dsqrt: { 857 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr); 858 break; 859 } 860 case vmIntrinsics::_dabs: { 861 __ abs(value.result(), dst, LIR_OprFact::illegalOpr); 862 break; 863 } 864 } 865 break; 866 } 867 case vmIntrinsics::_dlog10: // fall through 868 case vmIntrinsics::_dlog: // fall through 869 case vmIntrinsics::_dsin: // fall through 870 case vmIntrinsics::_dtan: // fall through 871 case vmIntrinsics::_dcos: // fall through 872 case vmIntrinsics::_dexp: { 873 assert(x->number_of_arguments() == 1, "wrong type"); 874 875 address runtime_entry = NULL; 876 switch (x->id()) { 877 case vmIntrinsics::_dsin: 878 runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); 879 break; 880 case vmIntrinsics::_dcos: 881 runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); 882 break; 883 case vmIntrinsics::_dtan: 884 runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); 885 break; 886 case vmIntrinsics::_dlog: 887 runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog); 888 break; 889 case vmIntrinsics::_dlog10: 890 runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); 891 break; 892 case vmIntrinsics::_dexp: 893 runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp); 894 break; 895 default: 896 ShouldNotReachHere(); 897 } 898 899 LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL); 900 set_result(x, result); 901 break; 902 } 903 case vmIntrinsics::_dpow: { 904 assert(x->number_of_arguments() == 2, "wrong type"); 905 address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); 906 LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL); 907 set_result(x, result); 908 break; 909 } 910 } 911 } 912 913 914 void LIRGenerator::do_ArrayCopy(Intrinsic* x) { 915 assert(x->number_of_arguments() == 5, "wrong type"); 916 917 // Make all state_for calls early since they can emit code 918 CodeEmitInfo* info = state_for(x, x->state()); 919 920 LIRItem src(x->argument_at(0), this); 921 LIRItem src_pos(x->argument_at(1), this); 922 LIRItem dst(x->argument_at(2), this); 923 LIRItem dst_pos(x->argument_at(3), this); 924 LIRItem length(x->argument_at(4), this); 925 926 // operands for arraycopy must use fixed registers, otherwise 927 // LinearScan will fail allocation (because arraycopy always needs a 928 // call) 929 930 // The java calling convention will give us enough registers 931 // so that on the stub side the args will be perfect already. 932 // On the other slow/special case side we call C and the arg 933 // positions are not similar enough to pick one as the best. 934 // Also because the java calling convention is a "shifted" version 935 // of the C convention we can process the java args trivially into C 936 // args without worry of overwriting during the xfer 937 938 src.load_item_force (FrameMap::as_oop_opr(j_rarg0)); 939 src_pos.load_item_force (FrameMap::as_opr(j_rarg1)); 940 dst.load_item_force (FrameMap::as_oop_opr(j_rarg2)); 941 dst_pos.load_item_force (FrameMap::as_opr(j_rarg3)); 942 length.load_item_force (FrameMap::as_opr(j_rarg4)); 943 944 LIR_Opr tmp = FrameMap::as_opr(j_rarg5); 945 946 set_no_result(x); 947 948 int flags; 949 ciArrayKlass* expected_type; 950 arraycopy_helper(x, &flags, &expected_type); 951 952 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint 953 } 954 955 void LIRGenerator::do_update_CRC32(Intrinsic* x) { 956 assert(UseCRC32Intrinsics, "why are we here?"); 957 // Make all state_for calls early since they can emit code 958 LIR_Opr result = rlock_result(x); 959 int flags = 0; 960 switch (x->id()) { 961 case vmIntrinsics::_updateCRC32: { 962 LIRItem crc(x->argument_at(0), this); 963 LIRItem val(x->argument_at(1), this); 964 // val is destroyed by update_crc32 965 val.set_destroys_register(); 966 crc.load_item(); 967 val.load_item(); 968 __ update_crc32(crc.result(), val.result(), result); 969 break; 970 } 971 case vmIntrinsics::_updateBytesCRC32: 972 case vmIntrinsics::_updateByteBufferCRC32: { 973 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32); 974 975 LIRItem crc(x->argument_at(0), this); 976 LIRItem buf(x->argument_at(1), this); 977 LIRItem off(x->argument_at(2), this); 978 LIRItem len(x->argument_at(3), this); 979 buf.load_item(); 980 off.load_nonconstant(); 981 982 LIR_Opr index = off.result(); 983 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; 984 if(off.result()->is_constant()) { 985 index = LIR_OprFact::illegalOpr; 986 offset += off.result()->as_jint(); 987 } 988 LIR_Opr base_op = buf.result(); 989 990 if (index->is_valid()) { 991 LIR_Opr tmp = new_register(T_LONG); 992 __ convert(Bytecodes::_i2l, index, tmp); 993 index = tmp; 994 } 995 996 if (offset) { 997 LIR_Opr tmp = new_pointer_register(); 998 __ add(base_op, LIR_OprFact::intConst(offset), tmp); 999 base_op = tmp; 1000 offset = 0; 1001 } 1002 1003 LIR_Address* a = new LIR_Address(base_op, 1004 index, 1005 offset, 1006 T_BYTE); 1007 BasicTypeList signature(3); 1008 signature.append(T_INT); 1009 signature.append(T_ADDRESS); 1010 signature.append(T_INT); 1011 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 1012 const LIR_Opr result_reg = result_register_for(x->type()); 1013 1014 LIR_Opr addr = new_pointer_register(); 1015 __ leal(LIR_OprFact::address(a), addr); 1016 1017 crc.load_item_force(cc->at(0)); 1018 __ move(addr, cc->at(1)); 1019 len.load_item_force(cc->at(2)); 1020 1021 __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args()); 1022 __ move(result_reg, result); 1023 1024 break; 1025 } 1026 default: { 1027 ShouldNotReachHere(); 1028 } 1029 } 1030 } 1031 1032 void LIRGenerator::do_update_CRC32C(Intrinsic* x) { 1033 assert(UseCRC32CIntrinsics, "why are we here?"); 1034 // Make all state_for calls early since they can emit code 1035 LIR_Opr result = rlock_result(x); 1036 int flags = 0; 1037 switch (x->id()) { 1038 case vmIntrinsics::_updateBytesCRC32C: 1039 case vmIntrinsics::_updateDirectByteBufferCRC32C: { 1040 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C); 1041 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; 1042 1043 LIRItem crc(x->argument_at(0), this); 1044 LIRItem buf(x->argument_at(1), this); 1045 LIRItem off(x->argument_at(2), this); 1046 LIRItem end(x->argument_at(3), this); 1047 1048 buf.load_item(); 1049 off.load_nonconstant(); 1050 end.load_nonconstant(); 1051 1052 // len = end - off 1053 LIR_Opr len = end.result(); 1054 LIR_Opr tmpA = new_register(T_INT); 1055 LIR_Opr tmpB = new_register(T_INT); 1056 __ move(end.result(), tmpA); 1057 __ move(off.result(), tmpB); 1058 __ sub(tmpA, tmpB, tmpA); 1059 len = tmpA; 1060 1061 LIR_Opr index = off.result(); 1062 if(off.result()->is_constant()) { 1063 index = LIR_OprFact::illegalOpr; 1064 offset += off.result()->as_jint(); 1065 } 1066 LIR_Opr base_op = buf.result(); 1067 1068 if (index->is_valid()) { 1069 LIR_Opr tmp = new_register(T_LONG); 1070 __ convert(Bytecodes::_i2l, index, tmp); 1071 index = tmp; 1072 } 1073 1074 if (offset) { 1075 LIR_Opr tmp = new_pointer_register(); 1076 __ add(base_op, LIR_OprFact::intConst(offset), tmp); 1077 base_op = tmp; 1078 offset = 0; 1079 } 1080 1081 LIR_Address* a = new LIR_Address(base_op, 1082 index, 1083 offset, 1084 T_BYTE); 1085 BasicTypeList signature(3); 1086 signature.append(T_INT); 1087 signature.append(T_ADDRESS); 1088 signature.append(T_INT); 1089 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 1090 const LIR_Opr result_reg = result_register_for(x->type()); 1091 1092 LIR_Opr addr = new_pointer_register(); 1093 __ leal(LIR_OprFact::address(a), addr); 1094 1095 crc.load_item_force(cc->at(0)); 1096 __ move(addr, cc->at(1)); 1097 __ move(len, cc->at(2)); 1098 1099 __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args()); 1100 __ move(result_reg, result); 1101 1102 break; 1103 } 1104 default: { 1105 ShouldNotReachHere(); 1106 } 1107 } 1108 } 1109 1110 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) { 1111 assert(x->number_of_arguments() == 3, "wrong type"); 1112 assert(UseFMA, "Needs FMA instructions support."); 1113 LIRItem value(x->argument_at(0), this); 1114 LIRItem value1(x->argument_at(1), this); 1115 LIRItem value2(x->argument_at(2), this); 1116 1117 value.load_item(); 1118 value1.load_item(); 1119 value2.load_item(); 1120 1121 LIR_Opr calc_input = value.result(); 1122 LIR_Opr calc_input1 = value1.result(); 1123 LIR_Opr calc_input2 = value2.result(); 1124 LIR_Opr calc_result = rlock_result(x); 1125 1126 switch (x->id()) { 1127 case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break; 1128 case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break; 1129 default: ShouldNotReachHere(); 1130 } 1131 } 1132 1133 void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { 1134 fatal("vectorizedMismatch intrinsic is not implemented on this platform"); 1135 } 1136 1137 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f 1138 // _i2b, _i2c, _i2s 1139 void LIRGenerator::do_Convert(Convert* x) { 1140 LIRItem value(x->value(), this); 1141 value.load_item(); 1142 LIR_Opr input = value.result(); 1143 LIR_Opr result = rlock(x); 1144 1145 // arguments of lir_convert 1146 LIR_Opr conv_input = input; 1147 LIR_Opr conv_result = result; 1148 ConversionStub* stub = NULL; 1149 1150 __ convert(x->op(), conv_input, conv_result); 1151 1152 assert(result->is_virtual(), "result must be virtual register"); 1153 set_result(x, result); 1154 } 1155 1156 void LIRGenerator::do_NewInstance(NewInstance* x) { 1157 #ifndef PRODUCT 1158 if (PrintNotLoaded && !x->klass()->is_loaded()) { 1159 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); 1160 } 1161 #endif 1162 CodeEmitInfo* info = state_for(x, x->state()); 1163 LIR_Opr reg = result_register_for(x->type()); 1164 new_instance(reg, x->klass(), x->is_unresolved(), 1165 FrameMap::r2_oop_opr, 1166 FrameMap::r5_oop_opr, 1167 FrameMap::r4_oop_opr, 1168 LIR_OprFact::illegalOpr, 1169 FrameMap::r3_metadata_opr, info); 1170 LIR_Opr result = rlock_result(x); 1171 __ move(reg, result); 1172 } 1173 1174 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { 1175 CodeEmitInfo* info = state_for(x, x->state()); 1176 1177 LIRItem length(x->length(), this); 1178 length.load_item_force(FrameMap::r19_opr); 1179 1180 LIR_Opr reg = result_register_for(x->type()); 1181 LIR_Opr tmp1 = FrameMap::r2_oop_opr; 1182 LIR_Opr tmp2 = FrameMap::r4_oop_opr; 1183 LIR_Opr tmp3 = FrameMap::r5_oop_opr; 1184 LIR_Opr tmp4 = reg; 1185 LIR_Opr klass_reg = FrameMap::r3_metadata_opr; 1186 LIR_Opr len = length.result(); 1187 BasicType elem_type = x->elt_type(); 1188 1189 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); 1190 1191 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); 1192 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); 1193 1194 LIR_Opr result = rlock_result(x); 1195 __ move(reg, result); 1196 } 1197 1198 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { 1199 LIRItem length(x->length(), this); 1200 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction 1201 // and therefore provide the state before the parameters have been consumed 1202 CodeEmitInfo* patching_info = NULL; 1203 if (!x->klass()->is_loaded() || PatchALot) { 1204 patching_info = state_for(x, x->state_before()); 1205 } 1206 1207 CodeEmitInfo* info = state_for(x, x->state()); 1208 1209 LIR_Opr reg = result_register_for(x->type()); 1210 LIR_Opr tmp1 = FrameMap::r2_oop_opr; 1211 LIR_Opr tmp2 = FrameMap::r4_oop_opr; 1212 LIR_Opr tmp3 = FrameMap::r5_oop_opr; 1213 LIR_Opr tmp4 = reg; 1214 LIR_Opr klass_reg = FrameMap::r3_metadata_opr; 1215 1216 length.load_item_force(FrameMap::r19_opr); 1217 LIR_Opr len = length.result(); 1218 1219 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info); 1220 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass()); 1221 if (obj == ciEnv::unloaded_ciobjarrayklass()) { 1222 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error"); 1223 } 1224 klass2reg_with_patching(klass_reg, obj, patching_info); 1225 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path); 1226 1227 LIR_Opr result = rlock_result(x); 1228 __ move(reg, result); 1229 } 1230 1231 1232 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { 1233 Values* dims = x->dims(); 1234 int i = dims->length(); 1235 LIRItemList* items = new LIRItemList(i, i, NULL); 1236 while (i-- > 0) { 1237 LIRItem* size = new LIRItem(dims->at(i), this); 1238 items->at_put(i, size); 1239 } 1240 1241 // Evaluate state_for early since it may emit code. 1242 CodeEmitInfo* patching_info = NULL; 1243 if (!x->klass()->is_loaded() || PatchALot) { 1244 patching_info = state_for(x, x->state_before()); 1245 1246 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so 1247 // clone all handlers (NOTE: Usually this is handled transparently 1248 // by the CodeEmitInfo cloning logic in CodeStub constructors but 1249 // is done explicitly here because a stub isn't being used). 1250 x->set_exception_handlers(new XHandlers(x->exception_handlers())); 1251 } 1252 CodeEmitInfo* info = state_for(x, x->state()); 1253 1254 i = dims->length(); 1255 while (i-- > 0) { 1256 LIRItem* size = items->at(i); 1257 size->load_item(); 1258 1259 store_stack_parameter(size->result(), in_ByteSize(i*4)); 1260 } 1261 1262 LIR_Opr klass_reg = FrameMap::r0_metadata_opr; 1263 klass2reg_with_patching(klass_reg, x->klass(), patching_info); 1264 1265 LIR_Opr rank = FrameMap::r19_opr; 1266 __ move(LIR_OprFact::intConst(x->rank()), rank); 1267 LIR_Opr varargs = FrameMap::r2_opr; 1268 __ move(FrameMap::sp_opr, varargs); 1269 LIR_OprList* args = new LIR_OprList(3); 1270 args->append(klass_reg); 1271 args->append(rank); 1272 args->append(varargs); 1273 LIR_Opr reg = result_register_for(x->type()); 1274 __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), 1275 LIR_OprFact::illegalOpr, 1276 reg, args, info); 1277 1278 LIR_Opr result = rlock_result(x); 1279 __ move(reg, result); 1280 } 1281 1282 void LIRGenerator::do_BlockBegin(BlockBegin* x) { 1283 // nothing to do for now 1284 } 1285 1286 void LIRGenerator::do_CheckCast(CheckCast* x) { 1287 LIRItem obj(x->obj(), this); 1288 1289 CodeEmitInfo* patching_info = NULL; 1290 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) { 1291 // must do this before locking the destination register as an oop register, 1292 // and before the obj is loaded (the latter is for deoptimization) 1293 patching_info = state_for(x, x->state_before()); 1294 } 1295 obj.load_item(); 1296 1297 // info for exceptions 1298 CodeEmitInfo* info_for_exception = 1299 (x->needs_exception_state() ? state_for(x) : 1300 state_for(x, x->state_before(), true /*ignore_xhandler*/)); 1301 1302 CodeStub* stub; 1303 if (x->is_incompatible_class_change_check()) { 1304 assert(patching_info == NULL, "can't patch this"); 1305 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); 1306 } else if (x->is_invokespecial_receiver_check()) { 1307 assert(patching_info == NULL, "can't patch this"); 1308 stub = new DeoptimizeStub(info_for_exception, 1309 Deoptimization::Reason_class_check, 1310 Deoptimization::Action_none); 1311 } else { 1312 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); 1313 } 1314 LIR_Opr reg = rlock_result(x); 1315 LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 1316 if (!x->klass()->is_loaded() || UseCompressedClassPointers) { 1317 tmp3 = new_register(objectType); 1318 } 1319 __ checkcast(reg, obj.result(), x->klass(), 1320 new_register(objectType), new_register(objectType), tmp3, 1321 x->direct_compare(), info_for_exception, patching_info, stub, 1322 x->profiled_method(), x->profiled_bci()); 1323 } 1324 1325 void LIRGenerator::do_InstanceOf(InstanceOf* x) { 1326 LIRItem obj(x->obj(), this); 1327 1328 // result and test object may not be in same register 1329 LIR_Opr reg = rlock_result(x); 1330 CodeEmitInfo* patching_info = NULL; 1331 if ((!x->klass()->is_loaded() || PatchALot)) { 1332 // must do this before locking the destination register as an oop register 1333 patching_info = state_for(x, x->state_before()); 1334 } 1335 obj.load_item(); 1336 LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 1337 if (!x->klass()->is_loaded() || UseCompressedClassPointers) { 1338 tmp3 = new_register(objectType); 1339 } 1340 __ instanceof(reg, obj.result(), x->klass(), 1341 new_register(objectType), new_register(objectType), tmp3, 1342 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); 1343 } 1344 1345 void LIRGenerator::do_If(If* x) { 1346 assert(x->number_of_sux() == 2, "inconsistency"); 1347 ValueTag tag = x->x()->type()->tag(); 1348 bool is_safepoint = x->is_safepoint(); 1349 1350 If::Condition cond = x->cond(); 1351 1352 LIRItem xitem(x->x(), this); 1353 LIRItem yitem(x->y(), this); 1354 LIRItem* xin = &xitem; 1355 LIRItem* yin = &yitem; 1356 1357 if (tag == longTag) { 1358 // for longs, only conditions "eql", "neq", "lss", "geq" are valid; 1359 // mirror for other conditions 1360 if (cond == If::gtr || cond == If::leq) { 1361 cond = Instruction::mirror(cond); 1362 xin = &yitem; 1363 yin = &xitem; 1364 } 1365 xin->set_destroys_register(); 1366 } 1367 xin->load_item(); 1368 1369 if (tag == longTag) { 1370 if (yin->is_constant() 1371 && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) { 1372 yin->dont_load_item(); 1373 } else { 1374 yin->load_item(); 1375 } 1376 } else if (tag == intTag) { 1377 if (yin->is_constant() 1378 && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) { 1379 yin->dont_load_item(); 1380 } else { 1381 yin->load_item(); 1382 } 1383 } else { 1384 yin->load_item(); 1385 } 1386 1387 // add safepoint before generating condition code so it can be recomputed 1388 if (x->is_safepoint()) { 1389 // increment backedge counter if needed 1390 increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci()); 1391 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); 1392 } 1393 set_no_result(x); 1394 1395 LIR_Opr left = xin->result(); 1396 LIR_Opr right = yin->result(); 1397 1398 __ cmp(lir_cond(cond), left, right); 1399 // Generate branch profiling. Profiling code doesn't kill flags. 1400 profile_branch(x, cond); 1401 move_to_phi(x->state()); 1402 if (x->x()->type()->is_float_kind()) { 1403 __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux()); 1404 } else { 1405 __ branch(lir_cond(cond), right->type(), x->tsux()); 1406 } 1407 assert(x->default_sux() == x->fsux(), "wrong destination above"); 1408 __ jump(x->default_sux()); 1409 } 1410 1411 LIR_Opr LIRGenerator::getThreadPointer() { 1412 return FrameMap::as_pointer_opr(rthread); 1413 } 1414 1415 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); } 1416 1417 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, 1418 CodeEmitInfo* info) { 1419 __ volatile_store_mem_reg(value, address, info); 1420 } 1421 1422 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, 1423 CodeEmitInfo* info) { 1424 // 8179954: We need to make sure that the code generated for 1425 // volatile accesses forms a sequentially-consistent set of 1426 // operations when combined with STLR and LDAR. Without a leading 1427 // membar it's possible for a simple Dekker test to fail if loads 1428 // use LD;DMB but stores use STLR. This can happen if C2 compiles 1429 // the stores in one method and C1 compiles the loads in another. 1430 if (! UseBarriersForVolatile) { 1431 __ membar(); 1432 } 1433 1434 __ volatile_load_mem_reg(address, result, info); 1435 } 1436 1437 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset, 1438 BasicType type, bool is_volatile) { 1439 LIR_Address* addr = new LIR_Address(src, offset, type); 1440 __ load(addr, dst); 1441 } 1442 1443 1444 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, 1445 BasicType type, bool is_volatile) { 1446 LIR_Address* addr = new LIR_Address(src, offset, type); 1447 bool is_obj = (type == T_ARRAY || type == T_OBJECT); 1448 if (is_obj) { 1449 // Do the pre-write barrier, if any. 1450 pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */, 1451 true /* do_load */, false /* patch */, NULL); 1452 __ move(data, addr); 1453 assert(src->is_register(), "must be register"); 1454 // Seems to be a precise address 1455 post_barrier(LIR_OprFact::address(addr), data); 1456 } else { 1457 __ move(data, addr); 1458 } 1459 } 1460 1461 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { 1462 BasicType type = x->basic_type(); 1463 LIRItem src(x->object(), this); 1464 LIRItem off(x->offset(), this); 1465 LIRItem value(x->value(), this); 1466 1467 src.load_item(); 1468 off.load_nonconstant(); 1469 1470 // We can cope with a constant increment in an xadd 1471 if (! (x->is_add() 1472 && value.is_constant() 1473 && can_inline_as_constant(x->value()))) { 1474 value.load_item(); 1475 } 1476 1477 LIR_Opr dst = rlock_result(x, type); 1478 LIR_Opr data = value.result(); 1479 bool is_obj = (type == T_ARRAY || type == T_OBJECT); 1480 LIR_Opr offset = off.result(); 1481 1482 if (data == dst) { 1483 LIR_Opr tmp = new_register(data->type()); 1484 __ move(data, tmp); 1485 data = tmp; 1486 } 1487 1488 LIR_Address* addr; 1489 if (offset->is_constant()) { 1490 jlong l = offset->as_jlong(); 1491 assert((jlong)((jint)l) == l, "offset too large for constant"); 1492 jint c = (jint)l; 1493 addr = new LIR_Address(src.result(), c, type); 1494 } else { 1495 addr = new LIR_Address(src.result(), offset, type); 1496 } 1497 1498 LIR_Opr tmp = new_register(T_INT); 1499 LIR_Opr ptr = LIR_OprFact::illegalOpr; 1500 1501 if (x->is_add()) { 1502 __ xadd(LIR_OprFact::address(addr), data, dst, tmp); 1503 } else { 1504 if (is_obj) { 1505 // Do the pre-write barrier, if any. 1506 ptr = new_pointer_register(); 1507 __ add(src.result(), off.result(), ptr); 1508 pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */, 1509 true /* do_load */, false /* patch */, NULL); 1510 } 1511 __ xchg(LIR_OprFact::address(addr), data, dst, tmp); 1512 if (is_obj) { 1513 post_barrier(ptr, data); 1514 } 1515 } 1516 }