1 /*
2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
2361 }
2362 }
2363
2364
2365 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2366 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2367 md = method->method_data_or_null();
2368 assert(md != NULL, "Sanity");
2369 data = md->bci_to_data(bci);
2370 assert(data != NULL, "need data for checkcast");
2371 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2372 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2373 // The offset is large so bias the mdo by the base of the slot so
2374 // that the ld can use simm16s to reference the slots of the data.
2375 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2376 }
2377 }
2378
2379
// Emits the shared type-check code used by checkcast and instanceof.
// Branches to *obj_is_null on a null object and to *success on a passing
// check; *failure is bound at the end so the failing path falls through to
// the caller's failure code. For checkcast (move_obj_to_dst) the object is
// also copied into the result register.
2380 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2381 Register obj = op->object()->as_register();
2382 Register k_RInfo = op->tmp1()->as_register();
2383 Register klass_RInfo = op->tmp2()->as_register();
2384 Register Rtmp1 = op->tmp3()->as_register();
2385 Register dst = op->result_opr()->as_register();
2386 ciKlass* k = op->klass();
2387 bool should_profile = op->should_profile();
2388 bool move_obj_to_dst = (op->code() == lir_checkcast);
2389 // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
2390 bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1);
2391 bool restore_obj = move_obj_to_dst && reg_conflict;
2392
// Null check: compare obj against 0; CR0 is consumed by the bne below
// (profiling path) and by code in the elided region.
2393 __ cmpdi(CCR0, obj, 0);
2394 if (move_obj_to_dst || reg_conflict) {
2395 __ mr_if_needed(dst, obj);
// If obj aliases one of the temps, continue working on the copy in dst.
2396 if (reg_conflict) { obj = dst; }
2397 }
2398
2399 ciMethodData* md = NULL;
2400 ciProfileData* data = NULL;
2401 int mdo_offset_bias = 0;
2402 if (should_profile) {
2403 ciMethod* method = op->profiled_method();
2404 assert(method != NULL, "Should have method");
2405 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2406
2407 Register mdo = k_RInfo;
2408 Register data_val = Rtmp1;
2409 Label not_null;
2410 __ bne(CCR0, not_null);
// Null seen: set the null_seen bit in the profile's flags byte, then take
// the caller's null path.
2411 metadata2reg(md->constant_encoding(), mdo);
2412 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2413 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2414 __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2415 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2416 __ b(*obj_is_null);
2417 __ bind(not_null);
2443 // Fall through to failure case.
2444 }
2445 } else {
2446 bool need_slow_path = true;
2447 if (k->is_loaded()) {
// A loaded klass whose super_check_offset is not the secondary-super-cache
// slot can be decided entirely by the fast path; no stub call needed.
2448 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2449 need_slow_path = false;
2450 }
2451 // Perform the fast part of the checking logic.
2452 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
2453 failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
2454 } else {
2455 // Perform the fast part of the checking logic.
2456 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
2457 }
2458 if (!need_slow_path) {
2459 if (!should_profile) { __ b(*success); }
2460 } else {
2461 // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2462 address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2463 //__ load_const_optimized(Rtmp1, entry, R0);
2464 __ calculate_address_from_global_toc(Rtmp1, entry, true, true, false);
2465 __ mtctr(Rtmp1);
2466 __ bctrl(); // sets CR0
2467 if (should_profile) {
2468 __ bne(CCR0, *failure_target);
2469 // Fall through to success case.
2470 } else {
2471 __ beq(CCR0, *success);
2472 // Fall through to failure case.
2473 }
2474 }
2475 }
2476
2477 if (should_profile) {
2478 Register mdo = k_RInfo, recv = klass_RInfo;
2479 assert_different_registers(mdo, recv, Rtmp1);
// Success: record the receiver klass in the MDO, then branch to success.
2480 __ bind(profile_cast_success);
2481 metadata2reg(md->constant_encoding(), mdo);
2482 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2483 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
2484 __ b(*success);
2485
2486 // Cast failure case.
// Failures are accounted by subtracting counter_increment from the count slot.
2487 __ bind(profile_cast_failure);
2488 metadata2reg(md->constant_encoding(), mdo);
2489 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2490 __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2491 __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
2492 __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2493 }
2494
2495 __ bind(*failure);
2496
2497 if (restore_obj) {
// obj was redirected into dst above; put the object back into its original
// register so the failure path sees it where the caller expects it.
2498 __ mr(op->object()->as_register(), dst);
2499 // Fall through to failure case.
2500 }
2501 }
2502
2503
// Dispatches LIR_OpTypeCheck by opcode: array store check (inline), or
// checkcast / instanceof via emit_typecheck_helper.
2504 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2505 LIR_Code code = op->code();
2506 if (code == lir_store_check) {
2507 Register value = op->object()->as_register();
2508 Register array = op->array()->as_register();
2509 Register k_RInfo = op->tmp1()->as_register();
2510 Register klass_RInfo = op->tmp2()->as_register();
2511 Register Rtmp1 = op->tmp3()->as_register();
2512 bool should_profile = op->should_profile();
2513
2514 __ verify_oop(value);
2515 CodeStub* stub = op->stub();
2516 // Check if it needs to be profiled.
2517 ciMethodData* md = NULL;
2518 ciProfileData* data = NULL;
2519 int mdo_offset_bias = 0;
2520 if (should_profile) {
// Profiling success path: record the stored value's klass in the MDO.
// (mdo/recv/tmp1/done are set up in lines elided from this excerpt.)
2573 metadata2reg(md->constant_encoding(), mdo);
2574 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2575 __ load_klass(recv, value);
2576 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2577 __ b(done);
2578
2579 // Cast failure case.
// Failures decrement the profile count slot by counter_increment.
2580 __ bind(failure);
2581 metadata2reg(md->constant_encoding(), mdo);
2582 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
// NOTE(review): data_addr is constructed but the ld/std below use the raw
// offset form directly; data_addr appears unused in the visible lines.
2583 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2584 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2585 __ addi(tmp1, tmp1, -DataLayout::counter_increment);
2586 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2587 }
// Failing stores branch to the runtime stub; done is the all-good exit.
2588 __ b(*stub->entry());
2589 __ bind(done);
2590
2591 } else if (code == lir_checkcast) {
2592 Label success, failure;
2593 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success); // Moves obj to dst.
// The helper's failure path falls through into this stub branch.
2594 __ b(*op->stub()->entry());
2595 __ align(32, 12);
2596 __ bind(success);
2597 } else if (code == lir_instanceof) {
2598 Register dst = op->result_opr()->as_register();
2599 Label success, failure, done;
// Null objects take the failure label, so dst becomes 0 for null input.
2600 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
2601 __ li(dst, 0);
2602 __ b(done);
2603 __ align(32, 12);
2604 __ bind(success);
2605 __ li(dst, 1);
2606 __ bind(done);
2607 } else {
2608 ShouldNotReachHere();
2609 }
2610 }
2611
2612
2613 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2614 Register addr = op->addr()->as_pointer_register();
2615 Register cmp_value = noreg, new_value = noreg;
2616 bool is_64bit = false;
|
1 /*
2 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
2361 }
2362 }
2363
2364
2365 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2366 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2367 md = method->method_data_or_null();
2368 assert(md != NULL, "Sanity");
2369 data = md->bci_to_data(bci);
2370 assert(data != NULL, "need data for checkcast");
2371 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2372 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2373 // The offset is large so bias the mdo by the base of the slot so
2374 // that the ld can use simm16s to reference the slots of the data.
2375 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2376 }
2377 }
2378
2379
// Emits the shared type-check code used by checkcast and instanceof.
// Branches to *obj_is_null on a null object and to *success on a passing
// check; *failure is bound at the end so the failing path falls through to
// the caller's failure code. Unlike older revisions, obj is kept in its
// original register throughout (required at safepoints for the patching
// stub); any conflicting temp is redirected into dst instead.
2380 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2381 const Register obj = op->object()->as_register(); // Needs to live in this register at safepoint (patching stub).
2382 Register k_RInfo = op->tmp1()->as_register();
2383 Register klass_RInfo = op->tmp2()->as_register();
2384 Register Rtmp1 = op->tmp3()->as_register();
2385 Register dst = op->result_opr()->as_register();
2386 ciKlass* k = op->klass();
2387 bool should_profile = op->should_profile();
2388 // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
// Resolve an alias between obj and a temp by using dst for that temp.
2389 bool reg_conflict = false;
2390 if (obj == k_RInfo) {
2391 k_RInfo = dst;
2392 reg_conflict = true;
2393 } else if (obj == klass_RInfo) {
2394 klass_RInfo = dst;
2395 reg_conflict = true;
2396 } else if (obj == Rtmp1) {
2397 Rtmp1 = dst;
2398 reg_conflict = true;
2399 }
2400 assert_different_registers(obj, k_RInfo, klass_RInfo, Rtmp1);
2401
// Null check: compare obj against 0; CR0 is consumed by the bne below
// (profiling path) and by code in the elided region.
2402 __ cmpdi(CCR0, obj, 0);
2403
2404 ciMethodData* md = NULL;
2405 ciProfileData* data = NULL;
2406 int mdo_offset_bias = 0;
2407 if (should_profile) {
2408 ciMethod* method = op->profiled_method();
2409 assert(method != NULL, "Should have method");
2410 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2411
2412 Register mdo = k_RInfo;
2413 Register data_val = Rtmp1;
2414 Label not_null;
2415 __ bne(CCR0, not_null);
// Null seen: set the null_seen bit in the profile's flags byte, then take
// the caller's null path.
2416 metadata2reg(md->constant_encoding(), mdo);
2417 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2418 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2419 __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2420 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2421 __ b(*obj_is_null);
2422 __ bind(not_null);
2448 // Fall through to failure case.
2449 }
2450 } else {
2451 bool need_slow_path = true;
2452 if (k->is_loaded()) {
// A loaded klass whose super_check_offset is not the secondary-super-cache
// slot can be decided entirely by the fast path; no stub call needed.
2453 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2454 need_slow_path = false;
2455 }
2456 // Perform the fast part of the checking logic.
2457 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
2458 failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
2459 } else {
2460 // Perform the fast part of the checking logic.
2461 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
2462 }
2463 if (!need_slow_path) {
2464 if (!should_profile) { __ b(*success); }
2465 } else {
2466 // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2467 address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2468 // Stub needs fixed registers (tmp1-3).
2469 Register original_k_RInfo = op->tmp1()->as_register();
2470 Register original_klass_RInfo = op->tmp2()->as_register();
2471 Register original_Rtmp1 = op->tmp3()->as_register();
// obj must survive the stub call when a conflicting temp was redirected
// (checkcast); klass_RInfo must survive when profiling needs the receiver.
2472 bool keep_obj_alive = reg_conflict && (op->code() == lir_checkcast);
2473 bool keep_klass_RInfo_alive = (obj == original_klass_RInfo) && should_profile;
// Stash obj in R0 before the temps are moved into the stub's fixed
// registers, then park it in dst across the call.
2474 if (keep_obj_alive && (obj != original_Rtmp1)) { __ mr(R0, obj); }
2475 __ mr_if_needed(original_k_RInfo, k_RInfo);
2476 __ mr_if_needed(original_klass_RInfo, klass_RInfo);
2477 if (keep_obj_alive) { __ mr(dst, (obj == original_Rtmp1) ? obj : R0); }
2478 //__ load_const_optimized(original_Rtmp1, entry, R0);
2479 __ calculate_address_from_global_toc(original_Rtmp1, entry, true, true, false);
2480 __ mtctr(original_Rtmp1);
2481 __ bctrl(); // sets CR0
// Restore obj from dst (saving the receiver klass copy in R0 first if the
// profiling path still needs it).
2482 if (keep_obj_alive) {
2483 if (keep_klass_RInfo_alive) { __ mr(R0, obj); }
2484 __ mr(obj, dst);
2485 }
2486 if (should_profile) {
2487 __ bne(CCR0, *failure_target);
2488 if (keep_klass_RInfo_alive) { __ mr(klass_RInfo, keep_obj_alive ? R0 : obj); }
2489 // Fall through to success case.
2490 } else {
2491 __ beq(CCR0, *success);
2492 // Fall through to failure case.
2493 }
2494 }
2495 }
2496
2497 if (should_profile) {
2498 Register mdo = k_RInfo, recv = klass_RInfo;
2499 assert_different_registers(mdo, recv, Rtmp1);
// Success: record the receiver klass in the MDO, then branch to success.
2500 __ bind(profile_cast_success);
2501 metadata2reg(md->constant_encoding(), mdo);
2502 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2503 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
2504 __ b(*success);
2505
2506 // Cast failure case.
// Failures are accounted by subtracting counter_increment from the count slot.
2507 __ bind(profile_cast_failure);
2508 metadata2reg(md->constant_encoding(), mdo);
2509 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2510 __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2511 __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
2512 __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2513 }
2514
2515 __ bind(*failure);
2516 }
2517
2518
// Dispatches LIR_OpTypeCheck by opcode: array store check (inline), or
// checkcast / instanceof via emit_typecheck_helper.
2519 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2520 LIR_Code code = op->code();
2521 if (code == lir_store_check) {
2522 Register value = op->object()->as_register();
2523 Register array = op->array()->as_register();
2524 Register k_RInfo = op->tmp1()->as_register();
2525 Register klass_RInfo = op->tmp2()->as_register();
2526 Register Rtmp1 = op->tmp3()->as_register();
2527 bool should_profile = op->should_profile();
2528
2529 __ verify_oop(value);
2530 CodeStub* stub = op->stub();
2531 // Check if it needs to be profiled.
2532 ciMethodData* md = NULL;
2533 ciProfileData* data = NULL;
2534 int mdo_offset_bias = 0;
2535 if (should_profile) {
// Profiling success path: record the stored value's klass in the MDO.
// (mdo/recv/tmp1/done are set up in lines elided from this excerpt.)
2588 metadata2reg(md->constant_encoding(), mdo);
2589 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2590 __ load_klass(recv, value);
2591 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2592 __ b(done);
2593
2594 // Cast failure case.
// Failures decrement the profile count slot by counter_increment.
2595 __ bind(failure);
2596 metadata2reg(md->constant_encoding(), mdo);
2597 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
// NOTE(review): data_addr is constructed but the ld/std below use the raw
// offset form directly; data_addr appears unused in the visible lines.
2598 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2599 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2600 __ addi(tmp1, tmp1, -DataLayout::counter_increment);
2601 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2602 }
// Failing stores branch to the runtime stub; done is the all-good exit.
2603 __ b(*stub->entry());
2604 __ bind(done);
2605
2606 } else if (code == lir_checkcast) {
2607 Label success, failure;
2608 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success);
// The helper's failure path falls through into this stub branch.
2609 __ b(*op->stub()->entry());
2610 __ align(32, 12);
2611 __ bind(success);
// On success, move the (unchanged) object into the checkcast result register;
// the helper keeps obj in its original register.
2612 __ mr_if_needed(op->result_opr()->as_register(), op->object()->as_register());
2613 } else if (code == lir_instanceof) {
2614 Register dst = op->result_opr()->as_register();
2615 Label success, failure, done;
// Null objects take the failure label, so dst becomes 0 for null input.
2616 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
2617 __ li(dst, 0);
2618 __ b(done);
2619 __ align(32, 12);
2620 __ bind(success);
2621 __ li(dst, 1);
2622 __ bind(done);
2623 } else {
2624 ShouldNotReachHere();
2625 }
2626 }
2627
2628
2629 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2630 Register addr = op->addr()->as_pointer_register();
2631 Register cmp_value = noreg, new_value = noreg;
2632 bool is_64bit = false;
|