2373 // The offset is large so bias the mdo by the base of the slot so
2374 // that the ld can use simm16s to reference the slots of the data.
2375 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2376 }
2377 }
2378
2379
2380 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2381 Register obj = op->object()->as_register();
2382 Register k_RInfo = op->tmp1()->as_register();
2383 Register klass_RInfo = op->tmp2()->as_register();
2384 Register Rtmp1 = op->tmp3()->as_register();
2385 Register dst = op->result_opr()->as_register();
2386 ciKlass* k = op->klass();
2387 bool should_profile = op->should_profile();
2388 bool move_obj_to_dst = (op->code() == lir_checkcast);
2389 // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
2390 bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1);
2391 bool restore_obj = move_obj_to_dst && reg_conflict;
2392
2393 __ cmpdi(CCR0, obj, 0);
2394 if (move_obj_to_dst || reg_conflict) {
2395 __ mr_if_needed(dst, obj);
2396 if (reg_conflict) { obj = dst; }
2397 }
2398
2399 ciMethodData* md = NULL;
2400 ciProfileData* data = NULL;
2401 int mdo_offset_bias = 0;
2402 if (should_profile) {
2403 ciMethod* method = op->profiled_method();
2404 assert(method != NULL, "Should have method");
2405 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2406
2407 Register mdo = k_RInfo;
2408 Register data_val = Rtmp1;
2409 Label not_null;
2410 __ bne(CCR0, not_null);
2411 metadata2reg(md->constant_encoding(), mdo);
2412 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2413 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2414 __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2415 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2416 __ b(*obj_is_null);
2417 __ bind(not_null);
2418 } else {
2419 __ beq(CCR0, *obj_is_null);
2420 }
2421
2422 // get object class
2423 __ load_klass(klass_RInfo, obj);
2424
2425 if (k->is_loaded()) {
2426 metadata2reg(k->constant_encoding(), k_RInfo);
2427 } else {
2428 klass2reg_with_patching(k_RInfo, op->info_for_patch());
2429 }
2430
2431 Label profile_cast_failure, failure_restore_obj, profile_cast_success;
2432 Label *failure_target = should_profile ? &profile_cast_failure : failure;
2433 Label *success_target = should_profile ? &profile_cast_success : success;
2434
2435 if (op->fast_check()) {
2436 assert_different_registers(klass_RInfo, k_RInfo);
2437 __ cmpd(CCR0, k_RInfo, klass_RInfo);
2438 if (should_profile) {
2439 __ bne(CCR0, *failure_target);
2440 // Fall through to success case.
2441 } else {
2442 __ beq(CCR0, *success);
2443 // Fall through to failure case.
2444 }
2445 } else {
2446 bool need_slow_path = true;
2447 if (k->is_loaded()) {
2448 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2449 need_slow_path = false;
|
2373 // The offset is large so bias the mdo by the base of the slot so
2374 // that the ld can use simm16s to reference the slots of the data.
2375 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2376 }
2377 }
2378
2379
2380 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2381 Register obj = op->object()->as_register();
2382 Register k_RInfo = op->tmp1()->as_register();
2383 Register klass_RInfo = op->tmp2()->as_register();
2384 Register Rtmp1 = op->tmp3()->as_register();
2385 Register dst = op->result_opr()->as_register();
2386 ciKlass* k = op->klass();
2387 bool should_profile = op->should_profile();
2388 bool move_obj_to_dst = (op->code() == lir_checkcast);
2389 // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
2390 bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1);
2391 bool restore_obj = move_obj_to_dst && reg_conflict;
2392
2393 // Have to load early, so possible safepoint during patching runtime call
2394 // will find correct register state corresponding to beginning of instruction,
2395 // as it should be. Prior version shuffled oop out of obj register, making
2396 // it invisible to GC.
2397 Register ktmp = k_RInfo == obj ? klass_RInfo : k_RInfo;
2398 if (k->is_loaded()) {
2399 metadata2reg(k->constant_encoding(), ktmp);
2400 } else {
2401 klass2reg_with_patching(ktmp, op->info_for_patch());
2402 }
2403
2404 __ cmpdi(CCR0, obj, 0);
2405 if (move_obj_to_dst || reg_conflict) {
2406 __ mr_if_needed(dst, obj);
2407 if (reg_conflict) { obj = dst; }
2408 }
2409 __ mr_if_needed(k_RInfo, ktmp);
2410
2411 ciMethodData* md = NULL;
2412 ciProfileData* data = NULL;
2413 int mdo_offset_bias = 0;
2414 if (should_profile) {
2415 ciMethod* method = op->profiled_method();
2416 assert(method != NULL, "Should have method");
2417 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2418
2419 Register mdo = klass_RInfo;
2420 Register data_val = Rtmp1;
2421 Label not_null;
2422 __ bne(CCR0, not_null);
2423 metadata2reg(md->constant_encoding(), mdo);
2424 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2425 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2426 __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2427 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2428 __ b(*obj_is_null);
2429 __ bind(not_null);
2430 } else {
2431 __ beq(CCR0, *obj_is_null);
2432 }
2433
2434 // get object class
2435 __ load_klass(klass_RInfo, obj);
2436
2437 Label profile_cast_failure, failure_restore_obj, profile_cast_success;
2438 Label *failure_target = should_profile ? &profile_cast_failure : failure;
2439 Label *success_target = should_profile ? &profile_cast_success : success;
2440
2441 if (op->fast_check()) {
2442 assert_different_registers(klass_RInfo, k_RInfo);
2443 __ cmpd(CCR0, k_RInfo, klass_RInfo);
2444 if (should_profile) {
2445 __ bne(CCR0, *failure_target);
2446 // Fall through to success case.
2447 } else {
2448 __ beq(CCR0, *success);
2449 // Fall through to failure case.
2450 }
2451 } else {
2452 bool need_slow_path = true;
2453 if (k->is_loaded()) {
2454 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2455 need_slow_path = false;
|