--- old/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	2019-03-11 14:24:48.822356225 +0100
+++ new/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	2019-03-11 14:24:48.618356227 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -199,7 +199,7 @@
     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
   } else if (opr->is_constant()) {
     LIR_Const* const_opr = opr->as_constant_ptr();
-    if (const_opr->type() == T_OBJECT) {
+    if (const_opr->type() == T_OBJECT || const_opr->type() == T_VALUETYPE) {
       __ push_oop(const_opr->as_jobject());
     } else if (const_opr->type() == T_INT) {
       __ push_jint(const_opr->as_jint());
@@ -629,6 +629,7 @@
       break;
     }
 
+    case T_VALUETYPE: // Fall through
     case T_OBJECT: {
       if (patch_code != lir_patch_none) {
         jobject2reg_with_patching(dest->as_register(), info);
@@ -711,6 +712,7 @@
       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
       break;
 
+    case T_VALUETYPE: // Fall through
    case T_OBJECT:
       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
       break;
@@ -750,6 +752,7 @@
       __ movptr(as_Address(addr), c->as_jint_bits());
       break;
 
+    case T_VALUETYPE: // fall through
     case T_OBJECT: // fall through
     case T_ARRAY:
       if (c->as_jobject() == NULL) {
@@ -838,14 +841,14 @@
     }
 #endif
     assert(src->is_single_cpu(), "must match");
-    if (src->type() == T_OBJECT) {
+    if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
       __ verify_oop(src->as_register());
     }
     move_regs(src->as_register(), dest->as_register());
 
   } else if (dest->is_double_cpu()) {
 #ifdef _LP64
-    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
+    if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
       // Surprising to me but we can see move of a long to t_object
       __ verify_oop(src->as_register());
       move_regs(src->as_register(), dest->as_register_lo());
@@ -916,7 +919,7 @@
 
   if (src->is_single_cpu()) {
     Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
-    if (type == T_OBJECT || type == T_ARRAY) {
+    if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
       __ verify_oop(src->as_register());
       __ movptr (dst, src->as_register());
     } else if (type == T_METADATA) {
@@ -962,7 +965,7 @@
   PatchingStub* patch = NULL;
   Register compressed_src = rscratch1;
 
-  if (type == T_ARRAY || type == T_OBJECT) {
+  if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
     __ verify_oop(src->as_register());
 #ifdef _LP64
     if (UseCompressedOops && !wide) {
@@ -1007,6 +1010,7 @@
       break;
     }
 
+    case T_VALUETYPE: // fall through
     case T_ARRAY:   // fall through
     case T_OBJECT:  // fall through
       if (UseCompressedOops && !wide) {
@@ -1097,7 +1101,7 @@
   assert(dest->is_register(), "should not call otherwise");
 
   if (dest->is_single_cpu()) {
-    if (type == T_ARRAY || type == T_OBJECT) {
+    if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
       __ verify_oop(dest->as_register());
     } else if (type == T_METADATA) {
@@ -1138,7 +1142,7 @@
 
 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
   if (src->is_single_stack()) {
-    if (type == T_OBJECT || type == T_ARRAY) {
+    if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {
@@ -1177,7 +1181,7 @@
   LIR_Address* addr = src->as_address_ptr();
   Address from_addr = as_Address(addr);
 
-  if (addr->base()->type() == T_OBJECT) {
+  if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) {
     __ verify_oop(addr->base()->as_pointer_register());
   }
 
@@ -1230,6 +1234,7 @@
       break;
     }
 
+    case T_VALUETYPE: // fall through
     case T_OBJECT: // fall through
     case T_ARRAY:  // fall through
       if (UseCompressedOops && !wide) {
@@ -1339,7 +1344,7 @@
     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
   }
 
-  if (type == T_ARRAY || type == T_OBJECT) {
+  if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 #ifdef _LP64
     if (UseCompressedOops && !wide) {
       __ decode_heap_oop(dest->as_register());
@@ -1576,7 +1581,7 @@
     Register len =  op->len()->as_register();
     LP64_ONLY( __ movslq(len, len); )
 
-    if (UseSlowPath ||
+    if (UseSlowPath || op->type() == T_VALUETYPE ||
         (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
         (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
       __ jmp(*op->stub()->entry());
@@ -1674,20 +1679,22 @@
 
   assert_different_registers(obj, k_RInfo, klass_RInfo);
 
-  __ cmpptr(obj, (int32_t)NULL_WORD);
-  if (op->should_profile()) {
-    Label not_null;
-    __ jccb(Assembler::notEqual, not_null);
-    // Object is null; update MDO and exit
-    Register mdo  = klass_RInfo;
-    __ mov_metadata(mdo, md->constant_encoding());
-    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
-    int header_bits = BitData::null_seen_byte_constant();
-    __ orb(data_addr, header_bits);
-    __ jmp(*obj_is_null);
-    __ bind(not_null);
-  } else {
-    __ jcc(Assembler::equal, *obj_is_null);
+  if (op->need_null_check()) {
+    __ cmpptr(obj, (int32_t)NULL_WORD);
+    if (op->should_profile()) {
+      Label not_null;
+      __ jccb(Assembler::notEqual, not_null);
+      // Object is null; update MDO and exit
+      Register mdo  = klass_RInfo;
+      __ mov_metadata(mdo, md->constant_encoding());
+      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
+      int header_bits = BitData::null_seen_byte_constant();
+      __ orb(data_addr, header_bits);
+      __ jmp(*obj_is_null);
+      __ bind(not_null);
+    } else {
+      __ jcc(Assembler::equal, *obj_is_null);
+    }
   }
 
   if (!k->is_loaded()) {
@@ -1898,6 +1905,26 @@
 }
 
+void LIR_Assembler::emit_opFlattenedStoreCheck(LIR_OpFlattenedStoreCheck* op) {
+  Klass* k = (Klass*)(op->element_klass()->constant_encoding());
+  assert(k->is_klass(), "must be a loaded klass");
+  add_debug_info_for_null_check_here(op->info_for_exception());
+
+#ifdef _LP64
+  if (UseCompressedClassPointers) {
+    __ movl(op->tmp1()->as_register(), Address(op->object()->as_register(), oopDesc::klass_offset_in_bytes()));
+    __ cmp_narrow_klass(op->tmp1()->as_register(), k);
+  } else {
+    __ movq(op->tmp1()->as_register(), Address(op->object()->as_register(), oopDesc::klass_offset_in_bytes()));
+    __ cmpq(op->tmp1()->as_register(), op->tmp2()->as_register());
+  }
+#else
+  Unimplemented(); // FIXME
+#endif
+
+  __ jcc(Assembler::notEqual, *op->stub()->entry());
+  __ bind(*op->stub()->continuation());
+}
 
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
@@ -2494,7 +2521,7 @@
   } else {
 #ifdef _LP64
     Register r_lo;
-    if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
+    if (right->type() == T_OBJECT || right->type() == T_ARRAY || right->type() == T_VALUETYPE) {
       r_lo = right->as_register();
     } else {
       r_lo = right->as_register_lo();
@@ -2607,15 +2634,15 @@
     Register reg1 = opr1->as_register();
     if (opr2->is_single_cpu()) {
       // cpu register - cpu register
-      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
        __ cmpoop(reg1, opr2->as_register());
      } else {
-        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
+        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
-      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
@@ -2625,7 +2652,7 @@
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        __ cmpl(reg1, c->as_jint());
-      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == NULL) {
@@ -2725,7 +2752,7 @@
  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
 #ifdef _LP64
-    if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+    if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      __ movoop(rscratch1, c->as_jobject());
    }
@@ -2737,7 +2764,7 @@
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
-    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
 #ifdef _LP64
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
@@ -3018,6 +3045,21 @@
 }
 
+void LIR_Assembler::arraycopy_flat_check(Register obj, Register tmp, CodeStub* slow_path) {
+  Address klass_addr = Address(obj, oopDesc::klass_offset_in_bytes());
+  if (UseCompressedClassPointers) {
+    __ movl(tmp, klass_addr);
+    LP64_ONLY(__ decode_klass_not_null(tmp));
+  } else {
+    __ movptr(tmp, klass_addr);
+  }
+  __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
+  __ sarl(tmp, Klass::_lh_array_tag_shift);
+  __ cmpl(tmp, Klass::_lh_array_tag_vt_value);
+  __ jcc(Assembler::equal, *slow_path->entry());
+}
+
+
 // This code replaces a call to arraycopy; no exception may
 // be thrown in this code, they must be thrown in the System.arraycopy
 // activation frame; we could save some checks if this would not be the case
@@ -3038,6 +3080,30 @@
   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
 
+  if (flags & LIR_OpArrayCopy::always_slow_path) {
+    __ jmp(*stub->entry());
+    __ bind(*stub->continuation());
+    return;
+  }
+
+  if (flags & LIR_OpArrayCopy::src_flat_check) {
+    arraycopy_flat_check(src, tmp, stub);
+  }
+
+  if (flags & LIR_OpArrayCopy::dst_flat_check) {
+    arraycopy_flat_check(dst, tmp, stub);
+  }
+
+  if (basic_type == T_VALUETYPE) {
+    assert(flags & (LIR_OpArrayCopy::always_slow_path |
+                    LIR_OpArrayCopy::src_flat_check |
+                    LIR_OpArrayCopy::dst_flat_check), "must have checked");
+    // If either src or dst is (or maybe) a flattened array, one of the 3 checks
+    // above would have caught it, and taken the slow path. So when we come here,
+    // the array must be a (non-flat) object array.
+    basic_type = T_OBJECT;
+  }
+
   // if we don't know anything, just go through the generic arraycopy
   if (default_type == NULL) {
     // save outgoing arguments on stack in case call to System.arraycopy is needed
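
Note (not part of the patch): the new arraycopy_flat_check helper detects a flattened value-type array by loading the object's Klass, reading the packed layout helper, and comparing its array-tag bits against the value-type tag; a match takes the slow path. A minimal C-level sketch of the equivalent logic, assuming the HotSpot Klass API used in the diff (is_flattened_value_array is an illustrative name, not part of the patch):

    // Sketch only: the patch emits this test as x86 code through the
    // MacroAssembler; comments map each line to the emitted instructions.
    static bool is_flattened_value_array(oopDesc* obj) {
      Klass* k  = obj->klass();                      // movl/movptr + decode_klass_not_null
      jint  lh  = k->layout_helper();                // movl(tmp, Address(tmp, Klass::layout_helper_offset()))
      int   tag = lh >> Klass::_lh_array_tag_shift;  // sarl(tmp, Klass::_lh_array_tag_shift)
      return tag == Klass::_lh_array_tag_vt_value;   // cmpl + jcc(equal, *slow_path->entry())
    }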