--- old/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp 2019-03-28 21:10:35.320385377 -0700 +++ new/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp 2019-03-28 21:10:35.068376468 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,10 +45,10 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index); #endif -VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { +VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(true); - VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); + VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1); // Can be NULL if there is no free space in the code cache. if (s == NULL) { return NULL; @@ -62,6 +62,7 @@ int slop_delta = 0; // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation. const int index_dependent_slop = 0; + ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_value_offset() : Method::from_compiled_value_ro_offset(); ResourceMark rm; CodeBuffer cb(s->entry_point(), stub_code_length); @@ -118,7 +119,7 @@ Label L; __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); - __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD); + __ cmpptr(Address(method, entry_offset), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L); __ stop("Vtable entry is NULL"); __ bind(L); @@ -129,7 +130,7 @@ // method (rbx): Method* // rcx: receiver address ame_addr = __ pc(); - __ jmp( Address(rbx, Method::from_compiled_value_ro_offset())); + __ jmp( Address(rbx, entry_offset)); masm->flush(); slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets @@ -139,10 +140,11 @@ } -VtableStub* VtableStubs::create_itable_stub(int itable_index) { +VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(false); - VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); + ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_value_offset() : Method::from_compiled_value_ro_offset(); + VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1); // Can be NULL if there is no free space in the code cache. if (s == NULL) { return NULL; @@ -235,7 +237,7 @@ Label L2; __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L2); - __ cmpptr(Address(method, Method::from_compiled_value_ro_offset()), (int32_t)NULL_WORD); + __ cmpptr(Address(method, entry_offset), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L2); __ stop("compiler entrypoint is null"); __ bind(L2); @@ -243,7 +245,7 @@ #endif // ASSERT address ame_addr = __ pc(); - __ jmp(Address(method, Method::from_compiled_value_ro_offset())); + __ jmp(Address(method, entry_offset)); __ bind(L_no_such_interface); // Handle IncompatibleClassChangeError in itable stubs. 
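A C1-compiled caller does not scalarize value-type arguments, so a vtable/itable stub generated for it has to jump through the callee's from_compiled_value entry, while a stub for a C2 caller keeps only the receiver buffered and, as the hunks above show, uses from_compiled_value_ro. The standalone sketch below (hypothetical FakeMethod type and entry functions, not HotSpot code) illustrates the selection that entry_offset bakes into the generated stub:

// Sketch only: models the per-caller-kind entry selection, assuming two
// hypothetical entry points that stand in for Method::from_compiled_value_offset()
// and Method::from_compiled_value_ro_offset().
#include <cstdio>

struct FakeMethod {
    void (*from_compiled_value)();     // all value args passed as references (C1 callers)
    void (*from_compiled_value_ro)();  // receiver as reference, other value args scalarized (C2 callers)
};

static void value_entry()    { std::puts("jmp from_compiled_value    (C1 caller)"); }
static void value_ro_entry() { std::puts("jmp from_compiled_value_ro (C2 caller)"); }

// Mirrors: ByteSize entry_offset = caller_is_c1 ? from_compiled_value_offset()
//                                               : from_compiled_value_ro_offset();
static void stub_dispatch(const FakeMethod& m, bool caller_is_c1) {
    (caller_is_c1 ? m.from_compiled_value : m.from_compiled_value_ro)();
}

int main() {
    FakeMethod m = { value_entry, value_ro_entry };
    stub_dispatch(m, true);   // stub generated for a C1 caller
    stub_dispatch(m, false);  // stub generated for a C2 caller
    return 0;
}

Because the jump target now depends on the caller's compiler, a single shared stub per vtable index would hand a C1 caller an entry that expects scalarized arguments; that is why the stubs are keyed by caller kind in the vtableStubs.cpp changes further down.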
--- old/src/hotspot/share/code/compiledIC.cpp 2019-03-28 21:10:35.972408428 -0700 +++ new/src/hotspot/share/code/compiledIC.cpp 2019-03-28 21:10:35.716399377 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -243,7 +243,7 @@ // was due to running out of IC stubs, in which case the caller will refill IC // stubs and retry. bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, - bool& needs_ic_stub_refill, TRAPS) { + bool& needs_ic_stub_refill, bool caller_is_c1, TRAPS) { assert(CompiledICLocker::is_safe(_method), "mt unsafe call"); assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic"); assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?"); @@ -252,7 +252,7 @@ if (call_info->call_kind() == CallInfo::itable_call) { assert(bytecode == Bytecodes::_invokeinterface, ""); int itable_index = call_info->itable_index(); - entry = VtableStubs::find_itable_stub(itable_index); + entry = VtableStubs::find_itable_stub(itable_index, caller_is_c1); if (entry == NULL) { return false; } @@ -275,7 +275,7 @@ // Can be different than selected_method->vtable_index(), due to package-private etc. int vtable_index = call_info->vtable_index(); assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check"); - entry = VtableStubs::find_vtable_stub(vtable_index); + entry = VtableStubs::find_vtable_stub(vtable_index, caller_is_c1); if (entry == NULL) { return false; } @@ -510,6 +510,7 @@ bool is_optimized, bool static_bound, bool caller_is_nmethod, + bool caller_is_c1, CompiledICInfo& info, TRAPS) { CompiledMethod* method_code = method->code(); @@ -551,7 +552,8 @@ info.set_aot_entry(entry, method()); } else { // Use stub entry - info.set_interpreter_entry(method()->get_c2i_entry(), method()); + address entry = caller_is_c1 ? method()->get_c2i_value_entry() : method()->get_c2i_entry(); + info.set_interpreter_entry(entry, method()); } } else { // Use icholder entry @@ -656,7 +658,8 @@ // Compute settings for a CompiledStaticCall. Since we might have to set // the stub when calling to the interpreter, we need to return arguments. -void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) { +void CompiledStaticCall::compute_entry(const methodHandle& m, CompiledMethod* caller_nm, StaticCallInfo& info) { + bool caller_is_nmethod = caller_nm->is_nmethod(); CompiledMethod* m_code = m->code(); info._callee = m; if (m_code != NULL && m_code->is_in_use()) { @@ -673,7 +676,14 @@ // puts a converter-frame on the stack to save arguments. assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics"); info._to_interpreter = true; - info._entry = m()->get_c2i_entry(); + + if (caller_nm->is_c1()) { + // C1 -> interp: values passed as oops + info._entry = m()->get_c2i_value_entry(); + } else { + // C2 -> interp: values passed fields + info._entry = m()->get_c2i_entry(); + } } } --- old/src/hotspot/share/code/compiledIC.hpp 2019-03-28 21:10:36.616431196 -0700 +++ new/src/hotspot/share/code/compiledIC.hpp 2019-03-28 21:10:36.368422428 -0700 @@ -276,11 +276,11 @@ // Returns true if successful and false otherwise. 
The call can fail if memory // allocation in the code cache fails, or ic stub refill is required. - bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS); + bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, bool caller_is_c1, TRAPS); static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass, bool is_optimized, bool static_bound, bool caller_is_nmethod, - CompiledICInfo& info, TRAPS); + bool caller_is_c1, CompiledICInfo& info, TRAPS); // Location address instruction_address() const { return _call->instruction_address(); } @@ -363,7 +363,7 @@ static int reloc_to_aot_stub(); // Compute entry point given a method - static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info); + static void compute_entry(const methodHandle& m, CompiledMethod* caller_nm, StaticCallInfo& info); public: // Clean static call (will force resolving on next use) --- old/src/hotspot/share/code/compiledMethod.hpp 2019-03-28 21:10:37.248453541 -0700 +++ new/src/hotspot/share/code/compiledMethod.hpp 2019-03-28 21:10:36.996444631 -0700 @@ -214,6 +214,7 @@ virtual bool is_in_use() const = 0; virtual int comp_level() const = 0; + virtual bool is_c1() const { return false; } virtual int compile_id() const = 0; virtual address verified_entry_point() const = 0; --- old/src/hotspot/share/code/nmethod.hpp 2019-03-28 21:10:37.884476027 -0700 +++ new/src/hotspot/share/code/nmethod.hpp 2019-03-28 21:10:37.628466976 -0700 @@ -26,6 +26,7 @@ #define SHARE_CODE_NMETHOD_HPP #include "code/compiledMethod.hpp" +#include "compiler/compilerDefinitions.hpp" class DepChange; class DirectiveSet; @@ -380,7 +381,7 @@ } int comp_level() const { return _comp_level; } - + bool is_c1() const { return CompLevel_simple <= _comp_level && _comp_level <= CompLevel_full_profile; } void unlink_from_method(bool acquire_lock); // Support for oops in scopes and relocs: --- old/src/hotspot/share/code/vtableStubs.cpp 2019-03-28 21:10:38.516498371 -0700 +++ new/src/hotspot/share/code/vtableStubs.cpp 2019-03-28 21:10:38.264489462 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -206,18 +206,18 @@ } -address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) { +address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) { assert(vtable_index >= 0, "must be positive"); VtableStub* s; { MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag); - s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL; + s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index, caller_is_c1) : NULL; if (s == NULL) { if (is_vtable_stub) { - s = create_vtable_stub(vtable_index); + s = create_vtable_stub(vtable_index, caller_is_c1); } else { - s = create_itable_stub(vtable_index); + s = create_itable_stub(vtable_index, caller_is_c1); } // Creation of vtable or itable can fail if there is not enough free space in the code cache. 
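Since the generated stub differs by caller kind, caller_is_c1 has to become part of the stub cache key as well; the hunks that follow thread it through hash(), lookup(), enter() and matches(). (Per the nmethod.hpp change above, a caller counts as C1 when its comp level lies in the CompLevel_simple..CompLevel_full_profile range, i.e. levels 1 through 3.) A minimal sketch of such a keyed cache, with assumed names rather than the VM's actual table:

// Sketch only: the same (is_vtable, index) pair now maps to two different
// stubs, one per caller kind, so the lookup key carries the extra bit.
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

using StubKey = std::tuple<bool /*is_vtable*/, int /*index*/, bool /*caller_is_c1*/>;

static std::map<StubKey, std::string> stub_cache;

static const std::string& find_stub(bool is_vtable, int index, bool caller_is_c1) {
    StubKey key{is_vtable, index, caller_is_c1};
    auto it = stub_cache.find(key);
    if (it == stub_cache.end()) {
        // "Generate" a stub tailored to the caller kind and enter it in the cache.
        std::string stub = std::string(is_vtable ? "vtbl" : "itbl") +
                           "[" + std::to_string(index) + "] for " +
                           (caller_is_c1 ? "c1" : "c2") + " caller";
        it = stub_cache.emplace(key, stub).first;
    }
    return it->second;
}

int main() {
    // Same vtable index, different caller kinds: two distinct stubs.
    std::printf("%s\n", find_stub(true, 5, /*caller_is_c1=*/false).c_str());
    std::printf("%s\n", find_stub(true, 5, /*caller_is_c1=*/true).c_str());
    return 0;
}

With the extra key bit, find_stub(true, 5, true) and find_stub(true, 5, false) return two distinct stubs instead of sharing one, which matches why matches() now compares _caller_is_c1 too.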
@@ -225,9 +225,9 @@ return NULL; } - enter(is_vtable_stub, vtable_index, s); + enter(is_vtable_stub, vtable_index, caller_is_c1, s); if (PrintAdapterHandlers) { - tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT, + tty->print_cr("Decoding VtableStub (%s) %s[%d]@" INTX_FORMAT, caller_is_c1 ? "c1" : "full opt", is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location())); Disassembler::decode(s->code_begin(), s->code_end()); } @@ -235,7 +235,7 @@ // JvmtiDynamicCodeEventCollector and posted when this thread has released // all locks. if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub", + JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub", // FIXME: need to pass caller_is_c1?? s->code_begin(), s->code_end()); } } @@ -244,26 +244,29 @@ } -inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){ +inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) { // Assumption: receiver_location < 4 in most cases. int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index; + if (caller_is_c1) { + hash = 7 - hash; + } return (is_vtable_stub ? ~hash : hash) & mask; } -VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) { +VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) { assert_lock_strong(VtableStubs_lock); - unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index); + unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1); VtableStub* s = _table[hash]; - while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next(); + while( s && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next(); return s; } -void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) { +void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) { assert_lock_strong(VtableStubs_lock); - assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub"); - unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index); + assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub"); + unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1); // enter s at the beginning of the corresponding list s->set_next(_table[h]); _table[h] = s; @@ -273,7 +276,7 @@ VtableStub* VtableStubs::entry_point(address pc) { MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag); VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset()); - uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index()); + uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index(), stub->caller_is_c1()); VtableStub* s; for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {} return (s == stub) ? 
s : NULL; --- old/src/hotspot/share/code/vtableStubs.hpp 2019-03-28 21:10:39.160521139 -0700 +++ new/src/hotspot/share/code/vtableStubs.hpp 2019-03-28 21:10:38.908512230 -0700 @@ -86,12 +86,12 @@ static int _vtab_stub_size; // current size estimate for vtable stub (quasi-constant) static int _itab_stub_size; // current size estimate for itable stub (quasi-constant) - static VtableStub* create_vtable_stub(int vtable_index); - static VtableStub* create_itable_stub(int vtable_index); - static VtableStub* lookup (bool is_vtable_stub, int vtable_index); - static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s); - static inline uint hash (bool is_vtable_stub, int vtable_index); - static address find_stub (bool is_vtable_stub, int vtable_index); + static VtableStub* create_vtable_stub(int vtable_index, bool caller_is_c1); + static VtableStub* create_itable_stub(int vtable_index, bool caller_is_c1); + static VtableStub* lookup (bool is_vtable_stub, int vtable_index, bool caller_is_c1); + static void enter (bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s); + static inline uint hash (bool is_vtable_stub, int vtable_index, bool caller_is_c1); + static address find_stub (bool is_vtable_stub, int vtable_index, bool caller_is_c1); static void bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s, address npe_addr, address ame_addr, bool is_vtable_stub, int index, int slop_bytes, int index_dependent_slop); @@ -101,8 +101,8 @@ int padding); public: - static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); } - static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); } + static address find_vtable_stub(int vtable_index, bool caller_is_c1) { return find_stub(true, vtable_index, caller_is_c1); } + static address find_itable_stub(int itable_index, bool caller_is_c1) { return find_stub(false, itable_index, caller_is_c1); } static VtableStub* entry_point(address pc); // vtable stub entry point for a pc static bool contains(address pc); // is pc within any stub? @@ -126,13 +126,15 @@ short _ame_offset; // Where an AbstractMethodError might occur short _npe_offset; // Where a NullPointerException might occur bool _is_vtable_stub; // True if vtable stub, false, is itable stub + bool _caller_is_c1; // True if this is for a caller compiled by C1, + // which doesn't scalarize parameters. 
/* code follows here */ // The vtableStub code void* operator new(size_t size, int code_size) throw(); - VtableStub(bool is_vtable_stub, int index) + VtableStub(bool is_vtable_stub, int index, bool caller_is_c1) : _next(NULL), _index(index), _ame_offset(-1), _npe_offset(-1), - _is_vtable_stub(is_vtable_stub) {} + _is_vtable_stub(is_vtable_stub), _caller_is_c1(caller_is_c1) {} VtableStub* next() const { return _next; } int index() const { return _index; } static VMReg receiver_location() { return _receiver_location; } @@ -144,8 +146,8 @@ address entry_point() const { return code_begin(); } static int entry_offset() { return sizeof(class VtableStub); } - bool matches(bool is_vtable_stub, int index) const { - return _index == index && _is_vtable_stub == is_vtable_stub; + bool matches(bool is_vtable_stub, int index, bool caller_is_c1) const { + return _index == index && _is_vtable_stub == is_vtable_stub && _caller_is_c1 == caller_is_c1; } bool contains(address pc) const { return code_begin() <= pc && pc < code_end(); } @@ -173,6 +175,7 @@ // Query bool is_itable_stub() { return !_is_vtable_stub; } bool is_vtable_stub() { return _is_vtable_stub; } + bool caller_is_c1() { return _caller_is_c1; } bool is_abstract_method_error(address epc) { return epc == code_begin()+_ame_offset; } bool is_null_pointer_exception(address epc) { return epc == code_begin()+_npe_offset; } --- old/src/hotspot/share/oops/method.cpp 2019-03-28 21:10:39.784543201 -0700 +++ new/src/hotspot/share/oops/method.cpp 2019-03-28 21:10:39.532534291 -0700 @@ -134,6 +134,11 @@ return adapter()->get_c2i_entry(); } +address Method::get_c2i_value_entry() { + assert(adapter() != NULL, "must have"); + return adapter()->get_c2i_value_entry(); +} + address Method::get_c2i_unverified_entry() { assert(adapter() != NULL, "must have"); return adapter()->get_c2i_unverified_entry(); @@ -1163,6 +1168,12 @@ return _from_compiled_entry; } +address Method::verified_value_code_entry() { + debug_only(NoSafepointVerifier nsv;) + assert(_from_compiled_value_entry != NULL, "must be set"); + return _from_compiled_value_entry; +} + address Method::verified_value_ro_code_entry() { debug_only(NoSafepointVerifier nsv;) assert(_from_compiled_value_ro_entry != NULL, "must be set"); --- old/src/hotspot/share/oops/method.hpp 2019-03-28 21:10:40.456566960 -0700 +++ new/src/hotspot/share/oops/method.hpp 2019-03-28 21:10:40.200557909 -0700 @@ -454,6 +454,7 @@ // nmethod/verified compiler entry address verified_code_entry(); + address verified_value_code_entry(); address verified_value_ro_code_entry(); bool check_code() const; // Not inline to avoid circular ref CompiledMethod* volatile code() const; @@ -468,6 +469,7 @@ address get_i2c_entry(); address get_c2i_entry(); + address get_c2i_value_entry(); address get_c2i_unverified_entry(); AdapterHandlerEntry* adapter() const { return constMethod()->adapter(); --- old/src/hotspot/share/runtime/arguments.cpp 2019-03-28 21:10:41.096589587 -0700 +++ new/src/hotspot/share/runtime/arguments.cpp 2019-03-28 21:10:40.840580536 -0700 @@ -2066,22 +2066,22 @@ if (!EnableValhallaC1) { // C1 support for value types is incomplete. Don't use it by default. if (!FLAG_IS_DEFAULT(TieredCompilation)) { - warning("TieredCompilation disabled because value types are not supported by C1"); + warning("TieredCompilation disabled because value types are not fully supported by C1"); } FLAG_SET_CMDLINE(bool, TieredCompilation, false); } else { - if (TieredStopAtLevel > 1) { - warning("C1 doesn't work with C2 yet. 
Forcing TieredStopAtLevel=1"); - FLAG_SET_CMDLINE(intx, TieredStopAtLevel, 1); - } - if (ValueTypePassFieldsAsArgs) { - warning("C1 doesn't work with ValueTypePassFieldsAsArgs yet. Forcing ValueTypePassFieldsAsArgs=false"); - FLAG_SET_CMDLINE(bool, ValueTypePassFieldsAsArgs, false); - } - if (ValueTypeReturnedAsFields) { - warning("C1 doesn't work with ValueTypeReturnedAsFields yet. Forcing ValueTypeReturnedAsFields=false"); - FLAG_SET_CMDLINE(bool, ValueTypeReturnedAsFields, false); - } + /* + TEMP: to run the valuetype tests with C1, you need to use the following command-line: + + cd test/hotspot/jtreg/compiler/valhalla/valuetypes + jtreg -Dtest.c1=true \ + -vmoptions:-XX:+EnableValhallaC1 \ + -vmoptions:-XX:TieredStopAtLevel=1 \ + -vmoptions:-XX:-ValueTypePassFieldsAsArgs \ + -vmoptions:-XX:-ValueTypeReturnedAsFields \ + . + + */ } } if (!EnableValhalla && ACmpOnValues != 3) { --- old/src/hotspot/share/runtime/sharedRuntime.cpp 2019-03-28 21:10:41.772613487 -0700 +++ new/src/hotspot/share/runtime/sharedRuntime.cpp 2019-03-28 21:10:41.516604436 -0700 @@ -1247,9 +1247,10 @@ // Resolves a call. methodHandle SharedRuntime::resolve_helper(JavaThread *thread, bool is_virtual, - bool is_optimized, TRAPS) { + bool is_optimized, + bool* caller_is_c1, TRAPS) { methodHandle callee_method; - callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); + callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, caller_is_c1, THREAD); if (JvmtiExport::can_hotswap_or_post_breakpoint()) { int retry_count = 0; while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && @@ -1266,7 +1267,7 @@ guarantee((retry_count++ < 100), "Could not resolve to latest version of redefined method"); // method is redefined in the middle of resolve so re-try. - callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); + callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, caller_is_c1, THREAD); } } return callee_method; @@ -1297,10 +1298,11 @@ #endif bool is_nmethod = caller_nm->is_nmethod(); + bool caller_is_c1 = caller_nm->is_c1(); if (is_virtual) { Klass* receiver_klass = NULL; - if (ValueTypePassFieldsAsArgs && callee_method->method_holder()->is_value()) { + if (ValueTypePassFieldsAsArgs && !caller_is_c1 && callee_method->method_holder()->is_value()) { // If the receiver is a value type that is passed as fields, no oop is available receiver_klass = callee_method->method_holder(); } else { @@ -1309,11 +1311,11 @@ } bool static_bound = call_info.resolved_method()->can_be_statically_bound(); CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass, - is_optimized, static_bound, is_nmethod, virtual_call_info, + is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info, CHECK_false); } else { // static call - CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info); + CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info); } // grab lock, check for deoptimization and potentially patch caller @@ -1358,7 +1360,8 @@ // and are patched with the real destination of the call. 
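The resolve_*_call_C entry points in the hunks that follow all end with the same decision: which of the callee's three verified entries to return. A C1 caller always gets verified_value_code_entry, with every value argument passed as a reference; an optimized or static call from C2 gets verified_code_entry, the fully scalarized entry; an unoptimized virtual call from C2 gets verified_value_ro_code_entry, where only the receiver stays a reference. entry_for_handle_wrong_method() added to sharedRuntime.hpp centralizes the same logic. A sketch with hypothetical types, not the VM's Method class:

// Sketch only: the three-way callee entry selection used by the resolve and
// handle_wrong_method paths below.
#include <cassert>
#include <cstdio>

struct FakeCallee {
    const char* verified_code_entry;          // scalarized value args (C2, optimized/static)
    const char* verified_value_code_entry;    // all value args as references (C1 callers)
    const char* verified_value_ro_code_entry; // receiver as reference, rest scalarized (C2, IC dispatch)
};

static const char* entry_for(const FakeCallee& m, bool is_optimized, bool caller_is_c1) {
    if (caller_is_c1)  return m.verified_value_code_entry;
    if (is_optimized)  return m.verified_code_entry;
    return m.verified_value_ro_code_entry;
}

int main() {
    FakeCallee m = { "code_entry", "value_code_entry", "value_ro_code_entry" };
    std::printf("C1 caller            -> %s\n", entry_for(m, false, true));
    std::printf("C2 optimized/static  -> %s\n", entry_for(m, true,  false));
    std::printf("C2 virtual (IC miss) -> %s\n", entry_for(m, false, false));
    assert(entry_for(m, true, true) == m.verified_value_code_entry); // caller_is_c1 is checked first
    return 0;
}

The check order matters: caller_is_c1 is tested before is_optimized, so even an optimized call from C1 lands on the value entry.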
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread, bool is_virtual, - bool is_optimized, TRAPS) { + bool is_optimized, + bool* caller_is_c1, TRAPS) { ResourceMark rm(thread); RegisterMap cbl_map(thread, false); @@ -1367,6 +1370,7 @@ CodeBlob* caller_cb = caller_frame.cb(); guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method"); CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null(); + *caller_is_c1 = caller_nm->is_c1(); // make sure caller is not getting deoptimized // and removed before we are done with it. @@ -1477,15 +1481,14 @@ methodHandle callee_method; bool is_optimized = false; + bool caller_is_c1 = false; JRT_BLOCK - callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, CHECK_NULL); + callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, caller_is_c1, CHECK_NULL); // Return Method* through TLS thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); - assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); - return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry(); + return entry_for_handle_wrong_method(callee_method, is_optimized, caller_is_c1); JRT_END @@ -1517,15 +1520,14 @@ // Must be compiled to compiled path which is safe to stackwalk methodHandle callee_method; bool is_optimized = false; + bool caller_is_c1 = false; JRT_BLOCK // Force resolving of caller (if we called from compiled frame) - callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_NULL); + callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); - assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); - return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry(); + return entry_for_handle_wrong_method(callee_method, is_optimized, caller_is_c1); JRT_END // Handle abstract method call @@ -1563,26 +1565,32 @@ // resolve a static call and patch code JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread )) methodHandle callee_method; + bool caller_is_c1; JRT_BLOCK - callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL); + callee_method = SharedRuntime::resolve_helper(thread, false, false, &caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); - return callee_method->verified_code_entry(); + address entry = caller_is_c1 ? 
+ callee_method->verified_value_code_entry() : callee_method->verified_code_entry(); + assert(entry != NULL, "Jump to zero!"); + return entry; JRT_END // resolve virtual call and update inline cache to monomorphic JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread )) methodHandle callee_method; + bool caller_is_c1; JRT_BLOCK - callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL); + callee_method = SharedRuntime::resolve_helper(thread, true, false, &caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); - return callee_method->verified_value_ro_code_entry(); + address entry = caller_is_c1 ? + callee_method->verified_value_code_entry() : callee_method->verified_value_ro_code_entry(); + assert(entry != NULL, "Jump to zero!"); + return entry; JRT_END @@ -1590,13 +1598,16 @@ // monomorphic, so it has no inline cache). Patch code to resolved target. JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread)) methodHandle callee_method; + bool caller_is_c1; JRT_BLOCK - callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL); + callee_method = SharedRuntime::resolve_helper(thread, true, true, &caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); - return callee_method->verified_code_entry(); + address entry = caller_is_c1 ? + callee_method->verified_value_code_entry() : callee_method->verified_code_entry(); + assert(entry != NULL, "Jump to zero!"); + return entry; JRT_END // The handle_ic_miss_helper_internal function returns false if it failed due @@ -1607,7 +1618,7 @@ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, - bool& needs_ic_stub_refill, bool& is_optimized, TRAPS) { + bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) { CompiledICLocker ml(caller_nm); CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); bool should_be_mono = false; @@ -1656,6 +1667,7 @@ receiver_klass, inline_cache->is_optimized(), false, caller_nm->is_nmethod(), + caller_nm->is_c1(), info, CHECK_false); if (!inline_cache->set_to_monomorphic(info)) { needs_ic_stub_refill = true; @@ -1664,7 +1676,7 @@ } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) { // Potential change to megamorphic - bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false); + bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false); if (needs_ic_stub_refill) { return false; } @@ -1680,7 +1692,7 @@ return true; } -methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, TRAPS) { +methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, bool& caller_is_c1, TRAPS) { ResourceMark rm(thread); CallInfo call_info; Bytecodes::Code bc; @@ -1700,7 +1712,7 @@ // did this would still be the correct thing to do for it too, hence no ifdef. 
// if (call_info.resolved_method()->can_be_statically_bound()) { - methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_(methodHandle())); + methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, caller_is_c1, CHECK_(methodHandle())); if (TraceCallFixup) { RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(®_map); @@ -1750,12 +1762,13 @@ frame caller_frame = thread->last_frame().sender(®_map); CodeBlob* cb = caller_frame.cb(); CompiledMethod* caller_nm = cb->as_compiled_method(); + caller_is_c1 = caller_nm->is_c1(); for (;;) { ICRefillVerifier ic_refill_verifier; bool needs_ic_stub_refill = false; bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method, - bc, call_info, needs_ic_stub_refill, is_optimized, CHECK_(methodHandle())); + bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle())); if (successful || !needs_ic_stub_refill) { return callee_method; } else { @@ -1787,7 +1800,7 @@ // sites, and static call sites. Typically used to change a call sites // destination from compiled to interpreted. // -methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_optimized, TRAPS) { +methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_optimized, bool& caller_is_c1, TRAPS) { ResourceMark rm(thread); RegisterMap reg_map(thread, false); frame stub_frame = thread->last_frame(); @@ -1805,6 +1818,7 @@ // Check for static or virtual call bool is_static_call = false; CompiledMethod* caller_nm = CodeCache::find_compiled(pc); + caller_is_c1 = caller_nm->is_c1(); // Default call_addr is the location of the "basic" call. // Determine the address of the call we a reresolving. With --- old/src/hotspot/share/runtime/sharedRuntime.hpp 2019-03-28 21:10:42.456637670 -0700 +++ new/src/hotspot/share/runtime/sharedRuntime.hpp 2019-03-28 21:10:42.204628761 -0700 @@ -55,7 +55,8 @@ Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS); static methodHandle resolve_sub_helper(JavaThread *thread, bool is_virtual, - bool is_optimized, TRAPS); + bool is_optimized, + bool* caller_is_c1, TRAPS); // Shared stub locations @@ -323,7 +324,8 @@ // compiled code. static methodHandle resolve_helper(JavaThread *thread, bool is_virtual, - bool is_optimized, TRAPS); + bool is_optimized, + bool* caller_is_c1, TRAPS); private: // deopt blob @@ -331,22 +333,35 @@ static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, - bool& needs_ic_stub_refill, bool& is_optimized, TRAPS); + bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS); public: static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; } // Resets a call-site in compiled code so it will get resolved again. - static methodHandle reresolve_call_site(JavaThread *thread, bool& is_optimized, TRAPS); + static methodHandle reresolve_call_site(JavaThread *thread, bool& is_optimized, bool& caller_is_c1, TRAPS); // In the code prolog, if the klass comparison fails, the inline cache // misses and the call site is patched to megamorphic - static methodHandle handle_ic_miss_helper(JavaThread* thread, bool& is_optimized, TRAPS); + static methodHandle handle_ic_miss_helper(JavaThread* thread, bool& is_optimized, bool& caller_is_c1, TRAPS); // Find the method that called us. 
static methodHandle find_callee_method(JavaThread* thread, TRAPS); + static address entry_for_handle_wrong_method(methodHandle callee_method, bool is_optimized, bool caller_is_c1) { + assert(callee_method->verified_code_entry() != NULL, "Jump to zero!"); + assert(callee_method->verified_value_code_entry() != NULL, "Jump to zero!"); + assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!"); + if (caller_is_c1) { + return callee_method->verified_value_code_entry(); + } else if (is_optimized) { + return callee_method->verified_code_entry(); + } else { + return callee_method->verified_value_ro_code_entry(); + } + } + private: static Handle find_callee_info(JavaThread* thread, Bytecodes::Code& bc, --- old/test/hotspot/jtreg/compiler/valhalla/valuetypes/ValueTypeTest.java 2019-03-28 21:10:43.088660014 -0700 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/ValueTypeTest.java 2019-03-28 21:10:42.836651105 -0700 @@ -57,6 +57,7 @@ // Regular expressions used to match and count IR nodes. String[] match() default { }; int[] matchCount() default { }; + int compLevel() default ValueTypeTest.COMP_LEVEL_ANY; int valid() default ValueTypeTest.AllFlags; } @@ -77,6 +78,12 @@ @Retention(RetentionPolicy.RUNTIME) @interface DontCompile { } +// Force method compilation +@Retention(RetentionPolicy.RUNTIME) +@interface ForceCompile { + int compLevel() default ValueTypeTest.COMP_LEVEL_ANY; +} + // Number of warmup iterations @Retention(RetentionPolicy.RUNTIME) @interface Warmup { @@ -84,7 +91,8 @@ } public abstract class ValueTypeTest { - // Run "jtreg -Dtest.c1=true" to enable experimental C1 testing. + // Run "jtreg -Dtest.c1=true" to enable experimental C1 testing. This forces all + // compilable methods to be compiled with C1, regardless of the @Test(compLevel=?) setting. static final boolean TEST_C1 = Boolean.getBoolean("test.c1"); // Should we execute tests that assume (ValueType[] <: Object[])? 
@@ -107,18 +115,18 @@ private static final boolean DUMP_REPLAY = Boolean.parseBoolean(System.getProperty("DumpReplay", "false")); // Pre-defined settings - private static final List defaultFlags = Arrays.asList( + private static final String[] defaultFlags = { "-XX:-BackgroundCompilation", "-XX:CICompilerCount=1", "-XX:CompileCommand=quiet", "-XX:CompileCommand=compileonly,java.lang.invoke.*::*", "-XX:CompileCommand=compileonly,java.lang.Long::sum", "-XX:CompileCommand=compileonly,java.lang.Object::", - "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.*::*"); - private static final List printFlags = Arrays.asList( - "-XX:+PrintCompilation", "-XX:+PrintIdeal", "-XX:+PrintOptoAssembly"); - private static final List verifyFlags = Arrays.asList( + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.*::*"}; + private static final String[] printFlags = { + "-XX:+PrintCompilation", "-XX:+PrintIdeal", "-XX:+PrintOptoAssembly"}; + private static final String[] verifyFlags = { "-XX:+VerifyOops", "-XX:+VerifyStack", "-XX:+VerifyLastFrame", "-XX:+VerifyBeforeGC", "-XX:+VerifyAfterGC", - "-XX:+VerifyDuringGC", "-XX:+VerifyAdapterSharing"); + "-XX:+VerifyDuringGC", "-XX:+VerifyAdapterSharing"}; protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); protected static final int ValueTypePassFieldsAsArgsOn = 0x1; @@ -134,8 +142,16 @@ protected static final boolean ValueTypeArrayFlatten = (WHITE_BOX.getIntxVMFlag("ValueArrayElemMaxFlatSize") == -1); // FIXME - fix this if default of ValueArrayElemMaxFlatSize is changed protected static final boolean ValueTypeReturnedAsFields = (Boolean)WHITE_BOX.getVMFlag("ValueTypeReturnedAsFields"); protected static final boolean AlwaysIncrementalInline = (Boolean)WHITE_BOX.getVMFlag("AlwaysIncrementalInline"); - protected static final int COMP_LEVEL_ANY = -2; - protected static final int COMP_LEVEL_FULL_OPTIMIZATION = TEST_C1 ? 1 : 4; + protected static final long TieredStopAtLevel = (Long)WHITE_BOX.getVMFlag("TieredStopAtLevel"); + protected static final int COMP_LEVEL_ANY = -2; + protected static final int COMP_LEVEL_ALL = -2; + protected static final int COMP_LEVEL_AOT = -1; + protected static final int COMP_LEVEL_NONE = 0; + protected static final int COMP_LEVEL_SIMPLE = 1; // C1 + protected static final int COMP_LEVEL_LIMITED_PROFILE = 2; // C1, invocation & backedge counters + protected static final int COMP_LEVEL_FULL_PROFILE = 3; // C1, invocation & backedge counters + mdo + protected static final int COMP_LEVEL_FULL_OPTIMIZATION = 4; // C2 or JVMCI + protected static final Hashtable tests = new Hashtable(); protected static final boolean USE_COMPILER = WHITE_BOX.getBooleanVMFlag("UseCompiler"); protected static final boolean PRINT_IDEAL = WHITE_BOX.getBooleanVMFlag("PrintIdeal"); @@ -193,7 +209,10 @@ public String[] getVMParameters(int scenario) { if (TEST_C1) { return new String[] { - "-XX:+EnableValhallaC1", + "-XX:+EnableValhallaC1", + "-XX:TieredStopAtLevel=1", + "-XX:-ValueTypePassFieldsAsArgs", + "-XX:-ValueTypeReturnedAsFields" }; } @@ -340,36 +359,46 @@ // Spawn a new VM instance execute_vm(); } else { - // Execute tests + // Execute tests in the VM spawned by the above code. 
+ Asserts.assertTrue(args.length == 1 && args[0].equals("run"), "must be"); run(classes); } } private void execute_vm() throws Throwable { Asserts.assertFalse(tests.isEmpty(), "no tests to execute"); - ArrayList args = new ArrayList(defaultFlags); String[] vmInputArgs = InputArguments.getVmInputArgs(); for (String arg : vmInputArgs) { if (arg.startsWith("-XX:CompileThreshold")) { // Disable IR verification if non-default CompileThreshold is set VERIFY_IR = false; } + if (arg.startsWith("-XX:+EnableValhallaC1")) { + // Disable IR verification if C1 is used (FIXME!) + VERIFY_IR = false; + } } + // Each VM is launched with flags in this order, so the later ones can override the earlier one: + // defaultFlags + // VERIFY_IR/VERIFY_VM flags specified below + // vmInputArgs, which consists of: + // @run options + // getVMParameters() + // getExtraVMParameters() + String cmds[] = defaultFlags; if (VERIFY_IR) { // Add print flags for IR verification - args.addAll(printFlags); + cmds = concat(cmds, printFlags); // Always trap for exception throwing to not confuse IR verification - args.add("-XX:-OmitStackTraceInFastThrow"); + cmds = concat(cmds, "-XX:-OmitStackTraceInFastThrow"); } if (VERIFY_VM) { - args.addAll(verifyFlags); + cmds = concat(cmds, verifyFlags); } + cmds = concat(cmds, vmInputArgs); + // Run tests in own process and verify output - args.add(getClass().getName()); - args.add("run"); - // Spawn process with default JVM options from the test's run command - String[] cmds = Arrays.copyOf(vmInputArgs, vmInputArgs.length + args.size()); - System.arraycopy(args.toArray(), 0, cmds, vmInputArgs.length, args.size()); + cmds = concat(cmds, getClass().getName(), "run"); OutputAnalyzer oa = ProcessTools.executeTestJvm(cmds); // If ideal graph printing is enabled/supported, verify output String output = oa.getOutput(); @@ -511,6 +540,9 @@ WHITE_BOX.makeMethodNotCompilable(m, COMP_LEVEL_ANY, true); WHITE_BOX.makeMethodNotCompilable(m, COMP_LEVEL_ANY, false); WHITE_BOX.testSetDontInlineMethod(m, true); + } else if (m.isAnnotationPresent(ForceCompile.class)) { + int compLevel = getCompLevel(m.getAnnotation(ForceCompile.class)); + WHITE_BOX.enqueueMethodForCompilation(m, compLevel); } if (m.isAnnotationPresent(ForceInline.class)) { WHITE_BOX.testSetForceInlineMethod(m, true); @@ -520,7 +552,8 @@ } // Compile class initializers - WHITE_BOX.enqueueInitializerForCompilation(clazz, COMP_LEVEL_FULL_OPTIMIZATION); + int compLevel = getCompLevel(null); + WHITE_BOX.enqueueInitializerForCompilation(clazz, compLevel); } private void run(Class... classes) throws Exception { @@ -545,8 +578,9 @@ for (int i = 0; i < warmup; ++i) { verifier.invoke(this, true); } + int compLevel = getCompLevel(test.getAnnotation(Test.class)); // Trigger compilation - WHITE_BOX.enqueueMethodForCompilation(test, COMP_LEVEL_FULL_OPTIMIZATION); + WHITE_BOX.enqueueMethodForCompilation(test, compLevel); Asserts.assertTrue(!USE_COMPILER || WHITE_BOX.isMethodCompiled(test, false), test + " not compiled"); // Check result verifier.invoke(this, false); @@ -565,4 +599,29 @@ } } } + + // Choose the appropriate compilation level for a method, according to the given annotation. + // + // Currently, if TEST_C1 is true, we always use COMP_LEVEL_SIMPLE. Otherwise, if the + // compLevel is unspecified, the default is COMP_LEVEL_FULL_OPTIMIZATION. 
+ int getCompLevel(Object annotation) { + if (TEST_C1) { + return COMP_LEVEL_SIMPLE; + } + int compLevel; + if (annotation == null) { + compLevel = COMP_LEVEL_ANY; + } else if (annotation instanceof Test) { + compLevel = ((Test)annotation).compLevel(); + } else { + compLevel = ((ForceCompile)annotation).compLevel(); + } + if (compLevel == COMP_LEVEL_ANY) { + compLevel = COMP_LEVEL_FULL_OPTIMIZATION; + } + if (compLevel > (int)TieredStopAtLevel) { + compLevel = (int)TieredStopAtLevel; + } + return compLevel; + } } --- /dev/null 2019-02-25 13:26:02.045529497 -0800 +++ new/test/hotspot/jtreg/compiler/valhalla/valuetypes/TestCallingConventionC1.java 2019-03-28 21:10:43.488674157 -0700 @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package compiler.valhalla.valuetypes; + +import jdk.test.lib.Asserts; + +/* + * @test + * @summary Test calls from {C1} to {C2, Interpreter}, and vice versa. + * @library /testlibrary /test/lib /compiler/whitebox / + * @requires os.simpleArch == "x64" + * @compile -XDallowWithFieldOperator TestCallingConventionC1.java + * @run driver ClassFileInstaller sun.hotspot.WhiteBox jdk.test.lib.Platform + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+EnableValhalla + * compiler.valhalla.valuetypes.ValueTypeTest + * compiler.valhalla.valuetypes.TestCallingConventionC1 + */ +public class TestCallingConventionC1 extends ValueTypeTest { + public static final int C1 = COMP_LEVEL_SIMPLE; + public static final int C2 = COMP_LEVEL_FULL_OPTIMIZATION; + + @Override + public int getNumScenarios() { + return 2; + } + + @Override + public String[] getVMParameters(int scenario) { + switch (scenario) { + + // Default: both C1 and C2 are enabled, tierd compilation enabled + case 0: return new String[] {"-XX:+EnableValhallaC1", "-XX:CICompilerCount=2" + , "-XX:-CheckCompressedOops", "-XX:CompileCommand=print,*::test3*" + }; + // Only C1. Tierd compilation disabled. 
+ case 1: return new String[] {"-XX:+EnableValhallaC1", "-XX:TieredStopAtLevel=1"}; + } + return null; + } + + public static void main(String[] args) throws Throwable { + TestCallingConventionC1 test = new TestCallingConventionC1(); + test.run(args, + Point.class, + Functor.class, + Functor1.class, + Functor2.class, + Functor3.class, + Functor4.class); + } + + static value class Point { + final int x; + final int y; + public Point(int x, int y) { + this.x = x; + this.y = y; + } + + @DontCompile // FIXME -- C1 can't handle incoming values yet + public int func() { + return x + y; + } + } + + static interface FunctorInterface { + public int apply(Point p); + } + + static class Functor implements FunctorInterface { + @DontCompile // FIXME -- C1 can't handle incoming values yet + @DontInline + public int apply(Point p) { + return p.func() + 0; + } + } + static class Functor1 extends Functor { + @DontCompile // FIXME -- C1 can't handle incoming values yet + @DontInline + public int apply(Point p) { + return p.func() + 10000; + } + } + static class Functor2 extends Functor { + @DontCompile // FIXME -- C1 can't handle incoming values yet + @DontInline + public int apply(Point p) { + return p.func() + 20000; + } + } + static class Functor3 extends Functor { + @DontCompile // FIXME -- C1 can't handle incoming values yet + @DontInline + public int apply(Point p) { + return p.func() + 30000; + } + } + static class Functor4 extends Functor { + @DontCompile // FIXME -- C1 can't handle incoming values yet + @DontInline + public int apply(Point p) { + return p.func() + 40000; + } + } + + static Functor functors[] = { + new Functor(), + new Functor1(), + new Functor2(), + new Functor3(), + new Functor4() + }; + static int counter = 0; + static Functor getFunctor() { + int n = (++ counter) % functors.length; + return functors[n]; + } + + static Point pointField = new Point(123, 456); + + //********************************************************************** + // PART 1 - C1 calls interpreted code + //********************************************************************** + + + //** C1 passes value to interpreter (static) + @Test(compLevel = C1) + public int test1() { + return test1_helper(pointField); + } + + @DontInline + @DontCompile + private static int test1_helper(Point p) { + return p.func(); + } + + @DontCompile + public void test1_verifier(boolean warmup) { + int count = warmup ? 1 : 10; + for (int i=0; i