--- old/src/hotspot/cpu/aarch64/aarch64.ad	2018-11-30 21:08:38.502096813 +0100
+++ new/src/hotspot/cpu/aarch64/aarch64.ad	2018-11-30 21:08:38.350098218 +0100
@@ -1,6 +1,6 @@
 //
 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2014, Red Hat Inc. All rights reserved.
+// Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1272,6 +1272,8 @@
   case Op_CompareAndSwapL:
   case Op_CompareAndSwapP:
   case Op_CompareAndSwapN:
+  case Op_ShenandoahCompareAndSwapP:
+  case Op_ShenandoahCompareAndSwapN:
   case Op_CompareAndSwapB:
   case Op_CompareAndSwapS:
   case Op_GetAndSetI:
@@ -1293,6 +1295,10 @@
   case Op_WeakCompareAndSwapL:
   case Op_WeakCompareAndSwapP:
   case Op_WeakCompareAndSwapN:
+  case Op_ShenandoahWeakCompareAndSwapP:
+  case Op_ShenandoahWeakCompareAndSwapN:
+  case Op_ShenandoahCompareAndExchangeP:
+  case Op_ShenandoahCompareAndExchangeN:
     return maybe_volatile;
   default:
     return false;
--- old/src/hotspot/share/adlc/formssel.cpp	2018-11-30 21:08:38.871093401 +0100
+++ new/src/hotspot/share/adlc/formssel.cpp	2018-11-30 21:08:38.718094816 +0100
@@ -775,7 +775,10 @@
       !strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
       !strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
       !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
-      !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN"))) return true;
+      !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
+      !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
+      !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
+      !strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier"))) return true;
   else if ( is_ideal_load() == Form::idealP ) return true;
   else if ( is_ideal_store() != Form::none ) return true;
@@ -3498,10 +3501,12 @@
   "CompareAndSwapB", "CompareAndSwapS", "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
   "WeakCompareAndSwapB", "WeakCompareAndSwapS", "WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
   "CompareAndExchangeB", "CompareAndExchangeS", "CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
+  "ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", "ShenandoahWeakCompareAndSwapP", "ShenandoahWeakCompareAndSwapN", "ShenandoahCompareAndExchangeP", "ShenandoahCompareAndExchangeN",
   "StoreCM",
   "ClearArray",
   "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
   "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
+  "ShenandoahReadBarrier",
   "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
 };
 int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
--- old/src/hotspot/share/ci/ciInstanceKlass.cpp	2018-11-30 21:08:39.127091034 +0100
+++ new/src/hotspot/share/ci/ciInstanceKlass.cpp	2018-11-30 21:08:38.978092411 +0100
@@ -742,3 +742,27 @@
     ik->do_local_static_fields(&sffp);
   }
 }
+
+#ifdef ASSERT
+bool ciInstanceKlass::debug_final_field_at(int offset) {
+  GUARDED_VM_ENTRY(
+    InstanceKlass* ik = get_instanceKlass();
+    fieldDescriptor fd;
+    if (ik->find_field_from_offset(offset, false, &fd)) {
+      return fd.is_final();
+    }
+  );
+  return false;
+}
+
+bool ciInstanceKlass::debug_stable_field_at(int offset) {
+  GUARDED_VM_ENTRY(
+    InstanceKlass* ik = get_instanceKlass();
+    fieldDescriptor fd;
+    if (ik->find_field_from_offset(offset, false, &fd)) {
+      return fd.is_stable();
+    }
+  );
+  return false;
+}
+#endif
--- old/src/hotspot/share/ci/ciInstanceKlass.hpp	2018-11-30 21:08:39.349088981 +0100
+++ new/src/hotspot/share/ci/ciInstanceKlass.hpp	2018-11-30 21:08:39.198090377 +0100
@@ -271,6 +271,11 @@
   // Dump the current state of this klass for compilation replay.
   virtual void dump_replay_data(outputStream* out);
+
+#ifdef ASSERT
+  bool debug_final_field_at(int offset);
+  bool debug_stable_field_at(int offset);
+#endif
 };

 #endif // SHARE_VM_CI_CIINSTANCEKLASS_HPP
--- old/src/hotspot/share/opto/arraycopynode.cpp	2018-11-30 21:08:39.570086938 +0100
+++ new/src/hotspot/share/opto/arraycopynode.cpp	2018-11-30 21:08:39.420088325 +0100
@@ -497,7 +497,7 @@
   } else {
     if (in(TypeFunc::Control) != ctl) {
       // we can't return new memory and control from Ideal at parse time
-      assert(!is_clonebasic(), "added control for clone?");
+      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
       phase->record_for_igvn(this);
       return false;
     }
--- old/src/hotspot/share/opto/cfgnode.hpp	2018-11-30 21:08:39.791084895 +0100
+++ new/src/hotspot/share/opto/cfgnode.hpp	2018-11-30 21:08:39.639086300 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -304,7 +304,6 @@
 protected:
   ProjNode* range_check_trap_proj(int& flip, Node*& l, Node*& r);
   Node* Ideal_common(PhaseGVN *phase, bool can_reshape);
-  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
   Node* search_identical(int dist);

 public:
@@ -392,6 +391,7 @@
   virtual const RegMask &out_RegMask() const;
   Node* fold_compares(PhaseIterGVN* phase);
   static Node* up_one_dom(Node* curr, bool linear_only = false);
+  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);

   // Takes the type of val and filters it through the test represented
   // by if_proj and returns a more refined type if one is produced.
--- old/src/hotspot/share/opto/classes.cpp	2018-11-30 21:08:40.016082814 +0100
+++ new/src/hotspot/share/opto/classes.cpp	2018-11-30 21:08:39.862084238 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
 #if INCLUDE_ZGC
 #include "gc/z/c2/zBarrierSetC2.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif

 // ----------------------------------------------------------------------------
 // Build a table of virtual functions to map from Nodes to dense integer
--- old/src/hotspot/share/opto/classes.hpp	2018-11-30 21:08:40.236080780 +0100
+++ new/src/hotspot/share/opto/classes.hpp	2018-11-30 21:08:40.081082213 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -264,6 +264,21 @@
 macro(RoundFloat)
 macro(SafePoint)
 macro(SafePointScalarObject)
+#if INCLUDE_SHENANDOAHGC
+#define shmacro(x) macro(x)
+#else
+#define shmacro(x) optionalmacro(x)
+#endif
+shmacro(ShenandoahCompareAndExchangeP)
+shmacro(ShenandoahCompareAndExchangeN)
+shmacro(ShenandoahCompareAndSwapN)
+shmacro(ShenandoahCompareAndSwapP)
+shmacro(ShenandoahWeakCompareAndSwapN)
+shmacro(ShenandoahWeakCompareAndSwapP)
+shmacro(ShenandoahEnqueueBarrier)
+shmacro(ShenandoahReadBarrier)
+shmacro(ShenandoahWriteBarrier)
+shmacro(ShenandoahWBMemProj)
 macro(SCMemProj)
 macro(SqrtD)
 macro(SqrtF)
--- old/src/hotspot/share/opto/compile.cpp	2018-11-30 21:08:40.459078718 +0100
+++ new/src/hotspot/share/opto/compile.cpp	2018-11-30 21:08:40.303080161 +0100
@@ -3061,7 +3061,7 @@
     Node *m = wq.at(next);
     for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
       Node* use = m->fast_out(i);
-      if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
+      if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->is_ShenandoahBarrier()) {
        use->ensure_control_or_add_prec(n->in(0));
      } else {
        switch(use->Opcode()) {
--- old/src/hotspot/share/opto/compile.hpp	2018-11-30 21:08:40.723076277 +0100
+++ new/src/hotspot/share/opto/compile.hpp	2018-11-30 21:08:40.572077673 +0100
@@ -93,6 +93,8 @@
 enum LoopOptsMode {
   LoopOptsDefault,
   LoopOptsNone,
+  LoopOptsShenandoahExpand,
+  LoopOptsShenandoahPostExpand,
   LoopOptsSkipSplitIf,
   LoopOptsVerify,
   LoopOptsLastRound
--- old/src/hotspot/share/opto/lcm.cpp	2018-11-30 21:08:40.958074105 +0100
+++ new/src/hotspot/share/opto/lcm.cpp	2018-11-30 21:08:40.805075519 +0100
@@ -178,6 +178,7 @@
     case Op_LoadRange:
     case Op_LoadD_unaligned:
     case Op_LoadL_unaligned:
+    case Op_ShenandoahReadBarrier:
       assert(mach->in(2) == val, "should be address");
       break;
     case Op_StoreB:
--- old/src/hotspot/share/opto/library_call.cpp	2018-11-30 21:08:41.189071969 +0100
+++ new/src/hotspot/share/opto/library_call.cpp	2018-11-30 21:08:41.037073374 +0100
@@ -4464,7 +4464,7 @@
     for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
       Node* n = mms.memory();
       if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-        assert(n->is_Store(), "what else?");
+        assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
         no_interfering_store = false;
         break;
       }
@@ -4473,7 +4473,7 @@
     for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
       Node* n = mms.memory();
       if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-        assert(n->is_Store(), "what else?");
+        assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
         no_interfering_store = false;
         break;
       }
--- old/src/hotspot/share/opto/loopPredicate.cpp	2018-11-30 21:08:41.477069306 +0100
+++ new/src/hotspot/share/opto/loopPredicate.cpp	2018-11-30 21:08:41.324070721 +0100
@@ -536,6 +536,9 @@
   if (_lpt->is_invariant(n)) { // known invariant
     _invariant.set(n->_idx);
   } else if (!n->is_CFG()) {
+    if (n->Opcode() == Op_ShenandoahWriteBarrier) {
+      return;
+    }
     Node *n_ctrl = _phase->ctrl_or_self(n);
     Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
     if (_phase->is_dominator(n_ctrl, u_ctrl)) {
--- old/src/hotspot/share/opto/loopTransform.cpp	2018-11-30 21:08:41.710067152 +0100
+++ new/src/hotspot/share/opto/loopTransform.cpp	2018-11-30 21:08:41.557068566 +0100
@@ -2795,7 +2795,13 @@
       (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
       (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
       (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
-      (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
+      (bol->in(1)->Opcode() == Op_CompareAndSwapN ) ||
+      (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP ) ||
+      (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN ) ||
+      (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP ) ||
+      (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN ) ||
+      (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP ) ||
+      (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN )))
     return;               // Allocation loops RARELY take backedge
   // Find the OTHER exit path from the IF
   Node* ex = iff->proj_out(1-test_con);
--- old/src/hotspot/share/opto/loopnode.cpp	2018-11-30 21:08:41.969064757 +0100
+++ new/src/hotspot/share/opto/loopnode.cpp	2018-11-30 21:08:41.814066190 +0100
@@ -3968,7 +3968,7 @@
   }
   while(worklist.size() != 0 && LCA != early) {
     Node* s = worklist.pop();
-    if (s->is_Load() || s->Opcode() == Op_SafePoint ||
+    if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
         (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
       continue;
     } else if (s->is_MergeMem()) {
@@ -4185,7 +4185,17 @@
 //------------------------------build_loop_late_post---------------------------
 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
 // Second pass finds latest legal placement, and ideal loop placement.
-void PhaseIdealLoop::build_loop_late_post( Node *n ) {
+void PhaseIdealLoop::build_loop_late_post(Node *n) {
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+
+  if (bs->build_loop_late_post(this, n)) {
+    return;
+  }
+
+  build_loop_late_post_work(n, true);
+}
+
+void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
   if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
@@ -4206,7 +4216,6 @@
   // _must_ be pinned (they have to observe their control edge of course).
   // Unlike Stores (which modify an unallocable resource, the memory
   // state), Mods/Loads can float around.  So free them up.
-  bool pinned = true;
   switch( n->Opcode() ) {
   case Op_DivI:
   case Op_DivF:
@@ -4503,6 +4512,7 @@
       }
     }
   }
 }
+#endif

 // Collect a R-P-O for the whole CFG.
 // Result list is in post-order (scan backwards for RPO)
@@ -4525,7 +4535,6 @@
       }
     }
   }
 }
-#endif

 //=============================================================================
--- old/src/hotspot/share/opto/loopnode.hpp	2018-11-30 21:08:42.238062270 +0100
+++ new/src/hotspot/share/opto/loopnode.hpp	2018-11-30 21:08:42.084063694 +0100
@@ -38,6 +38,8 @@
 class LoopNode;
 class Node;
 class OuterStripMinedLoopEndNode;
+class ShenandoahBarrierNode;
+class ShenandoahWriteBarrierNode;
 class PathFrequency;
 class PhaseIdealLoop;
 class CountedLoopReserveKit;
@@ -636,6 +638,8 @@
   friend class IdealLoopTree;
   friend class SuperWord;
   friend class CountedLoopReserveKit;
+  friend class ShenandoahBarrierNode;
+  friend class ShenandoahWriteBarrierNode;

   // Pre-computed def-use info
   PhaseIterGVN &_igvn;
@@ -863,7 +867,8 @@
   // Place Data nodes in some loop nest
   void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
   void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
-  void build_loop_late_post ( Node* n );
+  void build_loop_late_post_work(Node* n, bool pinned);
+  void build_loop_late_post(Node* n);
   void verify_strip_mined_scheduling(Node *n, Node* least);

   // Array of immediate dominance info for each CFG node indexed by node idx
@@ -1309,7 +1314,6 @@
 #ifndef PRODUCT
   void dump( ) const;
   void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
-  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
   void verify() const;          // Major slow  :-)
   void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
   IdealLoopTree *get_loop_idx(Node* n) const {
@@ -1321,6 +1325,7 @@
   static int _loop_invokes;     // Count of PhaseIdealLoop invokes
   static int _loop_work;        // Sum of PhaseIdealLoop x _unique
 #endif
+  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
 };

 // This kit may be used for making of a reserved copy of a loop before this loop
--- old/src/hotspot/share/opto/loopopts.cpp	2018-11-30 21:08:42.477060060 +0100
+++ new/src/hotspot/share/opto/loopopts.cpp	2018-11-30 21:08:42.320061512 +0100
@@ -1024,6 +1024,11 @@
     Node* m = n->fast_out(j);
     if (m->is_FastLock())
       return false;
+#if INCLUDE_SHENANDOAHGC
+    if (m->is_ShenandoahBarrier() && m->has_out_with(Op_FastLock)) {
+      return false;
+    }
+#endif
 #ifdef _LP64
     if (m->Opcode() == Op_ConvI2L)
       return false;
@@ -1310,6 +1315,7 @@
   // control, then the cloning of n is a pointless exercise, because
   // GVN will ensure that we end up where we started.
   if (!n->is_Load() || late_load_ctrl != n_ctrl) {
+    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
     for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
       Node *u = n->last_out(j); // Clone private computation per use
       _igvn.rehash_node_delayed(u);
@@ -1340,6 +1346,10 @@
       // For inner loop uses get the preheader area.
       x_ctrl = place_near_use(x_ctrl);

+      if (bs->sink_node(this, n, x, x_ctrl, n_ctrl)) {
+        continue;
+      }
+
       if (n->is_Load()) {
         // For loads, add a control edge to a CFG node outside of the loop
         // to force them to not combine and return back inside the loop
@@ -3137,7 +3147,7 @@
       // if not pinned and not a load (which maybe anti-dependent on a store)
       // and not a CMove (Matcher expects only bool->cmove).
-      if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) {
+      if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove() && n->Opcode() != Op_ShenandoahWBMemProj) {
         cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
         sink_list.push(n);
         peel     >>= n->_idx; // delete n from peel set.
--- old/src/hotspot/share/opto/macro.cpp	2018-11-30 21:08:42.739057638 +0100
+++ new/src/hotspot/share/opto/macro.cpp	2018-11-30 21:08:42.583059080 +0100
@@ -47,9 +47,13 @@
 #include "opto/subnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
 #endif // INCLUDE_G1GC
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif


 //
@@ -629,6 +633,7 @@
          k < kmax && can_eliminate; k++) {
       Node* n = use->fast_out(k);
       if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
+          SHENANDOAHGC_ONLY((!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)) &&)
           !(n->is_ArrayCopy() &&
             n->as_ArrayCopy()->is_clonebasic() &&
             n->in(ArrayCopyNode::Dest) == use)) {
--- old/src/hotspot/share/opto/node.hpp	2018-11-30 21:08:42.987055345 +0100
+++ new/src/hotspot/share/opto/node.hpp	2018-11-30 21:08:42.835056750 +0100
@@ -142,6 +142,7 @@
 class RootNode;
 class SafePointNode;
 class SafePointScalarObjectNode;
+class ShenandoahBarrierNode;
 class StartNode;
 class State;
 class StoreNode;
@@ -675,6 +676,7 @@
     DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
       DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
       DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
+    DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7)

     DEFINE_CLASS_ID(Proj, Node, 3)
       DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -873,6 +875,7 @@
   DEFINE_CLASS_QUERY(Root)
   DEFINE_CLASS_QUERY(SafePoint)
   DEFINE_CLASS_QUERY(SafePointScalarObject)
+  DEFINE_CLASS_QUERY(ShenandoahBarrier)
   DEFINE_CLASS_QUERY(Start)
   DEFINE_CLASS_QUERY(Store)
   DEFINE_CLASS_QUERY(Sub)
--- old/src/hotspot/share/opto/type.cpp	2018-11-30 21:08:43.227053126 +0100
+++ new/src/hotspot/share/opto/type.cpp	2018-11-30 21:08:43.073054550 +0100
@@ -3044,6 +3044,10 @@
   return this;
 }

+const TypeOopPtr *TypeOopPtr::cast_to_nonconst() const {
+  return this;
+}
+
 //-----------------------------cast_to_exactness-------------------------------
 const Type *TypeOopPtr::cast_to_exactness(bool klass_is_exact) const {
   // There is no such thing as an exact general oop.
@@ -3546,6 +3550,11 @@
   return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id, _speculative, _inline_depth);
 }

+const TypeOopPtr *TypeInstPtr::cast_to_nonconst() const {
+  if (const_oop() == NULL) return this;
+  return make(NotNull, klass(), _klass_is_exact, NULL, _offset, _instance_id, _speculative, _inline_depth);
+}
+
 //------------------------------xmeet_unloaded---------------------------------
 // Compute the MEET of two InstPtrs when at least one is unloaded.
 // Assume classes are different since called after check for same name/class-loader
@@ -4073,6 +4082,12 @@
   return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative, _inline_depth);
 }

+const TypeOopPtr *TypeAryPtr::cast_to_nonconst() const {
+  if (const_oop() == NULL) return this;
+  return make(NotNull, NULL, _ary, klass(), _klass_is_exact, _offset, _instance_id, _speculative, _inline_depth);
+}
+
+
 //-----------------------------narrow_size_type-------------------------------
 // Local cache for arrayOopDesc::max_array_length(etype),
 // which is kind of slow (and cached elsewhere by other users).
--- old/src/hotspot/share/opto/type.hpp	2018-11-30 21:08:43.508050528 +0100
+++ new/src/hotspot/share/opto/type.hpp	2018-11-30 21:08:43.357051925 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1028,6 +1028,8 @@
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;

+  virtual const TypeOopPtr *cast_to_nonconst() const;
+
   // corresponding pointer to klass, for a given instance
   const TypeKlassPtr* as_klass_type() const;
@@ -1110,6 +1112,8 @@
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;

+  virtual const TypeOopPtr *cast_to_nonconst() const;
+
   virtual const TypePtr *add_offset( intptr_t offset ) const;

   // Speculative type helper methods.
@@ -1193,6 +1197,8 @@
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;

+  virtual const TypeOopPtr *cast_to_nonconst() const;
+
   virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const;
   virtual const TypeInt* narrow_size_type(const TypeInt* size) const;

@@ -1770,6 +1776,8 @@
 // UseOptoBiasInlining
 #define XorXNode XorLNode
 #define StoreXConditionalNode StoreLConditionalNode
+#define LoadXNode LoadLNode
+#define StoreXNode StoreLNode
 // Opcodes
 #define Op_LShiftX Op_LShiftL
 #define Op_AndX Op_AndL
@@ -1815,6 +1823,8 @@
 // UseOptoBiasInlining
 #define XorXNode XorINode
 #define StoreXConditionalNode StoreIConditionalNode
+#define LoadXNode LoadINode
+#define StoreXNode StoreINode
 // Opcodes
 #define Op_LShiftX Op_LShiftI
 #define Op_AndX Op_AndI