/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_ARRAYCOPYNODE_HPP
#define SHARE_OPTO_ARRAYCOPYNODE_HPP

#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/callnode.hpp"

class GraphKit;

class ArrayCopyNode : public CallNode {
private:

  // What kind of arraycopy variant is this?
  enum {
    None,            // not set yet
    ArrayCopy,       // System.arraycopy()
    CloneBasic,      // A clone that can be copied by 64-bit chunks
    CloneOop,        // An oop array clone
    CopyOf,          // Arrays.copyOf()
    CopyOfRange      // Arrays.copyOfRange()
  } _kind;
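
  // For example, cloning a byte[] or an ordinary instance typically becomes
  // CloneBasic, while cloning an Object[] becomes CloneOop (its element
  // copies may need GC barriers).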

#ifndef PRODUCT
  static const char* _kind_names[CopyOfRange+1];
#endif
  // Is the alloc obtained with
  // AllocateArrayNode::Ideal_array_allocation() tightly coupled
  // (i.e. does the arraycopy immediately follow the allocation)?
  // We cache the result of LibraryCallKit::tightly_coupled_allocation()
  // here because it's much easier to find out whether there's a tightly
  // coupled allocation at parse time than at macro expansion time. At
  // macro expansion time, for every use of the allocation node we
  // would need to figure out whether it happens after the arraycopy (and
  // can be ignored) or between the allocation and the arraycopy. At
  // parse time, it's straightforward because whatever happens after
  // the arraycopy is not parsed yet, so it doesn't exist when
  // LibraryCallKit::tightly_coupled_allocation() is called.
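  // For example, in an illustrative Java-level pattern such as
  //   int[] dest = new int[n];
  //   System.arraycopy(src, 0, dest, 0, n);
  // the destination allocation is tightly coupled with the arraycopy that
  // immediately follows it.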
  bool _alloc_tightly_coupled;
  bool _has_negative_length_guard;

  bool _arguments_validated;

  static const TypeFunc* arraycopy_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[Src]       = TypeInstPtr::BOTTOM;
    fields[SrcPos]    = TypeInt::INT;
    fields[Dest]      = TypeInstPtr::BOTTOM;
    fields[DestPos]   = TypeInt::INT;
    fields[Length]    = TypeInt::INT;
    fields[SrcLen]    = TypeInt::INT;
    fields[DestLen]   = TypeInt::INT;
    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
    fields[DestKlass] = TypeKlassPtr::BOTTOM;
    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }
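  // Note: the call's inputs at TypeFunc::Parms + i follow this domain and are
  // indexed by the Src .. DestKlass enum below; the range is empty because the
  // call itself produces no result value.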

  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard);

  intptr_t get_length_if_constant(PhaseGVN *phase) const;
  int get_count(PhaseGVN *phase) const;
  static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);

  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
  bool prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                          Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest,
                          BasicType& copy_type, const Type*& value_type, bool& disjoint_bases);
  void array_copy_test_overlap(PhaseGVN *phase, bool can_reshape,
                               bool disjoint_bases, int count,
                               Node*& forward_ctl, Node*& backward_ctl);
  Node* array_copy_forward(PhaseGVN *phase, bool can_reshape, Node*& ctl,
                           MergeMemNode* mm,
                           const TypePtr* atp_src, const TypePtr* atp_dest,
                           Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
                           BasicType copy_type, const Type* value_type, int count);
  Node* array_copy_backward(PhaseGVN *phase, bool can_reshape, Node*& ctl,
                            MergeMemNode* mm,
                            const TypePtr* atp_src, const TypePtr* atp_dest,
                            Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
                            BasicType copy_type, const Type* value_type, int count);
  bool finish_transform(PhaseGVN *phase, bool can_reshape,
                        Node* ctl, Node *mem);
  static bool may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call);

  static Node* load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, const Type *type, BasicType bt);
  void store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt);

public:

  enum {
    Src   = TypeFunc::Parms,
    SrcPos,
    Dest,
    DestPos,
    Length,
    SrcLen,
    DestLen,
    SrcKlass,
    DestKlass,
    ParmLimit
  };

  // Results from escape analysis for non-escaping inputs
  const TypeOopPtr* _src_type;
  const TypeOopPtr* _dest_type;

  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
                             Node* src, Node* src_offset,
                             Node* dest, Node* dest_offset,
                             Node* length,
                             bool alloc_tightly_coupled,
                             bool has_negative_length_guard,
                             Node* src_klass = NULL, Node* dest_klass = NULL,
                             Node* src_length = NULL, Node* dest_length = NULL);

  void connect_outputs(GraphKit* kit);
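
  // A sketch of the typical creation sequence (not the exact caller code; the
  // real uses are the arraycopy intrinsics in library_call.cpp, and the
  // variable names here are illustrative):
  //
  //   ArrayCopyNode* ac = ArrayCopyNode::make(kit, true /* may_throw */,
  //                                           src, src_offset, dest, dest_offset,
  //                                           length, tightly_coupled, has_neg_guard);
  //   ac->set_arraycopy(validated);      // or set_clonebasic(), set_copyof(...), ...
  //   Node* n = kit->gvn().transform(ac);
  //   if (n == ac) {
  //     ac->connect_outputs(kit);        // wire the call's control/memory/i_o and
  //   }                                  // exception projections into the kit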

  bool is_arraycopy()             const  { assert(_kind != None, "should be set"); return _kind == ArrayCopy; }
  bool is_arraycopy_validated()   const  { assert(_kind != None, "should be set"); return _kind == ArrayCopy && _arguments_validated; }
  bool is_clonebasic()            const  { assert(_kind != None, "should be set"); return _kind == CloneBasic; }
  bool is_cloneoop()              const  { assert(_kind != None, "should be set"); return _kind == CloneOop; }
  bool is_copyof()                const  { assert(_kind != None, "should be set"); return _kind == CopyOf; }
  bool is_copyof_validated()      const  { assert(_kind != None, "should be set"); return _kind == CopyOf && _arguments_validated; }
  bool is_copyofrange()           const  { assert(_kind != None, "should be set"); return _kind == CopyOfRange; }
  bool is_copyofrange_validated() const  { assert(_kind != None, "should be set"); return _kind == CopyOfRange && _arguments_validated; }

  void set_arraycopy(bool validated)   { assert(_kind == None, "shouldn't be set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
  void set_clonebasic()                { assert(_kind == None, "shouldn't be set yet"); _kind = CloneBasic; }
  void set_cloneoop()                  { assert(_kind == None, "shouldn't be set yet"); _kind = CloneOop; }
  void set_copyof(bool validated)      { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOf; _arguments_validated = validated; }
  void set_copyofrange(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOfRange; _arguments_validated = validated; }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual bool guaranteed_safepoint()  { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);

  bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }

  bool has_negative_length_guard() const { return _has_negative_length_guard; }

  static bool may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac);
  bool modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};
#endif // SHARE_OPTO_ARRAYCOPYNODE_HPP