rev 8008 : 8073866: Fix for 8064703 is not sufficient
Summary: side effects between allocation and arraycopy can be re-executed, and an unreachable uninitialized array can be seen by GCs
Reviewed-by:

   1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
  26 #define SHARE_VM_OPTO_GRAPHKIT_HPP
  27 
  28 #include "ci/ciEnv.hpp"
  29 #include "ci/ciMethodData.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/compile.hpp"
  34 #include "opto/divnode.hpp"
  35 #include "opto/mulnode.hpp"
  36 #include "opto/phaseX.hpp"
  37 #include "opto/subnode.hpp"
  38 #include "opto/type.hpp"
  39 #include "runtime/deoptimization.hpp"
  40 
  41 class FastLockNode;
  42 class FastUnlockNode;
  43 class IdealKit;
  44 class LibraryCallKit;
  45 class Parse;
  46 class RootNode;
  47 
  48 //-----------------------------------------------------------------------------
  49 //----------------------------GraphKit-----------------------------------------
  50 // Toolkit for building the common sorts of subgraphs.
  51 // Does not know about bytecode parsing or type-flow results.
  52 // It is able to create graphs implementing the semantics of most
  53 // or all bytecodes, so that it can expand intrinsics and calls.
  54 // It may depend on JVMState structure, but it must not depend
  55 // on specific bytecode streams.
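
For orientation, a typical client wraps an incoming JVMState, builds IR
through the helpers declared below, and leaves its results in the JVM state;
a minimal sketch using only methods of this class:

        GraphKit kit(jvms);
        Node* len = kit.load_array_length(kit.null_check(kit.argument(0)));
        kit.push(len);   // leave the result on the JVM expression stack
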
  56 class GraphKit : public Phase {
  57   friend class PreserveJVMState;
  58 
  59  protected:
  60   ciEnv*            _env;       // Compilation environment
  61   PhaseGVN         &_gvn;       // Some optimizations while parsing
  62   SafePointNode*    _map;       // Parser map from JVM to Nodes
  63   SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  64   int               _bci;       // JVM Bytecode Pointer
  65   ciMethod*         _method;    // JVM Current Method
  66 
  67  private:
  68   int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
  69 
  70  private:
  71   SafePointNode*     map_not_null() const {
  72     assert(_map != NULL, "must call stopped() to test for reset compiler map");
  73     return _map;
  74   }
  75 
  76  public:
  77   GraphKit();                   // empty constructor
  78   GraphKit(JVMState* jvms);     // the JVM state on which to operate
  79 
  80 #ifdef ASSERT
  81   ~GraphKit() {
  82     assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  83   }
  84 #endif
  85 
  86   virtual Parse*          is_Parse()          const { return NULL; }
  87   virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  88 
  89   ciEnv*        env()           const { return _env; }
  90   PhaseGVN&     gvn()           const { return _gvn; }
  91 
  92   void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  93 
  94   // Handy well-known nodes:
  95   Node*         null()          const { return zerocon(T_OBJECT); }
  96   Node*         top()           const { return C->top(); }
  97   RootNode*     root()          const { return C->root(); }
  98 
  99   // Create or find a constant node
 100   Node* intcon(jint con)        const { return _gvn.intcon(con); }
 101   Node* longcon(jlong con)      const { return _gvn.longcon(con); }
 102   Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
 103   Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
 104   // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
 105 
 106   // Helper for byte_map_base
 107   Node* byte_map_base_node();
 108 
 109   jint  find_int_con(Node* n, jint value_if_unknown) {
 110     return _gvn.find_int_con(n, value_if_unknown);
 111   }
 112   jlong find_long_con(Node* n, jlong value_if_unknown) {
 113     return _gvn.find_long_con(n, value_if_unknown);
 114   }
 115   // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
 116 
 117   // JVM State accessors:
 118   // Parser mapping from JVM indices into Nodes.
  119   // Low slots are accessed via the enum constants defined in StartNode.
 120   // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
 121   // Then come JVM stack slots.
 122   // Finally come the monitors, if any.
 123   // See layout accessors in class JVMState.
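
Concretely, the slot layout described above is (JVMState in callnode.hpp
holds the authoritative offset accessors):

        [0 .. Parms-1]               control, i_o, memory, frameptr, returnadr
        [Parms .. Parms+max_locals)  locals
        [next]                       JVM expression stack slots
        [last]                       monitor box/object pairs, if any
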
 124 
 125   SafePointNode*     map()      const { return _map; }
 126   bool               has_exceptions() const { return _exceptions != NULL; }
 127   JVMState*          jvms()     const { return map_not_null()->_jvms; }
 128   int                sp()       const { return _sp; }
 129   int                bci()      const { return _bci; }
 130   Bytecodes::Code    java_bc()  const;
 131   ciMethod*          method()   const { return _method; }
 132 
 133   void set_jvms(JVMState* jvms)       { set_map(jvms->map());
 134                                         assert(jvms == this->jvms(), "sanity");
 135                                         _sp = jvms->sp();
 136                                         _bci = jvms->bci();
 137                                         _method = jvms->has_method() ? jvms->method() : NULL; }
 138   void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
 139   void set_sp(int sp)                 { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
 140   void clean_stack(int from_sp); // clear garbage beyond from_sp to top
 141 
 142   void inc_sp(int i)                  { set_sp(sp() + i); }
 143   void dec_sp(int i)                  { set_sp(sp() - i); }
 144   void set_bci(int bci)               { _bci = bci; }
 145 
 146   // Make sure jvms has current bci & sp.
 147   JVMState* sync_jvms() const;
 148   JVMState* sync_jvms_for_reexecute();
 149 
 150 #ifdef ASSERT
 151   // Make sure JVMS has an updated copy of bci and sp.
 152   // Also sanity-check method, depth, and monitor depth.
 153   bool jvms_in_sync() const;
 154 
 155   // Make sure the map looks OK.
 156   void verify_map() const;
 157 
 158   // Make sure a proposed exception state looks OK.
 159   static void verify_exception_state(SafePointNode* ex_map);
 160 #endif
 161 
 162   // Clone the existing map state.  (Implements PreserveJVMState.)
 163   SafePointNode* clone_map();
 164 
 165   // Set the map to a clone of the given one.
 166   void set_map_clone(SafePointNode* m);
 167 
 168   // Tell if the compilation is failing.
 169   bool failing() const { return C->failing(); }
 170 
 171   // Set _map to NULL, signalling a stop to further bytecode execution.
 172   // Preserve the map intact for future use, and return it back to the caller.
 173   SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }
 174 
 175   // Stop, but first smash the map's inputs to NULL, to mark it dead.
 176   void stop_and_kill_map();
 177 
 178   // Tell if _map is NULL, or control is top.
 179   bool stopped();
 180 
 181   // Tell if this method or any caller method has exception handlers.
 182   bool has_ex_handler();
 183 
 184   // Save an exception without blowing stack contents or other JVM state.
 185   // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
 186   static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);
 187 
 188   // Recover a saved exception from its map.
 189   static Node* saved_ex_oop(SafePointNode* ex_map);
 190 
 191   // Recover a saved exception from its map, and remove it from the map.
 192   static Node* clear_saved_ex_oop(SafePointNode* ex_map);
 193 
 194 #ifdef ASSERT
 195   // Recover a saved exception from its map, and remove it from the map.
 196   static bool has_saved_ex_oop(SafePointNode* ex_map);
 197 #endif
 198 
 199   // Push an exception in the canonical position for handlers (stack(0)).
 200   void push_ex_oop(Node* ex_oop) {
 201     ensure_stack(1);  // ensure room to push the exception
 202     set_stack(0, ex_oop);
 203     set_sp(1);
 204     clean_stack(1);
 205   }
 206 
 207   // Detach and return an exception state.
 208   SafePointNode* pop_exception_state() {
 209     SafePointNode* ex_map = _exceptions;
 210     if (ex_map != NULL) {
 211       _exceptions = ex_map->next_exception();
 212       ex_map->set_next_exception(NULL);
 213       debug_only(verify_exception_state(ex_map));
 214     }
 215     return ex_map;
 216   }
 217 
 218   // Add an exception, using the given JVM state, without commoning.
 219   void push_exception_state(SafePointNode* ex_map) {
 220     debug_only(verify_exception_state(ex_map));
 221     ex_map->set_next_exception(_exceptions);
 222     _exceptions = ex_map;
 223   }
 224 
 225   // Turn the current JVM state into an exception state, appending the ex_oop.
 226   SafePointNode* make_exception_state(Node* ex_oop);
 227 
 228   // Add an exception, using the given JVM state.
 229   // Combine all exceptions with a common exception type into a single state.
 230   // (This is done via combine_exception_states.)
 231   void add_exception_state(SafePointNode* ex_map);
 232 
 233   // Combine all exceptions of any sort whatever into a single master state.
 234   SafePointNode* combine_and_pop_all_exception_states() {
 235     if (_exceptions == NULL)  return NULL;
 236     SafePointNode* phi_map = pop_exception_state();
 237     SafePointNode* ex_map;
 238     while ((ex_map = pop_exception_state()) != NULL) {
 239       combine_exception_states(ex_map, phi_map);
 240     }
 241     return phi_map;
 242   }
 243 
 244   // Combine the two exception states, building phis as necessary.
 245   // The second argument is updated to include contributions from the first.
 246   void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);
 247 
 248   // Reset the map to the given state.  If there are any half-finished phis
 249   // in it (created by combine_exception_states), transform them now.
 250   // Returns the exception oop.  (Caller must call push_ex_oop if required.)
 251   Node* use_exception_state(SafePointNode* ex_map);
 252 
 253   // Collect exceptions from a given JVM state into my exception list.
 254   void add_exception_states_from(JVMState* jvms);
 255 
 256   // Collect all raised exceptions into the current JVM state.
 257   // Clear the current exception list and map, returns the combined states.
 258   JVMState* transfer_exceptions_into_jvms();
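
For example, after expanding a call that may throw, the caller collects the
callee's exception states and, when it is done, folds everything into a
single state; a sketch (new_jvms stands for the callee's resulting state):

        add_exception_states_from(new_jvms);                  // collect callee throws
        JVMState* out_jvms = transfer_exceptions_into_jvms(); // one combined state
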
 259 
 260   // Helper to throw a built-in exception.
 261   // Range checks take the offending index.
 262   // Cast and array store checks take the offending class.
 263   // Others do not take the optional argument.
 264   // The JVMS must allow the bytecode to be re-executed
 265   // via an uncommon trap.
 266   void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
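
For instance, a failed range check can be expanded as (idx being the
offending index node, assumed already in hand):

        builtin_throw(Deoptimization::Reason_range_check, idx);
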
 267 
 268   // Helper to check the JavaThread::_should_post_on_exceptions flag
 269   // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
 270   void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
  271                                                   bool must_throw);
 272 
 273   // Helper Functions for adding debug information
 274   void kill_dead_locals();
 275 #ifdef ASSERT
 276   bool dead_locals_are_killed();
 277 #endif
 278   // The call may deoptimize.  Supply required JVM state as debug info.
 279   // If must_throw is true, the call is guaranteed not to return normally.
 280   void add_safepoint_edges(SafePointNode* call,
 281                            bool must_throw = false);
 282 
 283   // How many stack inputs does the current BC consume?
 284   // And, how does the stack change after the bytecode?
 285   // Returns false if unknown.
 286   bool compute_stack_effects(int& inputs, int& depth);
 287 
 288   // Add a fixed offset to a pointer
 289   Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
 290     return basic_plus_adr(base, ptr, MakeConX(offset));
 291   }
 292   Node* basic_plus_adr(Node* base, intptr_t offset) {
 293     return basic_plus_adr(base, base, MakeConX(offset));
 294   }
 295   // Add a variable offset to a pointer
 296   Node* basic_plus_adr(Node* base, Node* offset) {
 297     return basic_plus_adr(base, base, offset);
 298   }
 299   Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
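
For example, to address a field at a constant offset, or an array body at a
computed offset (obj, ary, and the offset operands are illustrative):

        Node* fadr = basic_plus_adr(obj, field_offset_in_bytes); // fixed offset
        Node* eadr = basic_plus_adr(ary, offset_node);           // variable offset
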
 300 
 301 
 302   // Some convenient shortcuts for common nodes
 303   Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new IfTrueNode(iff));      }
 304   Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new IfFalseNode(iff));     }
 305 
 306   Node* AddI(Node* l, Node* r)                { return _gvn.transform(new AddINode(l, r));       }
 307   Node* SubI(Node* l, Node* r)                { return _gvn.transform(new SubINode(l, r));       }
 308   Node* MulI(Node* l, Node* r)                { return _gvn.transform(new MulINode(l, r));       }
 309   Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new DivINode(ctl, l, r));  }
 310 
 311   Node* AndI(Node* l, Node* r)                { return _gvn.transform(new AndINode(l, r));       }
 312   Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new OrINode(l, r));        }
 313   Node* XorI(Node* l, Node* r)                { return _gvn.transform(new XorINode(l, r));       }
 314 
 315   Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new MaxINode(l, r));       }
 316   Node* MinI(Node* l, Node* r)                { return _gvn.transform(new MinINode(l, r));       }
 317 
 318   Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new LShiftINode(l, r));    }
 319   Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new RShiftINode(l, r));    }
 320   Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new URShiftINode(l, r));   }
 321 
 322   Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new CmpINode(l, r));       }
 323   Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new CmpLNode(l, r));       }
 324   Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new CmpPNode(l, r));       }
 325   Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }
 326 
 327   Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new AddPNode(b, a, o));    }
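
These keep expression building terse; e.g. the int expression (lo + hi) >> 1
is simply:

        Node* mid = RShiftI(AddI(lo, hi), intcon(1));
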
 328 
 329   // Convert between int and long, and size_t.
  330   // (See macros ConvI2X, etc., in type.hpp.)
 331   Node* ConvI2L(Node* offset);
 332   Node* ConvI2UL(Node* offset);
 333   Node* ConvL2I(Node* offset);
 334   // Find out the klass of an object.
 335   Node* load_object_klass(Node* object);
 336   // Find out the length of an array.
 337   Node* load_array_length(Node* array);
 338 
 339 
 340   // Helper function to do a NULL pointer check or ZERO check based on type.
 341   // Throw an exception if a given value is null.
 342   // Return the value cast to not-null.
 343   // Be clever about equivalent dominating null checks.
 344   Node* null_check_common(Node* value, BasicType type,
 345                           bool assert_null = false,
 346                           Node* *null_control = NULL,
 347                           bool speculative = false);
 348   Node* null_check(Node* value, BasicType type = T_OBJECT) {
 349     return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
 350   }
 351   Node* null_check_receiver() {
 352     assert(argument(0)->bottom_type()->isa_ptr(), "must be");
 353     return null_check(argument(0));
 354   }
 355   Node* zero_check_int(Node* value) {
 356     assert(value->bottom_type()->basic_type() == T_INT,
 357         err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
 358     return null_check_common(value, T_INT);
 359   }
 360   Node* zero_check_long(Node* value) {
 361     assert(value->bottom_type()->basic_type() == T_LONG,
 362         err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
 363     return null_check_common(value, T_LONG);
 364   }
 365   // Throw an uncommon trap if a given value is __not__ null.
 366   // Return the value cast to null, and be clever about dominating checks.
 367   Node* null_assert(Node* value, BasicType type = T_OBJECT) {
 368     return null_check_common(value, type, true);
 369   }
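
The canonical idiom checks a value and then asks whether any non-null path
survived; a sketch:

        Node* obj = null_check(peek());  // NPE is thrown on the null path
        if (stopped())  return;          // every path was null: code is dead
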
 370 
 371   // Null check oop.  Return null-path control into (*null_control).
 372   // Return a cast-not-null node which depends on the not-null control.
 373   // If never_see_null, use an uncommon trap (*null_control sees a top).
 374   // The cast is not valid along the null path; keep a copy of the original.
 375   // If safe_for_replace, then we can replace the value with the cast
 376   // in the parsing map (the cast is guaranteed to dominate the map)
 377   Node* null_check_oop(Node* value, Node* *null_control,
 378                        bool never_see_null = false,
 379                        bool safe_for_replace = false,
 380                        bool speculative = false);
 381 
 382   // Check the null_seen bit.
 383   bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);
 384 
 385   // Check for unique class for receiver at call
 386   ciKlass* profile_has_unique_klass() {
 387     ciCallProfile profile = method()->call_profile_at_bci(bci());
 388     if (profile.count() >= 0 &&         // no cast failures here
 389         profile.has_receiver(0) &&
 390         profile.morphism() == 1) {
 391       return profile.receiver(0);
 392     }
 393     return NULL;
 394   }
 395 
 396   // record type from profiling with the type system
 397   Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null);
 398   void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
 399   void record_profiled_parameters_for_speculation();
 400   void record_profiled_return_for_speculation();
 401   Node* record_profiled_receiver_for_speculation(Node* n);
 402 
 403   // Use the type profile to narrow an object type.
 404   Node* maybe_cast_profiled_receiver(Node* not_null_obj,
 405                                      ciKlass* require_klass,
 406                                      ciKlass* spec,
 407                                      bool safe_for_replace);
 408 
 409   // Cast obj to type and emit guard unless we had too many traps here already
 410   Node* maybe_cast_profiled_obj(Node* obj,
 411                                 ciKlass* type,
 412                                 bool not_null = false,
 413                                 SafePointNode* sfpt = NULL);
 414 
 415   // Cast obj to not-null on this path
 416   Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
 417   // Replace all occurrences of one node by another.
 418   void replace_in_map(Node* old, Node* neww);
 419 
 420   void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++        , n); }
 421   Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp             ); }
 422   Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1   ); }
 423 
 424   void push_pair(Node* ldval) {
 425     push(ldval);
 426     push(top());  // the halfword is merely a placeholder
 427   }
 428   void push_pair_local(int i) {
 429     // longs are stored in locals in "push" order
 430     push(  local(i+0) );  // the real value
 431     assert(local(i+1) == top(), "");
 432     push(top());  // halfword placeholder
 433   }
 434   Node* pop_pair() {
 435     // the second half is pushed last & popped first; it contains exactly nothing
 436     Node* halfword = pop();
 437     assert(halfword == top(), "");
 438     // the long bits are pushed first & popped last:
 439     return pop();
 440   }
 441   void set_pair_local(int i, Node* lval) {
 442     // longs are stored in locals as a value/half pair (like doubles)
 443     set_local(i+0, lval);
 444     set_local(i+1, top());
 445   }
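
For example, a long-typed binary operation pops two two-slot values and
pushes a two-slot result (AddLNode comes from addnode.hpp, included above):

        Node* b = pop_pair();
        Node* a = pop_pair();
        push_pair(_gvn.transform(new AddLNode(a, b)));
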
 446 
 447   // Push the node, which may be zero, one, or two words.
 448   void push_node(BasicType n_type, Node* n) {
 449     int n_size = type2size[n_type];
 450     if      (n_size == 1)  push(      n );  // T_INT, ...
 451     else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
 452     else                   { assert(n_size == 0, "must be T_VOID"); }
 453   }
 454 
 455   Node* pop_node(BasicType n_type) {
 456     int n_size = type2size[n_type];
 457     if      (n_size == 1)  return pop();
 458     else if (n_size == 2)  return pop_pair();
 459     else                   return NULL;
 460   }
 461 
 462   Node* control()               const { return map_not_null()->control(); }
 463   Node* i_o()                   const { return map_not_null()->i_o(); }
 464   Node* returnadr()             const { return map_not_null()->returnadr(); }
 465   Node* frameptr()              const { return map_not_null()->frameptr(); }
 466   Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
 467   Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
 468   Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
 469   Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
 470   Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }
 471 
 472   void set_control  (Node* c)         { map_not_null()->set_control(c); }
 473   void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
 474   void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
 475   void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
 476   void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
 477   void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }
 478 
 479   // Access unaliased memory
 480   Node* memory(uint alias_idx);
 481   Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
 482   Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }
 483 
 484   // Access immutable memory
 485   Node* immutable_memory() { return C->immutable_memory(); }
 486 
 487   // Set unaliased memory
 488   void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
 489   void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
 490   void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
 491 
 492   // Get the entire memory state (probably a MergeMemNode), and reset it
 493   // (The resetting prevents somebody from using the dangling Node pointer.)
 494   Node* reset_memory();
 495 
 496   // Get the entire memory state, asserted to be a MergeMemNode.
 497   MergeMemNode* merged_memory() {
 498     Node* mem = map_not_null()->memory();
 499     assert(mem->is_MergeMem(), "parse memory is always pre-split");
 500     return mem->as_MergeMem();
 501   }
 502 
 503   // Set the entire memory state; produce a new MergeMemNode.
 504   void set_all_memory(Node* newmem);
 505 
 506   // Create a memory projection from the call, then set_all_memory.
 507   void set_all_memory_call(Node* call, bool separate_io_proj = false);
 508 
 509   // Create a LoadNode, reading from the parser's memory state.
 510   // (Note:  require_atomic_access is useful only with T_LONG.)
 511   //
 512   // We choose the unordered semantics by default because we have
 513   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
 514   // of volatile fields.
 515   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 516                   MemNode::MemOrd mo, bool require_atomic_access = false) {
 517     // This version computes alias_index from bottom_type
 518     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
 519                      mo, require_atomic_access);
 520   }
 521   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
 522                   MemNode::MemOrd mo, bool require_atomic_access = false) {
 523     // This version computes alias_index from an address type
 524     assert(adr_type != NULL, "use other make_load factory");
 525     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
 526                      mo, require_atomic_access);
 527   }
 528   // This is the base version which is given an alias index.
 529   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
 530                   MemNode::MemOrd mo, bool require_atomic_access = false);
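
E.g. an int load through the address-typed factory (adr and adr_type assumed
to have been built already):

        Node* val = make_load(control(), adr, TypeInt::INT, T_INT, adr_type,
                              MemNode::unordered);
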
 531 
 532   // Create & transform a StoreNode and store the effect into the
 533   // parser's memory state.
 534   //
 535   // We must ensure that stores of object references will be visible
 536   // only after the object's initialization. So the clients of this
 537   // procedure must indicate that the store requires `release'
 538   // semantics, if the stored value is an object reference that might
 539   // point to a new object and may become externally visible.
 540   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 541                         const TypePtr* adr_type,
 542                         MemNode::MemOrd mo,
 543                         bool require_atomic_access = false) {
 544     // This version computes alias_index from an address type
 545     assert(adr_type != NULL, "use other store_to_memory factory");
 546     return store_to_memory(ctl, adr, val, bt,
 547                            C->get_alias_index(adr_type),
 548                            mo, require_atomic_access);
 549   }
 550   // This is the base version which is given alias index
 551   // Return the new StoreXNode
 552   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 553                         int adr_idx,
 554                         MemNode::MemOrd,
 555                         bool require_atomic_access = false);
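
E.g. a primitive field store that picks its ordering from volatility, per the
note above (is_volatile is illustrative):

        store_to_memory(control(), adr, val, T_INT, adr_type,
                        is_volatile ? MemNode::release : MemNode::unordered);
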
 556 
 557 
 558   // All in one pre-barrier, store, post_barrier
 559   // Insert a write-barrier'd store.  This is to let generational GC
 560   // work; we have to flag all oop-stores before the next GC point.
 561   //
 562   // It comes in 3 flavors of store to an object, array, or unknown.
 563   // We use precise card marks for arrays to avoid scanning the entire
  564   // array. We use imprecise for objects. We use precise for unknown
  565   // since we don't know if we have an array or an object or even
 566   // where the object starts.
 567   //
 568   // If val==NULL, it is taken to be a completely unknown value. QQQ
 569 
 570   Node* store_oop(Node* ctl,
 571                   Node* obj,   // containing obj
  572                   Node* adr,   // actual address to store val at
 573                   const TypePtr* adr_type,
 574                   Node* val,
 575                   const TypeOopPtr* val_type,
 576                   BasicType bt,
 577                   bool use_precise,
 578                   MemNode::MemOrd mo);
 579 
 580   Node* store_oop_to_object(Node* ctl,
 581                             Node* obj,   // containing obj
  582                             Node* adr,   // actual address to store val at
 583                             const TypePtr* adr_type,
 584                             Node* val,
 585                             const TypeOopPtr* val_type,
 586                             BasicType bt,
 587                             MemNode::MemOrd mo) {
 588     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
 589   }
 590 
 591   Node* store_oop_to_array(Node* ctl,
 592                            Node* obj,   // containing obj
  593                            Node* adr,   // actual address to store val at
 594                            const TypePtr* adr_type,
 595                            Node* val,
 596                            const TypeOopPtr* val_type,
 597                            BasicType bt,
 598                            MemNode::MemOrd mo) {
 599     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
 600   }
 601 
 602   // Could be an array or object we don't know at compile time (unsafe ref.)
 603   Node* store_oop_to_unknown(Node* ctl,
 604                              Node* obj,   // containing obj
  605                              Node* adr,   // actual address to store val at
 606                              const TypePtr* adr_type,
 607                              Node* val,
 608                              BasicType bt,
 609                              MemNode::MemOrd mo);
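
Choosing among the flavors at a use site, e.g. an aastore-style element store
that wants precise card marks (operand names assumed in scope):

        store_oop_to_array(control(), ary, adr, adr_type,
                           val, val_type, T_OBJECT, MemNode::release);
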
 610 
  611   // For the few cases where the barriers need special help
 612   void pre_barrier(bool do_load, Node* ctl,
 613                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
 614                    Node* pre_val,
 615                    BasicType bt);
 616 
 617   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
 618                     Node* val, BasicType bt, bool use_precise);
 619 
 620   // Return addressing for an array element.
 621   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 622                               // Optional constraint on the array size:
 623                               const TypeInt* sizetype = NULL);
 624 
 625   // Return a load of array element at idx.
 626   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
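
Together with make_load above, this yields an array read; a sketch for an int
element:

        Node* adr = array_element_address(ary, idx, T_INT);
        Node* val = make_load(control(), adr, TypeInt::INT, T_INT,
                              MemNode::unordered);
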
 627 
 628   //---------------- Dtrace support --------------------
 629   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
 630   void make_dtrace_method_entry(ciMethod* method) {
 631     make_dtrace_method_entry_exit(method, true);
 632   }
 633   void make_dtrace_method_exit(ciMethod* method) {
 634     make_dtrace_method_entry_exit(method, false);
 635   }
 636 
 637   //--------------- stub generation -------------------
 638  public:
 639   void gen_stub(address C_function,
 640                 const char *name,
 641                 int is_fancy_jump,
 642                 bool pass_tls,
 643                 bool return_pc);
 644 
 645   //---------- help for generating calls --------------
 646 
 647   // Do a null check on the receiver as it would happen before the call to
 648   // callee (with all arguments still on the stack).
 649   Node* null_check_receiver_before_call(ciMethod* callee) {
 650     assert(!callee->is_static(), "must be a virtual method");
 651     const int nargs = callee->arg_size();
 652     inc_sp(nargs);
 653     Node* n = null_check_receiver();
 654     dec_sp(nargs);
 655     return n;
 656   }
 657 
 658   // Fill in argument edges for the call from argument(0), argument(1), ...
 659   // (The next step is to call set_edges_for_java_call.)
 660   void  set_arguments_for_java_call(CallJavaNode* call);
 661 
 662   // Fill in non-argument edges for the call.
 663   // Transform the call, and update the basics: control, i_o, memory.
 664   // (The next step is usually to call set_results_for_java_call.)
 665   void set_edges_for_java_call(CallJavaNode* call,
 666                                bool must_throw = false, bool separate_io_proj = false);
 667 
 668   // Finish up a java call that was started by set_edges_for_java_call.
 669   // Call add_exception on any throw arising from the call.
 670   // Return the call result (transformed).
 671   Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
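
These three are used in sequence when emitting a Java call; condensed (call
and must_throw come from the surrounding generator):

        set_arguments_for_java_call(call);
        set_edges_for_java_call(call, must_throw);
        Node* result = set_results_for_java_call(call);
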
 672 
 673   // Similar to set_edges_for_java_call, but simplified for runtime calls.
 674   void  set_predefined_output_for_runtime_call(Node* call) {
 675     set_predefined_output_for_runtime_call(call, NULL, NULL);
 676   }
 677   void  set_predefined_output_for_runtime_call(Node* call,
 678                                                Node* keep_mem,
 679                                                const TypePtr* hook_mem);
 680   Node* set_predefined_input_for_runtime_call(SafePointNode* call);
 681 
 682   // Replace the call with the current state of the kit.  Requires
 683   // that the call was generated with separate io_projs so that
 684   // exceptional control flow can be handled properly.
 685   void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);
 686 
 687   // helper functions for statistics
 688   void increment_counter(address counter_addr);   // increment a debug counter
 689   void increment_counter(Node*   counter_addr);   // increment a debug counter
 690 
 691   // Bail out to the interpreter right now
 692   // The optional klass is the one causing the trap.
 693   // The optional reason is debug information written to the compile log.
 694   // Optional must_throw is the same as with add_safepoint_edges.
 695   void uncommon_trap(int trap_request,
 696                      ciKlass* klass = NULL, const char* reason_string = NULL,
 697                      bool must_throw = false, bool keep_exact_action = false);
 698 
 699   // Shorthand, to avoid saying "Deoptimization::" so many times.
 700   void uncommon_trap(Deoptimization::DeoptReason reason,
 701                      Deoptimization::DeoptAction action,
 702                      ciKlass* klass = NULL, const char* reason_string = NULL,
 703                      bool must_throw = false, bool keep_exact_action = false) {
 704     uncommon_trap(Deoptimization::make_trap_request(reason, action),
 705                   klass, reason_string, must_throw, keep_exact_action);
 706   }
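
E.g. to abandon a path that profiling says is never taken:

        uncommon_trap(Deoptimization::Reason_unreached,
                      Deoptimization::Action_reinterpret);
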
 707 
 708   // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
 709   void uncommon_trap_exact(Deoptimization::DeoptReason reason,
 710                            Deoptimization::DeoptAction action,
 711                            ciKlass* klass = NULL, const char* reason_string = NULL,
 712                            bool must_throw = false) {
 713     uncommon_trap(Deoptimization::make_trap_request(reason, action),
 714                   klass, reason_string, must_throw, /*keep_exact_action=*/true);
 715   }
 716 
 717   // SP when bytecode needs to be reexecuted.
 718   virtual int reexecute_sp() { return sp(); }
 719 
 720   // Report if there were too many traps at the current method and bci.
 721   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
 722   // If there is no MDO at all, report no trap unless told to assume it.
 723   bool too_many_traps(Deoptimization::DeoptReason reason) {
 724     return C->too_many_traps(method(), bci(), reason);
 725   }
 726 
 727   // Report if there were too many recompiles at the current method and bci.
 728   bool too_many_recompiles(Deoptimization::DeoptReason reason) {
 729     return C->too_many_recompiles(method(), bci(), reason);
 730   }
 731 
 732   // Returns the object (if any) which was created the moment before.
 733   Node* just_allocated_object(Node* current_control);
 734 
 735   static bool use_ReduceInitialCardMarks() {
 736     return (ReduceInitialCardMarks
 737             && Universe::heap()->can_elide_tlab_store_barriers());
 738   }
 739 
 740   // Sync Ideal and Graph kits.
 741   void sync_kit(IdealKit& ideal);
 742   void final_sync(IdealKit& ideal);
 743 
 744   // vanilla/CMS post barrier
 745   void write_barrier_post(Node *store, Node* obj,
 746                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
 747 
 748   // Allow reordering of pre-barrier with oop store and/or post-barrier.
  749   // Used for load_store operations which load the old value.
 750   bool can_move_pre_barrier() const;
 751 
 752   // G1 pre/post barriers
 753   void g1_write_barrier_pre(bool do_load,
 754                             Node* obj,
 755                             Node* adr,
 756                             uint alias_idx,
 757                             Node* val,
 758                             const TypeOopPtr* val_type,
 759                             Node* pre_val,
 760                             BasicType bt);
 761 
 762   void g1_write_barrier_post(Node* store,
 763                              Node* obj,
 764                              Node* adr,
 765                              uint alias_idx,
 766                              Node* val,
 767                              BasicType bt,
 768                              bool use_precise);
 769   // Helper function for g1
 770   private:
 771   void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
 772                     Node* index, Node* index_adr,
 773                     Node* buffer, const TypeFunc* tf);
 774 
 775   bool g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr, BasicType bt, uint adr_idx);
 776 
 777   bool g1_can_remove_post_barrier(PhaseTransform* phase, Node* store, Node* adr);
 778 
 779   public:
 780   // Helper function to round double arguments before a call
 781   void round_double_arguments(ciMethod* dest_method);
 782   void round_double_result(ciMethod* dest_method);
 783 
 784   // rounding for strict float precision conformance
 785   Node* precision_rounding(Node* n);
 786 
 787   // rounding for strict double precision conformance
 788   Node* dprecision_rounding(Node* n);
 789 
 790   // rounding for non-strict double stores
 791   Node* dstore_rounding(Node* n);
 792 
 793   // Helper functions for fast/slow path codes
 794   Node* opt_iff(Node* region, Node* iff);
 795   Node* make_runtime_call(int flags,
 796                           const TypeFunc* call_type, address call_addr,
 797                           const char* call_name,
 798                           const TypePtr* adr_type, // NULL if no memory effects
 799                           Node* parm0 = NULL, Node* parm1 = NULL,
 800                           Node* parm2 = NULL, Node* parm3 = NULL,
 801                           Node* parm4 = NULL, Node* parm5 = NULL,
 802                           Node* parm6 = NULL, Node* parm7 = NULL);
 803   enum {  // flag values for make_runtime_call
 804     RC_NO_FP = 1,               // CallLeafNoFPNode
 805     RC_NO_IO = 2,               // do not hook IO edges
 806     RC_NO_LEAF = 4,             // CallStaticJavaNode
 807     RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
 808     RC_NARROW_MEM = 16,         // input memory is same as output
 809     RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
 810     RC_LEAF = 0                 // null value:  no flags set
 811   };
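
The flags OR together; e.g. a leaf call that saves no FP registers and
touches a single memory slice might be built as (the call type, address, and
name operands are illustrative):

        Node* call = make_runtime_call(RC_LEAF | RC_NO_FP | RC_NARROW_MEM,
                                       call_type, call_addr, call_name,
                                       adr_type, parm0);
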
 812 
 813   // merge in all memory slices from new_mem, along the given path
 814   void merge_memory(Node* new_mem, Node* region, int new_path);
 815   void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);
 816 
 817   // Helper functions to build synchronizations
 818   int next_monitor();
 819   Node* insert_mem_bar(int opcode, Node* precedent = NULL);
 820   Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
 821   // Optional 'precedent' is appended as an extra edge, to force ordering.
 822   FastLockNode* shared_lock(Node* obj);
 823   void shared_unlock(Node* box, Node* obj);
 824 
 825   // helper functions for the fast path/slow path idioms
 826   Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);
 827 
 828   // Generate an instance-of idiom.  Used by both the instance-of bytecode
 829   // and the reflective instance-of call.
 830   Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
 831 
 832   // Generate a check-cast idiom.  Used by both the check-cast bytecode
 833   // and the array-store bytecode
 834   Node* gen_checkcast( Node *subobj, Node* superkls,
 835                        Node* *failure_control = NULL );
 836 
 837   Node* gen_subtype_check(Node* subklass, Node* superklass) {
 838     MergeMemNode* mem = merged_memory();
 839     Node* ctrl = control();
 840     Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
 841     set_control(ctrl);
 842     return n;
 843   }
 844 
 845   // Exact type check used for predicted calls and casts.
 846   // Rewrites (*casted_receiver) to be casted to the stronger type.
 847   // (Caller is responsible for doing replace_in_map.)
 848   Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
 849                             Node* *casted_receiver);
 850 
 851   // implementation of object creation
 852   Node* set_output_for_allocation(AllocateNode* alloc,
 853                                   const TypeOopPtr* oop_type,
 854                                   bool deoptimize_on_exception=false);
 855   Node* get_layout_helper(Node* klass_node, jint& constant_value);
 856   Node* new_instance(Node* klass_node,
 857                      Node* slow_test = NULL,
 858                      Node* *return_size_val = NULL,
 859                      bool deoptimize_on_exception = false);
 860   Node* new_array(Node* klass_node, Node* count_val, int nargs,
 861                   Node* *return_size_val = NULL,
 862                   bool deoptimize_on_exception = false);
 863 
 864   // java.lang.String helpers
 865   Node* load_String_offset(Node* ctrl, Node* str);
 866   Node* load_String_length(Node* ctrl, Node* str);
 867   Node* load_String_value(Node* ctrl, Node* str);
 868   void store_String_offset(Node* ctrl, Node* str, Node* value);
 869   void store_String_length(Node* ctrl, Node* str, Node* value);
 870   void store_String_value(Node* ctrl, Node* str, Node* value);
 871 
 872   // Handy for making control flow
 873   IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
 874     IfNode* iff = new IfNode(ctrl, tst, prob, cnt);// New IfNode's
 875     _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
 876     // Place 'if' on worklist if it will be in graph
 877     if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
 878     return iff;
 879   }
 880 
 881   IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
 882     IfNode* iff = new IfNode(ctrl, tst, prob, cnt);// New IfNode's
 883     _gvn.transform(iff);                           // Value may be known at parse-time
 884     // Place 'if' on worklist if it will be in graph
 885     if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
 886     return iff;
 887   }
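
Putting the shortcuts together, a two-way branch is typically built as
(PROB_FAIR and COUNT_UNKNOWN come from cfgnode.hpp, included above):

        Node* tst = Bool(CmpI(a, b), BoolTest::lt);
        IfNode* iff = create_and_xform_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
        Node* untaken = IfFalse(iff);   // merge or trap on this path
        set_control(IfTrue(iff));       // continue along the taken branch
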
 888 
 889   // Insert a loop predicate into the graph
 890   void add_predicate(int nargs = 0);
 891   void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
 892 
 893   // Produce new array node of stable type
 894   Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
 895 };
 896 
 897 // Helper class to support building of control flow branches. Upon
  898 // creation the map and sp at bci are cloned and restored upon
  899 // destruction. Typical use:
 900 //
 901 // { PreserveJVMState pjvms(this);
 902 //   // code of new branch
 903 // }
 904 // // here the JVM state at bci is established
 905 
 906 class PreserveJVMState: public StackObj {
 907  protected:
 908   GraphKit*      _kit;
 909 #ifdef ASSERT
 910   int            _block;  // PO of current block, if a Parse
 911   int            _bci;
 912 #endif
 913   SafePointNode* _map;
 914   uint           _sp;
 915 
 916  public:
 917   PreserveJVMState(GraphKit* kit, bool clone_map = true);
 918   ~PreserveJVMState();
 919 };
 920 
 921 // Helper class to build cutouts of the form if (p) ; else {x...}.
 922 // The code {x...} must not fall through.
 923 // The kit's main flow of control is set to the "then" continuation of if(p).
 924 class BuildCutout: public PreserveJVMState {
 925  public:
 926   BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
 927   ~BuildCutout();
 928 };
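
Typical use mirrors the PreserveJVMState example above; a sketch (bol is the
test condition, and the cutout body must not fall through):

        { BuildCutout unless(this, bol, PROB_MAX);
          uncommon_trap(Deoptimization::Reason_intrinsic,
                        Deoptimization::Action_maybe_recompile);
        }
        // here, control is the "then" continuation of the test
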
 929 
  930 // Helper class to preserve the original _reexecute bit and _sp, restoring
  931 // them on destruction.
 932 class PreserveReexecuteState: public StackObj {
 933  protected:
 934   GraphKit*                 _kit;
 935   uint                      _sp;
 936   JVMState::ReexecuteState  _reexecute;
 937 
 938  public:
 939   PreserveReexecuteState(GraphKit* kit);
 940   ~PreserveReexecuteState();
 941 };
 942 
 943 #endif // SHARE_VM_OPTO_GRAPHKIT_HPP
--- EOF ---