//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method
  BarrierSetC2*     _barrier_set;
#ifdef ASSERT
  uint              _worklist_size;
#endif

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode*    map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL); // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
    // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
    // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies,
    // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
    assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size),
           "GraphKit should not modify _for_igvn worklist after parsing");
  }
#endif

  virtual Parse*          is_Parse() const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*        env()               const { return _env; }
  PhaseGVN&     gvn()               const { return _gvn; }
  void*         barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }

  // Handy well-known nodes:
  Node*         null()  const { return zerocon(T_OBJECT); }
  Node*         top()   const { return C->top(); }
  RootNode*     root()  const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type *t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
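
  // Illustrative usage sketch (not part of the original header; variable names
  // are made up): the constant factories above hash-cons through _gvn, so
  // repeated requests for the same constant return the same node.
  //
  //   Node* zero = intcon(0);                  // shared ConINode for 0
  //   Node* big  = longcon(CONST64(1) << 33);  // ConLNode
  //   Node* nul  = null();                     // zerocon(T_OBJECT)
  //   jint  c    = find_int_con(zero, -1);     // recovers 0; -1 if not a constant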
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
  }

  Node* null2default(Node* value, ciValueKlass* vk = NULL);

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop. Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map).
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);
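
  // Illustrative usage sketch (not part of the original header; variable names
  // are made up): split a value into a not-null fast path and an explicit
  // null path.
  //
  //   Node* null_ctl = top();
  //   Node* not_null_obj = null_check_oop(obj, &null_ctl);
  //   // here: control() is the not-null path; not_null_obj is cast not-null
  //   if (null_ctl != top()) {
  //     // null_ctl is the control path taken when obj was null
  //   }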

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
                          unaligned, mismatched);
  }
  // This is the base version, which is given the alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false);

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators,
                        bool deoptimize_on_exception = false);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);
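
  // Illustrative usage sketch (not part of the original header; the offset,
  // basic_plus_adr helper, and decorator choice are assumptions): decorated
  // accessors route through BarrierSetC2, so GC barriers are applied
  // automatically for the given DecoratorSet.
  //
  //   const TypePtr* adr_type = obj_type->add_offset(off);
  //   Node* adr = basic_plus_adr(obj, off);
  //   access_store_at(obj, adr, adr_type, val, val_type, T_OBJECT,
  //                   IN_HEAP | MO_UNORDERED);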

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array);

  Node* access_resolve(Node* n, DecoratorSet decorators);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
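
  // Illustrative usage sketch (not part of the original header; variable names
  // are made up): compute an element address under a range-check control and
  // load the element.
  //
  //   const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
  //   Node* adr = array_element_address(ary, idx, T_INT,
  //                                     arytype->size(), control());
  //   Node* val = load_array_element(control(), ary, idx, arytype);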

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    if (argument(0)->is_ValueType()) {
      return argument(0);
    }
    // The callsite signature can differ from the actual method being called
    // (i.e. _linkTo* sites). Always use the callsite signature.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining = false);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);
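
  // Illustrative call-generation sequence (not part of the original header;
  // construction of the CallJavaNode itself is elided):
  //
  //   CallJavaNode* call = ...;               // e.g. a CallStaticJavaNode
  //   set_arguments_for_java_call(call);      // argument edges
  //   set_edges_for_java_call(call);          // control, i_o, memory edges
  //   Node* result = set_results_for_java_call(call);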

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);
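
  // Illustrative usage sketch (not part of the original header; 'the_load' is
  // a made-up name): insert a memory barrier with an optional ordering edge,
  // as is done around volatile accesses.
  //
  //   Node* membar = insert_mem_bar(Op_MemBarAcquire, the_load);
  //   // 'the_load' is appended as a precedent edge to force ordering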

  // Helper functions for the fast-path/slow-path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom. Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom. Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = NULL, bool never_null = false);

  Node* is_always_locked(Node* obj);
  Node* gen_value_type_test(Node* kls);
  void gen_value_type_guard(Node* obj, int nargs = 0);
  void gen_value_type_array_guard(Node* ary, Node* obj, int nargs);
  Node* load_lh_array_tag(Node* kls);
  Node* gen_lh_array_test(Node* kls, unsigned int lh_value);

  Node* gen_subtype_check(Node* subklass, Node* superklass) {
    MergeMemNode* mem = merged_memory();
    Node* ctrl = control();
    Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
    set_control(ctrl);
    return n;
  }

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);
  Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // Implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false,
                     ValueTypeBaseNode* value_node = NULL);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode
    _gvn.set_type(iff, iff->Value(&_gvn));          // Value may be known at parse time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con()) record_for_igvn(iff);       // Range-check and null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode
    _gvn.transform(iff);                            // Value may be known at parse time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con()) record_for_igvn(iff);       // Range-check and null-check removal is later
    return iff;
  }
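
  // Illustrative usage sketch (not part of the original header; variable names
  // are made up): build a two-way branch and its projections with the helper
  // above.
  //
  //   Node* cmp = _gvn.transform(new CmpINode(value, intcon(0)));
  //   Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
  //   IfNode* iff = create_and_map_if(control(), tst, PROB_LIKELY_MAG(3), COUNT_UNKNOWN);
  //   Node* if_true  = _gvn.transform(new IfTrueNode(iff));
  //   Node* if_false = _gvn.transform(new IfFalseNode(iff));
  //   set_control(if_true);   // continue building along the taken path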

  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  Node* load_mirror_from_klass(Node* klass);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;