
src/share/vm/opto/library_call.cpp

rev 9080 : 8139040: Fix initializations before ShouldNotReachHere() etc. and enable -Wuninitialized on linux.
rev 9081 : imported patch more.patch


Before:

2644     }
2645   }
2646 
2647   int alias_idx = C->get_alias_index(adr_type);
2648 
2649   // Memory-model-wise, a LoadStore acts like a little synchronized
2650   // block, so it needs barriers on each side.  These don't translate
2651   // into actual barriers on most machines, but we still need the
2652   // rest of the compiler to respect the ordering.
2653 
2654   insert_mem_bar(Op_MemBarRelease);
2655   insert_mem_bar(Op_MemBarCPUOrder);
2656 
2657   // 4984716: MemBars must be inserted before this
2658   //          memory node in order to avoid a false
2659   //          dependency which will confuse the scheduler.
2660   Node *mem = memory(alias_idx);
2661 
2662   // For now, we handle only those cases that actually exist: ints,
2663   // longs, and Object. Adding others should be straightforward.
2664   Node* load_store;
2665   switch(type) {
2666   case T_INT:
2667     if (kind == LS_xadd) {
2668       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2669     } else if (kind == LS_xchg) {
2670       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2671     } else if (kind == LS_cmpxchg) {
2672       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2673     } else {
2674       ShouldNotReachHere();
2675     }
2676     break;
2677   case T_LONG:
2678     if (kind == LS_xadd) {
2679       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2680     } else if (kind == LS_xchg) {
2681       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2682     } else if (kind == LS_cmpxchg) {
2683       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2684     } else {
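
The "little synchronized block" comment in this hunk describes only the
leading half of the bracket (together with the 4984716 note about placing
the MemBars before the memory node); the matching trailing barriers are
emitted after the LoadStore node, past the end of this excerpt. As a
minimal C++11 analogy (not HotSpot code), an atomic read-modify-write
with sequentially consistent ordering carries the same release/acquire
bracket that the intrinsic builds with MemBar nodes:

    #include <atomic>

    // A seq_cst fetch_add behaves like a tiny synchronized block:
    // release semantics keep earlier stores from sinking below it,
    // acquire semantics keep later loads from floating above it.
    int get_and_add(std::atomic<int>& cell, int delta) {
      return cell.fetch_add(delta, std::memory_order_seq_cst);
    }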


3650   // It could be a dynamic mix of int[], boolean[], Object[], etc.
3651   Node* result = load_array_length(array);
3652 
3653   C->set_has_split_ifs(true);  // Has chance for split-if optimization
3654   set_result(result);
3655   return true;
3656 }
3657 
3658 //------------------------inline_array_copyOf----------------------------
3659 // public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
3660 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
3661 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3662   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3663 
3664   // Get the arguments.
3665   Node* original          = argument(0);
3666   Node* start             = is_copyOfRange? argument(1): intcon(0);
3667   Node* end               = is_copyOfRange? argument(2): argument(1);
3668   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3669 
3670   Node* newcopy;
3671 
3672   // Set the original stack and the reexecute bit for the interpreter to reexecute
3673   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3674   { PreserveReexecuteState preexecs(this);
3675     jvms()->set_should_reexecute(true);
3676 
3677     array_type_mirror = null_check(array_type_mirror);
3678     original          = null_check(original);
3679 
3680     // Check if a null path was taken unconditionally.
3681     if (stopped())  return true;
3682 
3683     Node* orig_length = load_array_length(original);
3684 
3685     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3686     klass_node = null_check(klass_node);
3687 
3688     RegionNode* bailout = new RegionNode(1);
3689     record_for_igvn(bailout);
3690 




After:

2644     }
2645   }
2646 
2647   int alias_idx = C->get_alias_index(adr_type);
2648 
2649   // Memory-model-wise, a LoadStore acts like a little synchronized
2650   // block, so it needs barriers on each side.  These don't translate
2651   // into actual barriers on most machines, but we still need the
2652   // rest of the compiler to respect the ordering.
2653 
2654   insert_mem_bar(Op_MemBarRelease);
2655   insert_mem_bar(Op_MemBarCPUOrder);
2656 
2657   // 4984716: MemBars must be inserted before this
2658   //          memory node in order to avoid a false
2659   //          dependency which will confuse the scheduler.
2660   Node *mem = memory(alias_idx);
2661 
2662   // For now, we handle only those cases that actually exist: ints,
2663   // longs, and Object. Adding others should be straightforward.
2664   Node* load_store = NULL;
2665   switch(type) {
2666   case T_INT:
2667     if (kind == LS_xadd) {
2668       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2669     } else if (kind == LS_xchg) {
2670       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2671     } else if (kind == LS_cmpxchg) {
2672       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2673     } else {
2674       ShouldNotReachHere();
2675     }
2676     break;
2677   case T_LONG:
2678     if (kind == LS_xadd) {
2679       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2680     } else if (kind == LS_xchg) {
2681       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2682     } else if (kind == LS_cmpxchg) {
2683       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2684     } else {
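
The only change in this hunk is the NULL initialization of load_store on
line 2664, the fix described in rev 9080. A reduced, self-contained
illustration (hypothetical names, not HotSpot code) of why GCC's
-Wuninitialized checks fire on the original: the fatal-error path behind
ShouldNotReachHere() is not something the compiler can prove to be
noreturn, so it must assume the default branch can fall through with the
variable still uninitialized:

    extern void should_not_reach_here();  // not marked [[noreturn]]

    int load(int kind, int a, int b) {
      int p;                     // warning: 'p' may be used uninitialized
      switch (kind) {
      case 0:  p = a; break;
      case 1:  p = b; break;
      default: should_not_reach_here();   // assumed able to return
      }
      return p;                  // the (impossible) fall-through path
    }

Initializing the variable at its declaration, as the patch does here and
for newcopy below, silences the warning without changing behavior on any
reachable path.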


3650   // It could be a dynamic mix of int[], boolean[], Object[], etc.
3651   Node* result = load_array_length(array);
3652 
3653   C->set_has_split_ifs(true);  // Has chance for split-if optimization
3654   set_result(result);
3655   return true;
3656 }
3657 
3658 //------------------------inline_array_copyOf----------------------------
3659 // public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
3660 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
3661 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3662   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3663 
3664   // Get the arguments.
3665   Node* original          = argument(0);
3666   Node* start             = is_copyOfRange? argument(1): intcon(0);
3667   Node* end               = is_copyOfRange? argument(2): argument(1);
3668   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3669 
3670   Node* newcopy = NULL;
3671 
3672   // Set the original stack and the reexecute bit for the interpreter to reexecute
3673   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3674   { PreserveReexecuteState preexecs(this);
3675     jvms()->set_should_reexecute(true);
3676 
3677     array_type_mirror = null_check(array_type_mirror);
3678     original          = null_check(original);
3679 
3680     // Check if a null path was taken unconditionally.
3681     if (stopped())  return true;
3682 
3683     Node* orig_length = load_array_length(original);
3684 
3685     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3686     klass_node = null_check(klass_node);
3687 
3688     RegionNode* bailout = new RegionNode(1);
3689     record_for_igvn(bailout);
3690 
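
The reexecute comment above describes a save/restore protocol: if the
compiled intrinsic deoptimizes, the interpreter must re-execute the
bytecode that invoked Arrays.copyOf rather than resume after it, so the
flag is set for the duration of the block and restored when the
PreserveReexecuteState guard goes out of scope. A generic RAII sketch of
that idiom (hypothetical types, not the HotSpot class):

    // Scope guard: record the flag on entry, restore it on every exit.
    class PreserveFlag {
      bool& _flag;
      bool  _saved;
     public:
      explicit PreserveFlag(bool& flag) : _flag(flag), _saved(flag) {}
      ~PreserveFlag() { _flag = _saved; }
    };

    void emit_intrinsic(bool& should_reexecute) {
      PreserveFlag guard(should_reexecute);
      should_reexecute = true;  // holds until the end of this scope
      // ... build IR that may include deoptimization points ...
    }                           // previous value restored here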

