
src/share/vm/opto/parse1.cpp


 975   //
 976   // 2. On PPC64, also add MemBarRelease for constructors which write
 977   //    volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
 978   //    is set on PPC64, no sync instruction is issued after volatile
 979   //    stores. We want to guarantee the same behavior as on platforms
 980   //    with total store order, although this is not required by the Java
 981   //    memory model. So as with finals, we add a barrier here.
 982   //
 983   // 3. An experimental VM option is used to force the barrier if any
 984   //    field was written in the constructor.
 985   //
 986   // "All bets are off" unless the first publication occurs after a
 987   // normal return from the constructor.  We do not attempt to detect
 988   // such unusual early publications.  But no barrier is needed on
 989   // exceptional returns, since they cannot publish normally.
 990   //
 991   if (method()->is_initializer() &&
 992         (wrote_final() ||
 993            PPC64_ONLY(wrote_volatile() ||)
 994            (AlwaysSafeConstructors && wrote_fields()))) {
 995     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
 996 
 997     // If a memory barrier is created for the final field writes
 998     // and the allocation node does not escape the initializer method,
 999     // then the barrier introduced by the allocation node can be removed.
1000     if (DoEscapeAnalysis && alloc_with_final()) {
1001       AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
1002       alloc->compute_MemBar_redundancy(method());
1003     }
1004     if (PrintOpto && (Verbose || WizardMode)) {
1005       method()->print_name();
1006       tty->print_cr(" writes finals and needs a memory barrier");
1007     }
1008   }
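
For context, the release barrier inserted above enforces the usual safe-publication ordering: the constructor's stores must become visible no later than the store that publishes the reference to the new object. Below is a minimal stand-alone C++ sketch of that ordering, using std::atomic release/acquire in place of C2's MemBarRelease node; Payload, g_slot and the other names are illustrative only and not part of this file.

#include <atomic>
#include <cassert>
#include <thread>

struct Payload {
  int final_like;                        // stands in for a Java final field
  Payload() : final_like(42) {}          // "constructor" performing the store
};

std::atomic<Payload*> g_slot{nullptr};   // shared publication point

void publisher() {
  Payload* p = new Payload();            // initializing store happens here
  // Release ordering: the store to final_like cannot be reordered past this
  // publication, mirroring the MemBarRelease emitted at the constructor exit.
  g_slot.store(p, std::memory_order_release);
}

void consumer() {
  Payload* p;
  while ((p = g_slot.load(std::memory_order_acquire)) == nullptr) { /* spin */ }
  assert(p->final_like == 42);           // guaranteed by the release/acquire pair
}

int main() {
  std::thread t1(publisher), t2(consumer);
  t1.join();
  t2.join();
  return 0;
}

As with the barrier in the code above, the guarantee only holds when publication happens after the initialization completes; publishing the pointer earlier would defeat it, which is the early-publication caveat in the comment.
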
1009 
1010   // Any method can write a @Stable field; insert memory barriers
1011   // after those also. Can't bind predecessor allocation node (if any)
1012   // with barrier because allocation doesn't always dominate
1013   // MemBarRelease.
1014   if (wrote_stable()) {
1015     _exits.insert_mem_bar(Op_MemBarRelease);
1016     if (PrintOpto && (Verbose || WizardMode)) {
1017       method()->print_name();
1018       tty->print_cr(" writes @Stable and needs a memory barrier");
1019     }
1020   }
1021 
1022   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1023     // transform each slice of the original memphi:
1024     mms.set_memory(_gvn.transform(mms.memory()));
1025   }
1026 
1027   if (tf()->range()->cnt() > TypeFunc::Parms) {
1028     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1029     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1030     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1031       // In case of concurrent class loading, the type we set for the
1032       // ret_phi in build_exits() may have been too optimistic and the
1033       // ret_phi may be top now.
1034       // Otherwise, we've encountered an error and have to mark the method as
1035       // not compilable. Just using an assertion instead would be dangerous


1142   // Create an initial safepoint to hold JVM state during parsing
1143   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1144   set_map(new SafePointNode(len, jvms));
1145   jvms->set_map(map());
1146   record_for_igvn(map());
1147   assert(jvms->endoff() == len, "correct jvms sizing");
1148 
1149   SafePointNode* inmap = _caller->map();
1150   assert(inmap != NULL, "must have inmap");
1151   // In case of a null check on the receiver above
1152   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1153 
1154   uint i;
1155 
1156   // Pass through the predefined input parameters.
1157   for (i = 0; i < TypeFunc::Parms; i++) {
1158     map()->init_req(i, inmap->in(i));
1159   }
1160 
1161   if (depth() == 1) {
1162     assert(map()->memory()->Opcode() == Op_Parm, "");
1163     // Insert the memory aliasing node
1164     set_all_memory(reset_memory());
1165   }
1166   assert(merged_memory(), "");
1167 
1168   // Now add the locals which are initially bound to arguments:
1169   uint arg_size = tf()->domain()->cnt();
1170   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1171   for (i = TypeFunc::Parms; i < arg_size; i++) {
1172     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1173   }
1174 
1175   // Clear out the rest of the map (locals and stack)
1176   for (i = arg_size; i < len; i++) {
1177     map()->init_req(i, top());
1178   }
1179 
1180   SafePointNode* entry_map = stop();
1181   return entry_map;
1182 }
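
The entry-map construction above is a fixed-size slot layout: the predefined header inputs (control, memory, frame pointer, and so on) are copied from the caller's map, the incoming arguments are bound to the slots that follow, and every remaining local and stack slot is filled with top(), the "no value yet" node. A rough stand-alone C++ sketch of that pattern, where Slot, kTop and kParms are hypothetical stand-ins for C2's Node inputs, top() and TypeFunc::Parms:

#include <cstddef>
#include <vector>

// Hypothetical stand-ins: a real entry map holds Node* inputs; here a plain
// int slot is used, with kTop marking "no value yet" (the analogue of top()).
using Slot = int;
constexpr Slot kTop = -1;
constexpr std::size_t kParms = 5;        // analogue of TypeFunc::Parms

std::vector<Slot> build_entry_map(const std::vector<Slot>& caller_map,
                                  const std::vector<Slot>& args,
                                  std::size_t len) {
  std::vector<Slot> map(len, kTop);      // everything starts out as "top"
  // Pass through the predefined header inputs from the caller's map.
  for (std::size_t i = 0; i < kParms && i < caller_map.size(); i++) {
    map[i] = caller_map[i];
  }
  // Bind the incoming arguments to the local slots that follow the header;
  // the remaining locals and stack slots stay at kTop, as in the final loop above.
  for (std::size_t i = 0; i < args.size() && kParms + i < len; i++) {
    map[kParms + i] = args[i];
  }
  return map;
}
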




 975   //
 976   // 2. On PPC64, also add MemBarRelease for constructors which write
 977   //    volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
 978   //    is set on PPC64, no sync instruction is issued after volatile
 979   //    stores. We want to guarantee the same behavior as on platforms
 980   //    with total store order, although this is not required by the Java
 981   //    memory model. So as with finals, we add a barrier here.
 982   //
 983   // 3. An experimental VM option is used to force the barrier if any
 984   //    field was written in the constructor.
 985   //
 986   // "All bets are off" unless the first publication occurs after a
 987   // normal return from the constructor.  We do not attempt to detect
 988   // such unusual early publications.  But no barrier is needed on
 989   // exceptional returns, since they cannot publish normally.
 990   //
 991   if (method()->is_initializer() &&
 992         (wrote_final() ||
 993            PPC64_ONLY(wrote_volatile() ||)
 994            (AlwaysSafeConstructors && wrote_fields()))) {
 995     _exits.insert_mem_bar(Opcodes::Op_MemBarRelease, alloc_with_final());
 996 
 997     // If a memory barrier is created for the final field writes
 998     // and the allocation node does not escape the initializer method,
 999     // then the barrier introduced by the allocation node can be removed.
1000     if (DoEscapeAnalysis && alloc_with_final()) {
1001       AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
1002       alloc->compute_MemBar_redundancy(method());
1003     }
1004     if (PrintOpto && (Verbose || WizardMode)) {
1005       method()->print_name();
1006       tty->print_cr(" writes finals and needs a memory barrier");
1007     }
1008   }
1009 
1010   // Any method can write a @Stable field; insert memory barriers
1011   // after those also. Can't bind predecessor allocation node (if any)
1012   // with barrier because allocation doesn't always dominate
1013   // MemBarRelease.
1014   if (wrote_stable()) {
1015     _exits.insert_mem_bar(Opcodes::Op_MemBarRelease);
1016     if (PrintOpto && (Verbose || WizardMode)) {
1017       method()->print_name();
1018       tty->print_cr(" writes @Stable and needs a memory barrier");
1019     }
1020   }
1021 
1022   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1023     // transform each slice of the original memphi:
1024     mms.set_memory(_gvn.transform(mms.memory()));
1025   }
1026 
1027   if (tf()->range()->cnt() > TypeFunc::Parms) {
1028     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1029     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1030     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1031       // In case of concurrent class loading, the type we set for the
1032       // ret_phi in build_exits() may have been too optimistic and the
1033       // ret_phi may be top now.
1034       // Otherwise, we've encountered an error and have to mark the method as
1035       // not compilable. Just using an assertion instead would be dangerous


1142   // Create an initial safepoint to hold JVM state during parsing
1143   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1144   set_map(new SafePointNode(len, jvms));
1145   jvms->set_map(map());
1146   record_for_igvn(map());
1147   assert(jvms->endoff() == len, "correct jvms sizing");
1148 
1149   SafePointNode* inmap = _caller->map();
1150   assert(inmap != NULL, "must have inmap");
1151   // In case of a null check on the receiver above
1152   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1153 
1154   uint i;
1155 
1156   // Pass through the predefined input parameters.
1157   for (i = 0; i < TypeFunc::Parms; i++) {
1158     map()->init_req(i, inmap->in(i));
1159   }
1160 
1161   if (depth() == 1) {
1162     assert(map()->memory()->Opcode() == Opcodes::Op_Parm, "");
1163     // Insert the memory aliasing node
1164     set_all_memory(reset_memory());
1165   }
1166   assert(merged_memory(), "");
1167 
1168   // Now add the locals which are initially bound to arguments:
1169   uint arg_size = tf()->domain()->cnt();
1170   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1171   for (i = TypeFunc::Parms; i < arg_size; i++) {
1172     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1173   }
1174 
1175   // Clear out the rest of the map (locals and stack)
1176   for (i = arg_size; i < len; i++) {
1177     map()->init_req(i, top());
1178   }
1179 
1180   SafePointNode* entry_map = stop();
1181   return entry_map;
1182 }

