// NOTE(review): fragment of a SPARC VM_Version initialization routine; the
// leading "123"-style tokens are line numbers baked into this listing capture,
// not part of the C++ source.  Code below is kept byte-identical.
123 }
124 #ifdef _LP64
125 // 32-bit oops don't make sense for the 64-bit VM on sparc
126 // since the 32-bit VM has the same registers and smaller objects.
127 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
128 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
129 #endif // _LP64
// C2-only flag ergonomics: each block below adjusts a JVM flag only when the
// user left it at its default (FLAG_IS_DEFAULT guard), so explicit -XX
// settings always win.
130 #ifdef COMPILER2
131 // Indirect branch is the same cost as direct
132 if (FLAG_IS_DEFAULT(UseJumpTables)) {
133 FLAG_SET_DEFAULT(UseJumpTables, true);
134 }
135 // Single-issue, so entry and loop tops are
136 // aligned on a single instruction boundary
137 if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
138 FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
139 }
// Allocation-prefetch tuning for Niagara-plus parts (presumably UltraSPARC
// T2 and later -- confirm against is_niagara_plus()).
140 if (is_niagara_plus()) {
// BIS-based prefetch requires block-init store support, a known cache line
// size, and TLAB allocation, and is applied only when the user did not pick
// a prefetch instruction explicitly.
141 if (has_blk_init() && (cache_line_size > 0) && UseTLAB &&
142 FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
143 // Use BIS instruction for TLAB allocation prefetch.
// AllocatePrefetchInstr == 1 selects BIS, with prefetch style 3 as the
// matching style.  NOTE(review): FLAG_SET_ERGO here vs FLAG_SET_DEFAULT
// elsewhere -- presumably to record these as ergonomic decisions; verify
// against the flag macro definitions.
144 FLAG_SET_ERGO(intx, AllocatePrefetchInstr, 1);
145 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
146 FLAG_SET_ERGO(intx, AllocatePrefetchStyle, 3);
147 }
148 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
149 // Use smaller prefetch distance with BIS
150 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
151 }
152 }
153 if (is_T4()) {
154 // Double number of prefetched cache lines on T4
155 // since L2 cache line size is smaller (32 bytes).
156 if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
157 FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
158 }
159 if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
160 FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
161 }
162 }
// Fallback distance when BIS-style (style 3) prefetch is not in effect and
// the user did not choose a distance.
163 if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
164 // Use different prefetch distance without BIS
165 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
166 }
|
// NOTE(review): fragment of a SPARC VM_Version initialization routine (a
// later revision of the same region; this capture holds two versions split
// by a "|" line).  The leading "123"-style tokens are listing line numbers,
// not C++ source.  Code below is kept byte-identical.
123 }
124 #ifdef _LP64
125 // 32-bit oops don't make sense for the 64-bit VM on sparc
126 // since the 32-bit VM has the same registers and smaller objects.
127 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
128 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
129 #endif // _LP64
// C2-only flag ergonomics: each block below adjusts a JVM flag only when the
// user left it at its default (FLAG_IS_DEFAULT guard), so explicit -XX
// settings always win.
130 #ifdef COMPILER2
131 // Indirect branch is the same cost as direct
132 if (FLAG_IS_DEFAULT(UseJumpTables)) {
133 FLAG_SET_DEFAULT(UseJumpTables, true);
134 }
135 // Single-issue, so entry and loop tops are
136 // aligned on a single instruction boundary
137 if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
138 FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
139 }
// Allocation-prefetch tuning for Niagara-plus parts.
140 if (is_niagara_plus()) {
141 if (has_blk_init() && (cache_line_size > 0) && UseTLAB &&
142 FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
// Instruction choice branches on CoreS4 detection (has_sparc5_instr()):
// older Niagara-plus parts use BIS (instr 1); CoreS4 avoids BIS because of
// a partial-RAW issue and uses ordinary prefetch (instr 0) with style 3.
143 if (!has_sparc5_instr()) {
144 // Use BIS instruction for TLAB allocation prefetch.
145 // on Niagara plus processors other than those based on CoreS4
146 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 1);
147 } else {
148 // On CoreS4 processors use prefetch instruction
149 // to avoid partial RAW issue, also use prefetch style 3
150 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
151 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
152 FLAG_SET_ERGO(intx, AllocatePrefetchStyle, 3);
153 }
154 }
// NOTE(review): this distance override fires on BOTH branches above, so the
// "with BIS" wording also covers the CoreS4 (non-BIS) path -- confirm that
// 64 is the intended distance there as well.
155 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
156 // Use smaller prefetch distance with BIS
157 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
158 }
159 }
160 if (is_T4()) {
161 // Double number of prefetched cache lines on T4
162 // since L2 cache line size is smaller (32 bytes).
163 if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
164 FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
165 }
166 if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
167 FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
168 }
169 }
// Fallback distance when prefetch style 3 is not in effect and the user did
// not choose a distance explicitly.
170 if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
171 // Use different prefetch distance without BIS
172 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
173 }
|