// NOTE(review): excerpt (earlier revision) of HotSpot's SPARC CPU-feature
// ergonomics setup — presumably VM_Version::initialize(); the enclosing
// function begins before this excerpt and the "#ifdef COMPILER2" below has
// no matching "#endif" in view. The leading "72".."114" on each line are
// residual line numbers from the paste, not program text.
72 }
73 #ifdef _LP64
74 // 32-bit oops don't make sense for the 64-bit VM on sparc
75 // since the 32-bit VM has the same registers and smaller objects.
// Force full (non-zero) shifts for compressed oops/klass pointers on 64-bit.
76 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
77 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
78 #endif // _LP64
// C2-only ergonomics: tune codegen flags only when the default is untouched
// on the command line (FLAG_IS_DEFAULT guards keep user overrides intact).
79 #ifdef COMPILER2
80 // Indirect branch is the same cost as direct
81 if (FLAG_IS_DEFAULT(UseJumpTables)) {
82 FLAG_SET_DEFAULT(UseJumpTables, true);
83 }
84 // Single-issue, so entry and loop tops are
85 // aligned on a single instruction boundary
86 if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
87 FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
88 }
// Niagara-plus (UltraSPARC T2+) allocation-prefetch tuning.
89 if (is_niagara_plus()) {
// BIS (block-initializing store) prefetch requires the instruction,
// a known cache line size, and TLAB allocation.
90 if (has_blk_init() && (cache_line_size > 0) && UseTLAB &&
91 FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
92 // Use BIS instruction for TLAB allocation prefetch.
93 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 1);
94 }
// Distance depends on the instruction chosen just above: plain prefetch
// (instr 0) reaches farther ahead than BIS (instr 1).
95 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
96 if (AllocatePrefetchInstr == 0) {
97 // Use different prefetch distance without BIS
98 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
99 } else {
100 // Use smaller prefetch distance with BIS
101 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
102 }
103 }
104 if (is_T4()) {
105 // Double number of prefetched cache lines on T4
106 // since L2 cache line size is smaller (32 bytes).
// FLAG_SET_ERGO (not DEFAULT) marks these as ergonomically chosen values.
107 if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
108 FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
109 }
110 if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
111 FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
112 }
113 }
114 }
|
// NOTE(review): excerpt (later revision) of the same SPARC ergonomics hunk
// as the fragment before the "|" separator; this version adds a CoreS4
// (has_sparc5_instr) special case for AllocatePrefetchInstr. The enclosing
// function begins before this excerpt and "#ifdef COMPILER2" is unclosed in
// view. Leading "72".."124" are residual paste line numbers, not code.
72 }
73 #ifdef _LP64
74 // 32-bit oops don't make sense for the 64-bit VM on sparc
75 // since the 32-bit VM has the same registers and smaller objects.
// Force full (non-zero) shifts for compressed oops/klass pointers on 64-bit.
76 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
77 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
78 #endif // _LP64
// C2-only ergonomics: every tweak is guarded by FLAG_IS_DEFAULT so explicit
// command-line settings are never overridden.
79 #ifdef COMPILER2
80 // Indirect branch is the same cost as direct
81 if (FLAG_IS_DEFAULT(UseJumpTables)) {
82 FLAG_SET_DEFAULT(UseJumpTables, true);
83 }
84 // Single-issue, so entry and loop tops are
85 // aligned on a single instruction boundary
86 if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
87 FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
88 }
// Niagara-plus (UltraSPARC T2+) allocation-prefetch tuning.
89 if (is_niagara_plus()) {
90 if (has_blk_init() && (cache_line_size > 0) && UseTLAB &&
91 FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
// New in this revision: BIS is avoided on CoreS4-based chips
// (identified via has_sparc5_instr) because of the partial-RAW
// hazard described below.
92 if (!has_sparc5_instr()) {
93 // Use BIS instruction for TLAB allocation prefetch
94 // on Niagara plus processors other than those based on CoreS4
95 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 1);
96 } else {
97 // On CoreS4 processors use prefetch instruction
98 // to avoid partial RAW issue, also use prefetch style 3
99 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
100 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
101 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
102 }
103 }
104 }
// Distance depends on the instruction selected above: plain prefetch
// (instr 0) reaches farther ahead than BIS (instr 1).
105 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
106 if (AllocatePrefetchInstr == 0) {
107 // Use different prefetch distance without BIS
108 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
109 } else {
110 // Use smaller prefetch distance with BIS
111 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
112 }
113 }
114 if (is_T4()) {
115 // Double number of prefetched cache lines on T4
116 // since L2 cache line size is smaller (32 bytes).
// FLAG_SET_ERGO (not DEFAULT) marks these as ergonomically chosen values.
117 if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
118 FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
119 }
120 if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
121 FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
122 }
123 }
124 }
|