# HG changeset patch
# User chrisphi
# Date 1529412710 14400
#      Tue Jun 19 08:51:50 2018 -0400
# Branch JDK-8203030
# Node ID 4f17dd581abba7e89e2ed71c11c581649948b7b6
# Parent  8a18bcdd75edfd04918d2301f85868f9378f722f
8203030: Zero s390 31 bit size_t type conflicts in shared code
Summary: Cast to size_t or change to size_t for compatibility with other archs.
Reviewed-by: Per Liden
Contributed-by: chrisphi

diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp
--- a/src/hotspot/share/classfile/stringTable.hpp
+++ b/src/hotspot/share/classfile/stringTable.hpp
@@ -81,7 +81,7 @@
   void check_concurrent_work();
   void trigger_concurrent_work();
-  static uintx item_added();
+  static size_t item_added();
   static void item_removed();
   size_t add_items_to_clean(size_t ndead);
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -409,7 +409,7 @@
   add_heap(heap);
   // Reserve Space
-  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
+  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
   size_initial = align_up(size_initial, os::vm_page_size());
   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -198,7 +198,7 @@
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
     FLAG_SET_ERGO(uintx, ReservedCodeCacheSize,
-                  MIN2(CODE_CACHE_DEFAULT_LIMIT, ReservedCodeCacheSize * 5));
+                  MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
   }
   // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
   if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp
@@ -203,7 +203,7 @@
   const size_t num_overflow_elems = of_stack->size();
   const size_t space_available = queue->max_elems() - queue->size();
   const size_t num_take_elems = MIN3(space_available / 4,
-                                     ParGCDesiredObjsFromOverflowList,
+                                     (size_t)ParGCDesiredObjsFromOverflowList,
                                      num_overflow_elems);
   // Transfer the most recent num_take_elems from the overflow
   // stack to our work queue.
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -2350,7 +2350,7 @@
   // of things to do) or totally (at the very end).
   size_t target_size;
   if (partially) {
-    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
+    target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
   } else {
     target_size = 0;
   }
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
@@ -31,7 +31,7 @@
 }
 size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
-  size_t words_to_scan = MIN2(remaining, ObjArrayMarkingStride);
+  size_t words_to_scan = MIN2(remaining, (size_t)ObjArrayMarkingStride);
   if (remaining > ObjArrayMarkingStride) {
     push_array_slice(start_from + ObjArrayMarkingStride);
diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp
@@ -91,7 +91,7 @@
   void pretouch_internal(size_t start_page, size_t end_page);
   // Returns the index of the page which contains the given address.
-  uintptr_t addr_to_page_index(char* addr) const;
+  size_t addr_to_page_index(char* addr) const;
   // Returns the address of the given page index.
   char* page_start(size_t index) const;
diff --git a/src/hotspot/share/gc/parallel/parallel_globals.hpp b/src/hotspot/share/gc/parallel/parallel_globals.hpp
--- a/src/hotspot/share/gc/parallel/parallel_globals.hpp
+++ b/src/hotspot/share/gc/parallel/parallel_globals.hpp
@@ -52,12 +52,12 @@
           "Use maximum compaction in the Parallel Old garbage collector " \
           "for a system GC")                                              \
                                                                           \
-  product(uintx, ParallelOldDeadWoodLimiterMean, 50,                      \
+  product(size_t, ParallelOldDeadWoodLimiterMean, 50,                     \
           "The mean used by the parallel compact dead wood "              \
           "limiter (a number between 0-100)")                             \
           range(0, 100)                                                   \
                                                                           \
-  product(uintx, ParallelOldDeadWoodLimiterStdDev, 80,                    \
+  product(size_t, ParallelOldDeadWoodLimiterStdDev, 80,                   \
           "The standard deviation used by the parallel compact dead wood " \
           "limiter (a number between 0-100)")                             \
           range(0, 100)                                                   \
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
@@ -118,7 +118,7 @@
   const size_t beg_index = size_t(index);
   assert(beg_index < len || len == 0, "index too large");
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
   const size_t end_index = beg_index + stride;
   T* const base = (T*)obj->base_raw();
   T* const beg = base + beg_index;
diff --git a/src/hotspot/share/gc/shared/plab.cpp b/src/hotspot/share/gc/shared/plab.cpp
--- a/src/hotspot/share/gc/shared/plab.cpp
+++ b/src/hotspot/share/gc/shared/plab.cpp
@@ -32,7 +32,7 @@
 size_t PLAB::min_size() {
   // Make sure that we return something that is larger than AlignmentReserve
-  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
+  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (size_t)oopDesc::header_size())) + AlignmentReserve;
 }
 size_t PLAB::max_size() {
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
@@ -654,7 +654,7 @@
                         STRDEDUP_BYTES_PARAM(_table->_size * sizeof(StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(StringDedupEntry)));
   log.debug("  Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
   log.debug("  Entries: " UINTX_FORMAT ", Load: " STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
-            _table->_entries, percent_of(_table->_entries, _table->_size), _entry_cache->size(), _entries_added, _entries_removed);
+            _table->_entries, percent_of((size_t)_table->_entries, _table->_size), _entry_cache->size(), _entries_added, _entries_removed);
   log.debug("  Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" STRDEDUP_PERCENT_FORMAT_NS ")",
             _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
   log.debug("  Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -1116,7 +1116,7 @@
 WB_END
 WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
-  uintx result;
+  size_t result;
   if (GetVMFlag (thread, env, name, &result, &JVMFlag::size_tAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp
--- a/src/hotspot/share/runtime/arguments.hpp
+++ b/src/hotspot/share/runtime/arguments.hpp
@@ -333,7 +333,7 @@
   // Value of the conservative maximum heap alignment needed
   static size_t _conservative_max_heap_alignment;
-  static uintx _min_heap_size;
+  static size_t _min_heap_size;
   // -Xrun arguments
   static AgentLibraryList _libraryList;
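
Note (reviewer aid, not part of the changeset): the casts above all address the same failure mode. HotSpot's MIN2/MIN3/MAX2 are templates over a single type T, so they only instantiate when both arguments deduce to the same type. On LP64 platforms uintx (uintptr_t) and size_t happen to be the same underlying type, so mixing a uintx flag with a size_t value compiles; on Zero s390 31 bit they are distinct types and the shared code fails to build. The sketch below uses simplified stand-in definitions (the real ones live in globalDefinitions.hpp and globals.hpp), and the flag default is an arbitrary illustrative value, not the real one.

```cpp
// Minimal sketch, assuming simplified stand-ins for HotSpot definitions.
#include <cstddef>
#include <cstdint>

typedef uintptr_t uintx;   // HotSpot defines uintx as uintptr_t

// Same shape as HotSpot's MIN2: a single template parameter for both arguments.
template <class T> inline T MIN2(T a, T b) { return (a < b) ? a : b; }

// Hypothetical stand-in for a uintx-typed flag such as InitialCodeCacheSize.
uintx InitialCodeCacheSize = 2560 * 1024;

size_t initial_reservation(size_t reserved) {
  // On LP64, uintx and size_t are the same type, so this form compiles:
  //   size_t size_initial = MIN2(InitialCodeCacheSize, reserved);
  // On a 31/32-bit target where uintptr_t and size_t are distinct types,
  // template argument deduction finds no single T and compilation fails.
  // The fix used throughout the changeset is an explicit size_t cast:
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, reserved);
  return size_initial;
}
```

The same reasoning applies to the flag-type changes (uintx to size_t) and to percent_of: picking size_t as the common type keeps the shared code portable without widening or narrowing values on any currently supported platform.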