src/share/vm/runtime/arguments.cpp

@@ -1553,11 +1553,11 @@
   // Turn off AdaptiveSizePolicy by default for cms until it is complete.
   disable_adaptive_size_policy("UseConcMarkSweepGC");
 
   set_parnew_gc_flags();
 
-  size_t max_heap = align_size_down(MaxHeapSize,
+  size_t max_heap = align_down(MaxHeapSize,
                                     CardTableRS::ct_max_alignment_constraint());
 
   // Now make adjustments for CMS
   intx   tenuring_default = (intx)6;
   size_t young_gen_per_worker = CMSYoungGenPerWorker;
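
For context on the rename: align_down and align_up keep the usual power-of-two
alignment arithmetic. Below is a minimal sketch of the semantics, assuming the
alignment is a power of two; the demo_ names are illustrative stand-ins, not
HotSpot's implementation.

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-ins for align_down/align_up. Both assume a
    // power-of-two alignment, as the HotSpot helpers do.
    static inline size_t demo_align_down(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      return size & ~(alignment - 1);                    // clear the low bits
    }

    static inline size_t demo_align_up(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      return (size + alignment - 1) & ~(alignment - 1);  // round up to next multiple
    }

Rounding max_heap down rather than up keeps the adjusted value within the
user-requested MaxHeapSize while still satisfying the card table's alignment
constraint.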

@@ -1565,11 +1565,11 @@
   // Preferred young gen size for "short" pauses:
   // upper bound depends on # of threads and NewRatio.
   const size_t preferred_max_new_size_unaligned =
     MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
   size_t preferred_max_new_size =
-    align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
+    align_up(preferred_max_new_size_unaligned, os::vm_page_size());
 
   // Unless explicitly requested otherwise, size young gen
   // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
 
   // If either MaxNewSize or NewRatio is set on the command line,
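
As a worked example of this sizing (the numbers are illustrative, not defaults
taken from the source): with max_heap = 4G, NewRatio = 2, ParallelGCThreads = 4
and CMSYoungGenPerWorker = 64M, and ignoring ScaleForWordSize for simplicity,
the unaligned preferred size is MIN2(4G/3, 4 * 64M) = 256M; align_up then
rounds that to the next multiple of os::vm_page_size(), a no-op here since
256M is already a multiple of a 4K page.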

@@ -1679,11 +1679,11 @@
   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
   // We need to fit both the NULL page and the heap into the memory budget, while
   // keeping alignment constraints of the heap. To guarantee the latter, as the
   // NULL page is located before the heap, we pad the NULL page to the conservative
   // maximum alignment that the GC may ever impose upon the heap.
-  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
+  size_t displacement_due_to_null_page = align_up_(os::vm_page_size(),
                                                         _conservative_max_heap_alignment);
 
   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
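
A hedged sketch of the budget computed in this function, reusing the
demo_align_up stand-in from above; the function name is hypothetical.

    // The compressed-oop encoding can address OopEncodingHeapMax bytes.
    // The NULL page sits below the heap, so it is padded to the most
    // conservative alignment the GC may impose on the heap; whatever
    // remains of the encoding range is available to the heap itself.
    size_t demo_max_heap_for_compressed_oops(size_t oop_encoding_heap_max,
                                             size_t page_size,
                                             size_t max_heap_alignment) {
      size_t null_page = demo_align_up(page_size, max_heap_alignment);
      return oop_encoding_heap_max - null_page;
    }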

@@ -2761,11 +2761,11 @@
   const julong max_ThreadStackSize = 1 * M;
 
   const julong min_size = min_ThreadStackSize * K;
   const julong max_size = max_ThreadStackSize * K;
 
-  assert(is_size_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption");
+  assert(is_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption");
 
   julong size = 0;
   ArgsRange errcode = parse_memory_size(tail, &size, min_size, max_size);
   if (errcode != arg_in_range) {
     bool silent = (option == NULL); // Allow testing to silence error messages
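
The is_aligned_ predicate is the matching bit test; a minimal sketch under the
same power-of-two assumption (the demo_ name is again illustrative):

    // A value is aligned iff the bits below the alignment are all zero.
    static inline bool demo_is_aligned(size_t size, size_t alignment) {
      return (size & (alignment - 1)) == 0;
    }

The assertion above holds because max_size is 1G (max_ThreadStackSize * K),
which is a multiple of any common page size.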

@@ -2776,22 +2776,22 @@
     }
     return JNI_EINVAL;
   }
 
   // Internally track ThreadStackSize in units of 1024 bytes.
-  const julong size_aligned = align_size_up_(size, K);
+  const julong size_aligned = align_up_(size, K);
   assert(size <= size_aligned,
          "Overflow: " JULONG_FORMAT " " JULONG_FORMAT,
          size, size_aligned);
 
   const julong size_in_K = size_aligned / K;
   assert(size_in_K < (julong)max_intx,
          "size_in_K doesn't fit in the type of ThreadStackSize: " JULONG_FORMAT,
          size_in_K);
 
   // Check that code expanding ThreadStackSize to a page aligned number of bytes won't overflow.
-  const julong max_expanded = align_size_up_(size_in_K * K, (size_t)os::vm_page_size());
+  const julong max_expanded = align_up_(size_in_K * K, (size_t)os::vm_page_size());
   assert(max_expanded < max_uintx && max_expanded >= size_in_K,
          "Expansion overflowed: " JULONG_FORMAT " " JULONG_FORMAT,
          max_expanded, size_in_K);
 
   *out_ThreadStackSize = (intx)size_in_K;
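
Tying the two rounding steps in this hunk together, a minimal sketch of the
unit handling (demo_align_up as above; 1K units because ThreadStackSize is
tracked in units of 1024 bytes):

    // Round a requested stack size up to whole 1K units, then expand
    // size_in_K * K back to a page-aligned byte count; these are the two
    // align-up steps whose overflow the assertions above guard against.
    size_t demo_round_stack_size(size_t size, size_t page_size) {
      const size_t K = 1024;
      const size_t size_in_K = demo_align_up(size, K) / K;  // 1K units
      return demo_align_up(size_in_K * K, page_size);       // page-aligned bytes
    }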