src/hotspot/share/gc/z/zHeap.cpp

 160 size_t ZHeap::max_tlab_size() const {
 161   return ZObjectSizeLimitSmall;
 162 }
 163 
 164 size_t ZHeap::unsafe_max_tlab_alloc() const {
 165   size_t size = _object_allocator.remaining();
 166 
 167   if (size < MinTLABSize) {
 168     // The remaining space in the allocator is not enough to
 169     // fit the smallest possible TLAB. This means that the next
 170     // TLAB allocation will force the allocator to get a new
 171     // backing page anyway, which in turn means that we can then
 172     // fit the largest possible TLAB.
 173     size = max_tlab_size();
 174   }
 175 
 176   return MIN2(size, max_tlab_size());
 177 }
 178 
 179 bool ZHeap::is_in(uintptr_t addr) const {
 180   if (addr < ZAddressReservedStart || addr >= ZAddressReservedEnd) {
 181     return false;
 182   }
 183 
 184   const ZPage* const page = _page_table.get(addr);
 185   if (page != NULL) {
 186     return page->is_in(addr);
 187   }
 188 
 189   return false;
 190 }
 191 
 192 uintptr_t ZHeap::block_start(uintptr_t addr) const {
 193   const ZPage* const page = _page_table.get(addr);
 194   return page->block_start(addr);
 195 }
 196 
 197 bool ZHeap::block_is_obj(uintptr_t addr) const {
 198   const ZPage* const page = _page_table.get(addr);
 199   return page->block_is_obj(addr);
 200 }
 201 
 202 uint ZHeap::nconcurrent_worker_threads() const {
 203   return _workers.nconcurrent();
 204 }
 205 
 206 uint ZHeap::nconcurrent_no_boost_worker_threads() const {




 160 size_t ZHeap::max_tlab_size() const {
 161   return ZObjectSizeLimitSmall;
 162 }
 163 
 164 size_t ZHeap::unsafe_max_tlab_alloc() const {
 165   size_t size = _object_allocator.remaining();
 166 
 167   if (size < MinTLABSize) {
 168     // The remaining space in the allocator is not enough to
 169     // fit the smallest possible TLAB. This means that the next
 170     // TLAB allocation will force the allocator to get a new
 171     // backing page anyway, which in turn means that we can then
 172     // fit the largest possible TLAB.
 173     size = max_tlab_size();
 174   }
 175 
 176   return MIN2(size, max_tlab_size());
 177 }
 178 
 179 bool ZHeap::is_in(uintptr_t addr) const {
 180   // An address is considered to be "in the heap" if it points into
 181   // the allocated part of a page, regardless of which heap view is
 182   // used. Note that an address with the finalizable metadata bit set
 183   // is not pointing into a heap view, and therefore not considered
 184   // to be "in the heap".
 185 
 186   if (ZAddress::is_in(addr)) {
 187     const ZPage* const page = _page_table.get(addr);
 188     if (page != NULL) {
 189       return page->is_in(addr);
 190     }
 191   }
 192 
 193   return false;
 194 }
 195 
 196 uintptr_t ZHeap::block_start(uintptr_t addr) const {
 197   const ZPage* const page = _page_table.get(addr);
 198   return page->block_start(addr);
 199 }
 200 
 201 bool ZHeap::block_is_obj(uintptr_t addr) const {
 202   const ZPage* const page = _page_table.get(addr);
 203   return page->block_is_obj(addr);
 204 }
 205 
 206 uint ZHeap::nconcurrent_worker_threads() const {
 207   return _workers.nconcurrent();
 208 }
 209 
 210 uint ZHeap::nconcurrent_no_boost_worker_threads() const {
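The sizing logic in ZHeap::unsafe_max_tlab_alloc() above can be exercised in
isolation. The following is a minimal sketch of the same clamping behavior,
assuming made-up constants in place of MinTLABSize and ZObjectSizeLimitSmall
(the real values come from VM flags and ZGC's size classes, not the literals
used here):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Assumed stand-ins for MinTLABSize and ZObjectSizeLimitSmall.
static const size_t kMinTlabSize = 2 * 1024;
static const size_t kMaxTlabSize = 256 * 1024;

// Mirrors unsafe_max_tlab_alloc(): if the space left in the current
// allocation page cannot hold even the smallest TLAB, report the maximum
// TLAB size, since the next TLAB will be taken from a fresh page anyway.
// Otherwise, never report more than the maximum TLAB size.
static size_t unsafe_max_tlab_alloc(size_t remaining) {
  size_t size = remaining;
  if (size < kMinTlabSize) {
    size = kMaxTlabSize;
  }
  return std::min(size, kMaxTlabSize);
}

int main() {
  printf("%zu\n", unsafe_max_tlab_alloc(1 * 1024));    // below minimum -> 262144
  printf("%zu\n", unsafe_max_tlab_alloc(64 * 1024));   // in range      -> 65536
  printf("%zu\n", unsafe_max_tlab_alloc(1024 * 1024)); // above maximum -> 262144
  return 0;
}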


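In the second listing, ZHeap::is_in() checks ZAddress::is_in() instead of the
raw ZAddressReservedStart/ZAddressReservedEnd bounds used in the first, so an
address must actually name a heap view (and, per the comment, must not carry
the finalizable metadata bit) before the page table is consulted. Below is a
minimal sketch of that check order, using hypothetical stand-ins rather than
the real ZAddress and ZPageTable types:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a ZPage: only the allocated part counts as "in".
struct Page {
  uintptr_t start;
  uintptr_t top;   // end of the allocated part
  bool is_in(uintptr_t addr) const { return addr >= start && addr < top; }
};

// Mirrors the order of checks in is_in(): first require that the address
// names a heap view at all (what ZAddress::is_in() decides in the real code),
// then ask the owning page, if any, whether addr is in its allocated part.
static bool heap_is_in(bool addr_names_heap_view, const Page* page, uintptr_t addr) {
  if (addr_names_heap_view) {
    if (page != nullptr) {
      return page->is_in(addr);
    }
  }
  return false;
}

int main() {
  const Page page = {0x100000, 0x140000};
  printf("%d\n", heap_is_in(true,  &page,   0x120000)); // 1: inside allocated part
  printf("%d\n", heap_is_in(true,  &page,   0x150000)); // 0: past the page top
  printf("%d\n", heap_is_in(true,  nullptr, 0x120000)); // 0: no owning page
  printf("%d\n", heap_is_in(false, &page,   0x120000)); // 0: not a heap view address
  return 0;
}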