1 /* 2 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zValue.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"

// Page cache hit/miss counters, reported as operations per second.
// L1 = hit in the preferred list (NUMA-local small page, or an
//      exact-size medium/large page),
// L2 = hit in a NUMA-remote small page list,
// L3 = hit on an oversized page (larger than requested, needs
//      split or retype before use),
// Miss = no usable cached page found at all.
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);

// Per-page predicate used when flushing (evicting) pages from the cache.
// do_page() decides whether a given page should be flushed. The closure
// carries the number of bytes the caller asked to have flushed (_requested)
// and the number of bytes flushed so far (_flushed). ZPageCache is a friend
// so that flush() can read and adjust these counters after overflushing.
class ZPageCacheFlushClosure : public StackObj {
  friend class ZPageCache;

protected:
  const size_t _requested; // Bytes the caller wants flushed
  size_t       _flushed;   // Bytes flushed so far

public:
  ZPageCacheFlushClosure(size_t requested);
  virtual bool do_page(const ZPage* page) = 0;
};

ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
    _requested(requested),
    _flushed(0) {}

ZPageCache::ZPageCache() :
    _small(),
    _medium(),
    _large() {}

// Allocate a cached small page, preferring the current thread's NUMA node.
// Returns NULL if no small page is cached on any node.
ZPage* ZPageCache::alloc_small_page() {
  const uint32_t numa_id = ZNUMA::id();
  const uint32_t numa_count = ZNUMA::count();

  // Try NUMA local page cache
  ZPage* const l1_page = _small.get(numa_id).remove_first();
  if (l1_page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return l1_page;
  }

  // Try NUMA remote page cache(s), visiting the other numa_count - 1
  // nodes in order, starting with the node after the local one and
  // wrapping around.
  uint32_t remote_numa_id = numa_id + 1;
  const uint32_t remote_numa_count = numa_count - 1;
  for (uint32_t i = 0; i < remote_numa_count; i++) {
    if (remote_numa_id == numa_count) {
      // Wrap around
      remote_numa_id = 0;
    }

    ZPage* const l2_page = _small.get(remote_numa_id).remove_first();
    if (l2_page != NULL) {
      ZStatInc(ZCounterPageCacheHitL2);
      return l2_page;
    }

    remote_numa_id++;
  }

  return NULL;
}

// Allocate a cached medium page (all medium pages have the same size,
// so any cached one will do). Returns NULL if the cache is empty.
ZPage* ZPageCache::alloc_medium_page() {
  ZPage* const page = _medium.remove_first();
  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return page;
  }

  return NULL;
}

// Allocate a cached large page of exactly the given size. Large pages
// vary in size, so the list is scanned for an exact match. Returns NULL
// if no page of that size is cached.
ZPage* ZPageCache::alloc_large_page(size_t size) {
  // Find a page with the right size
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size == page->size()) {
      // Page found
      _large.remove(page);
      ZStatInc(ZCounterPageCacheHitL1);
      return page;
    }
  }

  return NULL;
}

// Fallback: take a cached medium page to satisfy a smaller request
// (the caller will split or retype it). Returns NULL if the request
// doesn't fit in a medium page, or none is cached.
ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) {
  if (size <= ZPageSizeMedium) {
    return _medium.remove_first();
  }

  return NULL;
}

// Fallback: take any cached large page big enough to hold the request
// (first fit, not best fit). Returns NULL if none is large enough.
ZPage* ZPageCache::alloc_oversized_large_page(size_t size) {
  // Find a page that is large enough
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size <= page->size()) {
      // Page found
      _large.remove(page);
      return page;
    }
  }

  return NULL;
}

// Fallback allocation of a page at least as big as the request,
// preferring large pages over medium ones. Counts as an L3 hit.
ZPage* ZPageCache::alloc_oversized_page(size_t size) {
  ZPage* page = alloc_oversized_large_page(size);
  if (page == NULL) {
    page = alloc_oversized_medium_page(size);
  }

  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL3);
  }

  return page;
}

// Allocate a page of the given type and size from the cache. First tries
// an exact match for the type; failing that, takes an oversized page and
// either splits it (caching the remainder) or retypes it if the size
// happens to match exactly. Returns NULL on a cache miss.
ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
  ZPage* page;

  // Try allocate exact page
  if (type == ZPageTypeSmall) {
    page = alloc_small_page();
  } else if (type == ZPageTypeMedium) {
    page = alloc_medium_page();
  } else {
    page = alloc_large_page(size);
  }

  if (page == NULL) {
    // Try allocate potentially oversized page
    ZPage* const oversized = alloc_oversized_page(size);
    if (oversized != NULL) {
      if (size < oversized->size()) {
        // Split oversized page. split(type, size) returns a new page of
        // the requested type/size; 'oversized' is left holding the
        // remainder (see "Cache remainder" below).
        page = oversized->split(type, size);

        // Cache remainder
        free_page(oversized);
      } else {
        // Re-type correctly sized page
        page = oversized->retype(type);
      }
    }
  }

  if (page == NULL) {
    ZStatInc(ZCounterPageCacheMiss);
  }

  return page;
}

// Return a page to the cache. Small pages go to the per-NUMA list of the
// node the page's memory belongs to; medium and large pages go to their
// shared lists. Pages are inserted first, so the lists are LRU-ordered
// with the most recently freed page at the head.
void ZPageCache::free_page(ZPage* page) {
  const uint8_t type = page->type();
  if (type == ZPageTypeSmall) {
    _small.get(page->numa_id()).insert_first(page);
  } else if (type == ZPageTypeMedium) {
    _medium.insert_first(page);
  } else {
    _large.insert_first(page);
  }
}

// Try to flush one page from 'from' to 'to'. Takes the last (least
// recently freed) page and asks the closure whether to flush it.
// Returns true if a page was moved, false if the list is empty or the
// closure declined (which also ends flushing of this list, since pages
// closer to the head are more recently used).
bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
  ZPage* const page = from->last();
  if (page == NULL || !cl->do_page(page)) {
    // Don't flush page
    return false;
  }

  // Flush page
  from->remove(page);
  to->insert_last(page);
  return true;
}

// Flush pages from a single list until the closure declines or the list
// is exhausted.
void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
  while (flush_list_inner(cl, from, to));
}

// Flush the per-NUMA small page lists round-robin, one page at a time,
// so eviction pressure is spread evenly across nodes. Terminates once
// all numa_count lists have declined in a row (numa_done is reset to 0
// whenever any list yields a page).
void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
  const uint32_t numa_count = ZNUMA::count();
  uint32_t numa_done = 0;
  uint32_t numa_next = 0;

  // Flush lists round-robin
  while (numa_done < numa_count) {
    ZList<ZPage>* numa_list = from->addr(numa_next);
    if (++numa_next == numa_count) {
      numa_next = 0;
    }

    if (flush_list_inner(cl, numa_list, to)) {
      // Not done
      numa_done = 0;
    } else {
      // Done
      numa_done++;
    }
  }
}

// Flush pages from the cache into 'to' until the closure is satisfied.
// If the last flushed page made us exceed the requested number of bytes,
// split off the excess from that page and put it back in the cache, so
// exactly _requested bytes end up flushed.
void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
  // Prefer flushing large, then medium and last small pages
  flush_list(cl, &_large, to);
  flush_list(cl, &_medium, to);
  flush_per_numa_lists(cl, &_small, to);

  if (cl->_flushed > cl->_requested) {
    // Overflushed, keep part of last page. split(overflushed) is expected
    // to detach 'overflushed' bytes from the last flushed page and return
    // them as a new page, which is re-cached here.
    const size_t overflushed = cl->_flushed - cl->_requested;
    free_page(to->last()->split(overflushed));
    cl->_flushed -= overflushed;
  }
}

// Flush policy for allocation: flush unconditionally until the requested
// number of bytes has been reached.
class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't flush page
    return false;
  }
};

// Flush 'requested' bytes worth of pages into 'to' to satisfy an
// allocation, and emit a JFR ZPageCacheFlush event for it.
void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
  EventZPageCacheFlush event;

  // Flush
  ZPageCacheFlushForAllocationClosure cl(requested);
  flush(&cl, to);

  // Send event
  event.commit(requested, true /* for_allocation */);
}

// Flush policy for uncommit: only flush pages that have been unused for
// at least 'delay' seconds. For pages not yet eligible, records the
// shortest remaining time in *_timeout so the caller knows when to retry.
class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;     // Timestamp taken once, when the closure is created
  const uint64_t _delay;   // Required idle time before a page may be uncommitted
  uint64_t*      _timeout; // Out: shortest non-expired timeout seen

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay, uint64_t* timeout) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(timeout) {}

  virtual bool do_page(const ZPage* page) {
    const uint64_t expires = page->last_used() + _delay;
    // Saturating subtraction: timeout is 0 if the page already expired,
    // avoiding unsigned underflow when _now > expires.
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    *_timeout = MIN2(*_timeout, timeout);

    // Don't flush page
    return false;
  }
};

// Flush up to 'requested' bytes of sufficiently idle pages into 'to' for
// uncommit. Updates *timeout with the shortest time until another page
// becomes eligible. Returns the number of bytes actually flushed, and
// emits a JFR ZPageCacheFlush event when any flushing was attempted.
size_t ZPageCache::flush_for_uncommit(size_t requested, uint64_t delay, uint64_t* timeout, ZList<ZPage>* to) {
  if (requested == 0) {
    // Nothing to flush
    return 0;
  }

  EventZPageCacheFlush event;

  // Flush
  ZPageCacheFlushForUncommitClosure cl(requested, delay, timeout);
  flush(&cl, to);

  // Send event
  event.commit(requested, false /* for_allocation */);

  return cl._flushed;
}

// Apply the closure to every page currently in the cache: all per-NUMA
// small page lists, then the medium list, then the large list.
void ZPageCache::pages_do(ZPageClosure* cl) const {
  // Small
  ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
  for (const ZList<ZPage>* list; iter_numa.next(&list);) {
    ZListIterator<ZPage> iter_small(list);
    for (ZPage* page; iter_small.next(&page);) {
      cl->do_page(page);
    }
  }

  // Medium
  ZListIterator<ZPage> iter_medium(&_medium);
  for (ZPage* page; iter_medium.next(&page);) {
    cl->do_page(page);
  }

  // Large
  ZListIterator<ZPage> iter_large(&_large);
  for (ZPage* page; iter_large.next(&page);) {
    cl->do_page(page);
  }
}