--- old/src/share/vm/memory/defNewGeneration.cpp
+++ new/src/share/vm/memory/defNewGeneration.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/shared/collectorCounters.hpp"
27 27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
28 28 #include "gc_implementation/shared/gcHeapSummary.hpp"
29 29 #include "gc_implementation/shared/gcTimer.hpp"
30 30 #include "gc_implementation/shared/gcTraceTime.hpp"
31 31 #include "gc_implementation/shared/gcTrace.hpp"
32 32 #include "gc_implementation/shared/spaceDecorator.hpp"
33 33 #include "memory/defNewGeneration.inline.hpp"
34 34 #include "memory/gcLocker.inline.hpp"
35 35 #include "memory/genCollectedHeap.hpp"
36 36 #include "memory/genOopClosures.inline.hpp"
37 37 #include "memory/generationSpec.hpp"
38 38 #include "memory/iterator.hpp"
39 39 #include "memory/referencePolicy.hpp"
40 40 #include "memory/space.inline.hpp"
41 41 #include "oops/instanceRefKlass.hpp"
42 42 #include "oops/oop.inline.hpp"
43 43 #include "runtime/java.hpp"
44 44 #include "utilities/copy.hpp"
45 45 #include "utilities/stack.inline.hpp"
46 46 #ifdef TARGET_OS_FAMILY_linux
47 47 # include "thread_linux.inline.hpp"
48 48 #endif
49 49 #ifdef TARGET_OS_FAMILY_solaris
50 50 # include "thread_solaris.inline.hpp"
51 51 #endif
52 52 #ifdef TARGET_OS_FAMILY_windows
53 53 # include "thread_windows.inline.hpp"
54 54 #endif
55 55 #ifdef TARGET_OS_FAMILY_bsd
56 56 # include "thread_bsd.inline.hpp"
57 57 #endif
58 58
59 59 //
60 60 // DefNewGeneration functions.
61 61
62 62 // Methods of protected closure types.
63 63
64 64 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
65 65 assert(g->level() == 0, "Optimized for youngest gen.");
66 66 }
67 67 void DefNewGeneration::IsAliveClosure::do_object(oop p) {
68 68 assert(false, "Do not call.");
69 69 }
70 70 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
71 71 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
72 72 }
73 73
74 74 DefNewGeneration::KeepAliveClosure::
75 75 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
76 76 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
77 77 assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
78 78 _rs = (CardTableRS*)rs;
79 79 }
80 80
81 81 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
82 82 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
83 83
84 84
85 85 DefNewGeneration::FastKeepAliveClosure::
86 86 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
87 87 DefNewGeneration::KeepAliveClosure(cl) {
88 88 _boundary = g->reserved().end();
89 89 }
90 90
91 91 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
92 92 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
93 93
94 94 DefNewGeneration::EvacuateFollowersClosure::
95 95 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
96 96 ScanClosure* cur, ScanClosure* older) :
97 97 _gch(gch), _level(level),
98 98 _scan_cur_or_nonheap(cur), _scan_older(older)
99 99 {}
100 100
101 101 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
102 102 do {
103 103 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
104 104 _scan_older);
105 105 } while (!_gch->no_allocs_since_save_marks(_level));
106 106 }
107 107
108 108 DefNewGeneration::FastEvacuateFollowersClosure::
109 109 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
110 110 DefNewGeneration* gen,
111 111 FastScanClosure* cur, FastScanClosure* older) :
112 112 _gch(gch), _level(level), _gen(gen),
113 113 _scan_cur_or_nonheap(cur), _scan_older(older)
114 114 {}
115 115
116 116 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
117 117 do {
118 118 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
119 119 _scan_older);
120 120 } while (!_gch->no_allocs_since_save_marks(_level));
121 121 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
122 122 }
123 123
124 124 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
125 125 OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
126 126 {
127 127 assert(_g->level() == 0, "Optimized for youngest generation");
128 128 _boundary = _g->reserved().end();
129 129 }
130 130
131 131 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
132 132 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
133 133
134 134 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
135 135 OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
136 136 {
137 137 assert(_g->level() == 0, "Optimized for youngest generation");
138 138 _boundary = _g->reserved().end();
139 139 }
140 140
141 141 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
142 142 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
143 143
144 144 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
145 145 OopClosure(g->ref_processor()), _g(g)
146 146 {
147 147 assert(_g->level() == 0, "Optimized for youngest generation");
148 148 _boundary = _g->reserved().end();
149 149 }
150 150
151 151 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
152 152 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
153 153
154 154 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
155 155 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
156 156
157 157 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
158 158 size_t initial_size,
159 159 int level,
160 160 const char* policy)
161 161 : Generation(rs, initial_size, level),
162 162 _promo_failure_drain_in_progress(false),
163 163 _should_allocate_from_space(false)
164 164 {
165 165 MemRegion cmr((HeapWord*)_virtual_space.low(),
166 166 (HeapWord*)_virtual_space.high());
167 167 Universe::heap()->barrier_set()->resize_covered_region(cmr);
168 168
169 169 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
170 170 _eden_space = new ConcEdenSpace(this);
171 171 } else {
172 172 _eden_space = new EdenSpace(this);
173 173 }
174 174 _from_space = new ContiguousSpace();
175 175 _to_space = new ContiguousSpace();
176 176
177 177 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
178 178 vm_exit_during_initialization("Could not allocate a new gen space");
179 179
180 180 // Compute the maximum eden and survivor space sizes. These sizes
181 181 // are computed assuming the entire reserved space is committed.
182 182 // These values are exported as performance counters.
183 183 uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
184 184 uintx size = _virtual_space.reserved_size();
185 185 _max_survivor_size = compute_survivor_size(size, alignment);
186 186 _max_eden_size = size - (2*_max_survivor_size);
187 187
188 188 // allocate the performance counters
189 189
190 190 // Generation counters -- generation 0, 3 subspaces
191 191 _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
192 192 _gc_counters = new CollectorCounters(policy, 0);
193 193
194 194 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
195 195 _gen_counters);
196 196 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
197 197 _gen_counters);
198 198 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
199 199 _gen_counters);
200 200
201 201 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
202 202 update_counters();
203 203 _next_gen = NULL;
204 204 _tenuring_threshold = MaxTenuringThreshold;
205 205 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
206 206
207 207 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
208 208 }
209 209
210 210 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
211 211 bool clear_space,
212 212 bool mangle_space) {
213 213 uintx alignment =
214 214 GenCollectedHeap::heap()->collector_policy()->min_alignment();
215 215
216 216 // If the spaces are being cleared (only done at heap initialization
217 217 // currently), the survivor spaces need not be empty.
218 218 // Otherwise, no care is taken for used areas in the survivor spaces
219 219 // so check.
220 220 assert(clear_space || (to()->is_empty() && from()->is_empty()),
221 221 "Initialization of the survivor spaces assumes these are empty");
222 222
223 223 // Compute sizes
224 224 uintx size = _virtual_space.committed_size();
225 225 uintx survivor_size = compute_survivor_size(size, alignment);
226 226 uintx eden_size = size - (2*survivor_size);
227 227 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
228 228
229 229 if (eden_size < minimum_eden_size) {
230 230 // May happen due to 64Kb rounding; if so, adjust eden size back up
231 231 minimum_eden_size = align_size_up(minimum_eden_size, alignment);
232 232 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
233 233 uintx unaligned_survivor_size =
234 234 align_size_down(maximum_survivor_size, alignment);
235 235 survivor_size = MAX2(unaligned_survivor_size, alignment);
236 236 eden_size = size - (2*survivor_size);
237 237 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
238 238 assert(eden_size >= minimum_eden_size, "just checking");
239 239 }
240 240
241 241 char *eden_start = _virtual_space.low();
242 242 char *from_start = eden_start + eden_size;
243 243 char *to_start = from_start + survivor_size;
244 244 char *to_end = to_start + survivor_size;
245 245
246 246 assert(to_end == _virtual_space.high(), "just checking");
247 247 assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
248 248 assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
249 249 assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
250 250
251 251 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
252 252 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
253 253 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
254 254
255 255 // A minimum eden size implies that there is a part of eden that
256 256 // is being used and that affects the initialization of any
257 257 // newly formed eden.
258 258 bool live_in_eden = minimum_eden_size > 0;
259 259
260 260 // If not clearing the spaces, do some checking to verify that
261 261 // the spaces are already mangled.
262 262 if (!clear_space) {
263 263 // Must check mangling before the spaces are reshaped. Otherwise,
264 264 // the bottom or end of one space may have moved into another, and
265 265 // a failure of the check may not correctly indicate which space
266 266 // is not properly mangled.
267 267 if (ZapUnusedHeapArea) {
268 268 HeapWord* limit = (HeapWord*) _virtual_space.high();
269 269 eden()->check_mangled_unused_area(limit);
270 270 from()->check_mangled_unused_area(limit);
271 271 to()->check_mangled_unused_area(limit);
272 272 }
273 273 }
274 274
275 275 // Reset the spaces for their new regions.
276 276 eden()->initialize(edenMR,
277 277 clear_space && !live_in_eden,
278 278 SpaceDecorator::Mangle);
279 279 // If clear_space and live_in_eden, we will not have cleared any
280 280 // portion of eden above its top. This can cause newly
281 281 // expanded space not to be mangled if using ZapUnusedHeapArea.
282 282 // We explicitly do such mangling here.
283 283 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
284 284 eden()->mangle_unused_area();
285 285 }
286 286 from()->initialize(fromMR, clear_space, mangle_space);
287 287 to()->initialize(toMR, clear_space, mangle_space);
288 288
289 289 // Set next compaction spaces.
290 290 eden()->set_next_compaction_space(from());
291 291 // The to-space is normally empty before a compaction so need
292 292 // not be considered. The exception is during promotion
293 293 // failure handling when to-space can contain live objects.
294 294 from()->set_next_compaction_space(NULL);
295 295 }
296 296
297 297 void DefNewGeneration::swap_spaces() {
298 298 ContiguousSpace* s = from();
299 299 _from_space = to();
300 300 _to_space = s;
301 301 eden()->set_next_compaction_space(from());
302 302 // The to-space is normally empty before a compaction so need
303 303 // not be considered. The exception is during promotion
304 304 // failure handling when to-space can contain live objects.
305 305 from()->set_next_compaction_space(NULL);
306 306
307 307 if (UsePerfData) {
308 308 CSpaceCounters* c = _from_counters;
309 309 _from_counters = _to_counters;
310 310 _to_counters = c;
311 311 }
312 312 }
313 313
314 314 bool DefNewGeneration::expand(size_t bytes) {
315 315 MutexLocker x(ExpandHeap_lock);
316 316 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
317 317 bool success = _virtual_space.expand_by(bytes);
318 318 if (success && ZapUnusedHeapArea) {
319 319 // Mangle newly committed space immediately because it
320 320 // can be done here more simply than after the new
321 321 // spaces have been computed.
322 322 HeapWord* new_high = (HeapWord*) _virtual_space.high();
323 323 MemRegion mangle_region(prev_high, new_high);
324 324 SpaceMangler::mangle_region(mangle_region);
325 325 }
326 326
327 327 // Do not attempt an expand to the reserve size. The
328 328 // request should properly observe the maximum size of
329 329 // the generation, so an expand-to-reserve should be
330 330 // unnecessary. Also, a second expand-to-reserve call
331 331 // can potentially cause an undue expansion, for example
332 332 // if the first expand fails for unknown reasons but the
333 333 // second succeeds and expands the heap to its maximum
334 334 // value.
335 335 if (GC_locker::is_active()) {
336 336 if (PrintGC && Verbose) {
337 337 gclog_or_tty->print_cr("Garbage collection disabled, "
338 338 "expanded heap instead");
339 339 }
340 340 }
341 341
342 342 return success;
343 343 }
344 344
345 345
346 346 void DefNewGeneration::compute_new_size() {
347 347 // This is called after a gc that includes the following generation
348 348 // (which is required to exist). So from-space will normally be empty.
349 349 // Note that we check both spaces, since if scavenge failed they revert roles.
350 350 // If not, we bail out (otherwise we would have to relocate the objects).
351 351 if (!from()->is_empty() || !to()->is_empty()) {
352 352 return;
353 353 }
354 354
355 355 int next_level = level() + 1;
356 356 GenCollectedHeap* gch = GenCollectedHeap::heap();
357 357 assert(next_level < gch->_n_gens,
358 358 "DefNewGeneration cannot be an oldest gen");
359 359
360 360 Generation* next_gen = gch->_gens[next_level];
361 361 size_t old_size = next_gen->capacity();
362 362 size_t new_size_before = _virtual_space.committed_size();
363 363 size_t min_new_size = spec()->init_size();
364 364 size_t max_new_size = reserved().byte_size();
365 365 assert(min_new_size <= new_size_before &&
366 366 new_size_before <= max_new_size,
367 367 "just checking");
368 368 // All space sizes must be multiples of Generation::GenGrain.
369 369 size_t alignment = Generation::GenGrain;
370 370
371 371 // Compute desired new generation size based on NewRatio and
372 372 // NewSizeThreadIncrease
373 373 size_t desired_new_size = old_size/NewRatio;
374 374 int threads_count = Threads::number_of_non_daemon_threads();
375 375 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
376 376 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
377 377
378 378 // Adjust new generation size
379 379 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
380 380 assert(desired_new_size <= max_new_size, "just checking");
381 381
382 382 bool changed = false;
383 383 if (desired_new_size > new_size_before) {
384 384 size_t change = desired_new_size - new_size_before;
385 385 assert(change % alignment == 0, "just checking");
386 386 if (expand(change)) {
387 387 changed = true;
388 388 }
389 389 // If the heap failed to expand to the desired size,
390 390 // "changed" will be false. If the expansion failed
391 391 // (and at this point it was expected to succeed),
392 392 // ignore the failure (leaving "changed" as false).
393 393 }
394 394 if (desired_new_size < new_size_before && eden()->is_empty()) {
395 395 // bail out of shrinking if objects in eden
396 396 size_t change = new_size_before - desired_new_size;
397 397 assert(change % alignment == 0, "just checking");
398 398 _virtual_space.shrink_by(change);
399 399 changed = true;
400 400 }
401 401 if (changed) {
402 402 // The spaces have already been mangled at this point but
403 403 // may not have been cleared (set top = bottom) and should be.
404 404 // Mangling was done when the heap was being expanded.
405 405 compute_space_boundaries(eden()->used(),
406 406 SpaceDecorator::Clear,
407 407 SpaceDecorator::DontMangle);
408 408 MemRegion cmr((HeapWord*)_virtual_space.low(),
409 409 (HeapWord*)_virtual_space.high());
410 410 Universe::heap()->barrier_set()->resize_covered_region(cmr);
411 411 if (Verbose && PrintGC) {
412 412 size_t new_size_after = _virtual_space.committed_size();
413 413 size_t eden_size_after = eden()->capacity();
414 414 size_t survivor_size_after = from()->capacity();
415 415 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
416 416 SIZE_FORMAT "K [eden="
417 417 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
418 418 new_size_before/K, new_size_after/K,
419 419 eden_size_after/K, survivor_size_after/K);
420 420 if (WizardMode) {
421 421 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
422 422 thread_increase_size/K, threads_count);
423 423 }
424 424 gclog_or_tty->cr();
425 425 }
426 426 }
427 427 }
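
The resizing arithmetic above reduces to a few operations on the committed and reserved sizes. The following standalone sketch is illustrative only: align_up and every constant in it are assumed stand-ins for min_alignment/GenGrain, next_gen->capacity(), spec()->init_size() and reserved().byte_size(), not values from any real VM. It shows how a desired young-generation size is derived from NewRatio and NewSizeThreadIncrease and then clamped to the permitted range.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Stand-in for align_size_up(): round size up to a multiple of alignment.
static size_t align_up(size_t size, size_t alignment) {
  return ((size + alignment - 1) / alignment) * alignment;
}

int main() {
  // Assumed example values, chosen only for illustration.
  const size_t old_size              = 256u * 1024 * 1024; // next_gen->capacity()
  const size_t NewRatio              = 2;
  const size_t NewSizeThreadIncrease = 16u * 1024;
  const size_t threads_count         = 10;                 // non-daemon threads
  const size_t alignment             = 64u * 1024;         // Generation::GenGrain stand-in
  const size_t min_new_size          = 32u * 1024 * 1024;  // spec()->init_size()
  const size_t max_new_size          = 512u * 1024 * 1024; // reserved().byte_size()

  size_t desired = old_size / NewRatio;                                   // 128 MB here
  desired = align_up(desired + threads_count * NewSizeThreadIncrease, alignment);
  desired = std::max(std::min(desired, max_new_size), min_new_size);      // clamp

  printf("desired new generation size: %zu bytes\n", desired);
  return 0;
}

The generation then expands or shrinks toward this desired size and, if anything changed, recomputes the eden/survivor boundaries via compute_space_boundaries().
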
428 428
429 429 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
430 430 // $$$ This may be wrong in case of "scavenge failure"?
431 431 eden()->object_iterate(cl);
432 432 }
433 433
434 434 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
435 435 assert(false, "NYI -- are you sure you want to call this?");
436 436 }
437 437
438 438
439 439 size_t DefNewGeneration::capacity() const {
440 440 return eden()->capacity()
441 441 + from()->capacity(); // to() is only used during scavenge
442 442 }
443 443
444 444
445 445 size_t DefNewGeneration::used() const {
446 446 return eden()->used()
447 447 + from()->used(); // to() is only used during scavenge
448 448 }
449 449
450 450
451 451 size_t DefNewGeneration::free() const {
452 452 return eden()->free()
453 453 + from()->free(); // to() is only used during scavenge
454 454 }
455 455
456 456 size_t DefNewGeneration::max_capacity() const {
457 457 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
458 458 const size_t reserved_bytes = reserved().byte_size();
459 459 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
460 460 }
461 461
462 462 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
463 463 return eden()->free();
464 464 }
465 465
466 466 size_t DefNewGeneration::capacity_before_gc() const {
467 467 return eden()->capacity();
468 468 }
469 469
470 470 size_t DefNewGeneration::contiguous_available() const {
471 471 return eden()->free();
472 472 }
473 473
474 474
475 475 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
476 476 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
477 477
478 478 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
479 479 eden()->object_iterate(blk);
480 480 from()->object_iterate(blk);
481 481 }
482 482
483 483
484 484 void DefNewGeneration::space_iterate(SpaceClosure* blk,
485 485 bool usedOnly) {
486 486 blk->do_space(eden());
487 487 blk->do_space(from());
488 488 blk->do_space(to());
489 489 }
490 490
491 491 // The last collection bailed out and we are running out of heap space,
492 492 // so we try to allocate in the from-space, too.
493 493 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
494 494 HeapWord* result = NULL;
495 495 if (Verbose && PrintGCDetails) {
496 496 gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
497 497 " will_fail: %s"
498 498 " heap_lock: %s"
499 499 " free: " SIZE_FORMAT,
500 500 size,
501 501 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
502 502 "true" : "false",
503 503 Heap_lock->is_locked() ? "locked" : "unlocked",
504 504 from()->free());
505 505 }
506 506 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
507 507 if (Heap_lock->owned_by_self() ||
508 508 (SafepointSynchronize::is_at_safepoint() &&
509 509 Thread::current()->is_VM_thread())) {
510 510 // If the Heap_lock is not locked by this thread, this will be called
511 511 // again later with the Heap_lock held.
512 512 result = from()->allocate(size);
513 513 } else if (PrintGC && Verbose) {
514 514 gclog_or_tty->print_cr(" Heap_lock is not owned by self");
515 515 }
516 516 } else if (PrintGC && Verbose) {
517 517 gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
518 518 }
519 519 if (PrintGC && Verbose) {
520 520 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
521 521 }
522 522 return result;
523 523 }
524 524
525 525 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
526 526 bool is_tlab,
527 527 bool parallel) {
528 528 // We don't attempt to expand the young generation (but perhaps we should.)
529 529 return allocate(size, is_tlab);
530 530 }
531 531
532 532
533 533 void DefNewGeneration::collect(bool full,
534 534 bool clear_all_soft_refs,
535 535 size_t size,
536 536 bool is_tlab) {
537 537 assert(full || size > 0, "otherwise we don't want to collect");
538 538
539 539 GenCollectedHeap* gch = GenCollectedHeap::heap();
540 540
541 541 _gc_timer->register_gc_start(os::elapsed_counter());
542 542 DefNewTracer gc_tracer;
543 543 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
544 544
545 545 _next_gen = gch->next_gen(this);
546 546 assert(_next_gen != NULL,
547 547 "This must be the youngest gen, and not the only gen");
548 548
549 549 // If the next generation is too full to accommodate promotion
550 550 // from this generation, pass on collection; let the next generation
551 551 // do it.
552 552 if (!collection_attempt_is_safe()) {
553 553 if (Verbose && PrintGCDetails) {
554 554 gclog_or_tty->print(" :: Collection attempt not safe :: ");
555 555 }
556 556 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
557 557 return;
558 558 }
559 559 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
560 560
561 561 init_assuming_no_promotion_failure();
562 562
563 563 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
564 564 // Capture heap used before collection (for printing).
565 565 size_t gch_prev_used = gch->used();
566 566
567 567 gch->trace_heap_before_gc(&gc_tracer);
568 568
569 569 SpecializationStats::clear();
570 570
571 571 // These can be shared for all code paths
572 572 IsAliveClosure is_alive(this);
573 573 ScanWeakRefClosure scan_weak_ref(this);
574 574
575 575 age_table()->clear();
576 576 to()->clear(SpaceDecorator::Mangle);
577 577
578 578 gch->rem_set()->prepare_for_younger_refs_iterate(false);
579 579
580 580 assert(gch->no_allocs_since_save_marks(0),
581 581 "save marks have not been newly set.");
582 582
583 583 // Not very pretty.
584 584 CollectorPolicy* cp = gch->collector_policy();
585 585
586 586 FastScanClosure fsc_with_no_gc_barrier(this, false);
587 587 FastScanClosure fsc_with_gc_barrier(this, true);
588 588
589 589 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
590 590 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
591 591 &fsc_with_no_gc_barrier,
592 592 &fsc_with_gc_barrier);
593 593
594 594 assert(gch->no_allocs_since_save_marks(0),
595 595 "save marks have not been newly set.");
596 596
597 597 gch->gen_process_strong_roots(_level,
598 598 true, // Process younger gens, if any,
599 599 // as strong roots.
600 600 true, // activate StrongRootsScope
601 601 false, // not collecting perm generation.
602 602 SharedHeap::SO_AllClasses,
603 603 &fsc_with_no_gc_barrier,
604 604 true, // walk *all* scavengable nmethods
605 605 &fsc_with_gc_barrier);
606 606
607 607 // "evacuate followers".
608 608 evacuate_followers.do_void();
609 609
610 610 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
611 611 ReferenceProcessor* rp = ref_processor();
612 612 rp->setup_policy(clear_all_soft_refs);
613 613 const ReferenceProcessorStats& stats =
614 614 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
615 615 NULL, _gc_timer);
616 616 gc_tracer.report_gc_reference_stats(stats);
617 617 if (!_promotion_failed) {
618 618 // Swap the survivor spaces.
619 619 eden()->clear(SpaceDecorator::Mangle);
620 620 from()->clear(SpaceDecorator::Mangle);
621 621 if (ZapUnusedHeapArea) {
622 622 // This is now done here because of the piece-meal mangling which
623 623 // can check for valid mangling at intermediate points in the
624 624 // collection(s). When a minor collection fails to collect
625 625 // sufficient space, resizing of the young generation can occur
626 626 // and redistribute the spaces in the young generation. Mangle
627 627 // here so that unzapped regions don't get distributed to
628 628 // other spaces.
629 629 to()->mangle_unused_area();
630 630 }
631 631 swap_spaces();
632 632
633 633 assert(to()->is_empty(), "to space should be empty now");
634 634
635 635 // Set the desired survivor size to half the real survivor space
636 636 _tenuring_threshold =
637 637 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
638 638
639 639 // A successful scavenge should restart the GC time limit count which is
640 640 // for full GC's.
641 641 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
642 642 size_policy->reset_gc_overhead_limit_count();
643 643 if (PrintGC && !PrintGCDetails) {
644 644 gch->print_heap_change(gch_prev_used);
645 645 }
646 646 assert(!gch->incremental_collection_failed(), "Should be clear");
647 647 } else {
648 648 assert(_promo_failure_scan_stack.is_empty(), "post condition");
649 649 _promo_failure_scan_stack.clear(true); // Clear cached segments.
650 650
651 651 remove_forwarding_pointers();
652 652 if (PrintGCDetails) {
653 653 gclog_or_tty->print(" (promotion failed) ");
654 654 }
655 655 // Add to-space to the list of spaces to compact
656 656 // when a promotion failure has occurred. In that
657 657 // case there can be live objects in to-space
658 658 // as a result of a partial evacuation of eden
659 659 // and from-space.
660 660 swap_spaces(); // For uniformity wrt ParNewGeneration.
661 661 from()->set_next_compaction_space(to());
662 662 gch->set_incremental_collection_failed();
663 663
664 664 // Inform the next generation that a promotion failure occurred.
665 665 _next_gen->promotion_failure_occurred();
666 666 gc_tracer.report_promotion_failed(_promotion_failed_info);
667 667
668 668 // Reset the PromotionFailureALot counters.
669 669 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
670 670 }
671 671 // set new iteration safe limit for the survivor spaces
672 672 from()->set_concurrent_iteration_safe_limit(from()->top());
673 673 to()->set_concurrent_iteration_safe_limit(to()->top());
674 674 SpecializationStats::print();
675 675
676 676 // We need to use a monotonically non-decreasing time in ms
677 677 // or we will see time-warp warnings; os::javaTimeMillis()
678 678 // does not guarantee monotonicity.
679 679 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
680 680 update_time_of_last_gc(now);
681 681
682 682 gch->trace_heap_after_gc(&gc_tracer);
683 683 gc_tracer.report_tenuring_threshold(tenuring_threshold());
684 684
685 685 _gc_timer->register_gc_end(os::elapsed_counter());
686 686
687 687 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
688 688 }
689 689
690 690 class RemoveForwardPointerClosure: public ObjectClosure {
691 691 public:
692 692 void do_object(oop obj) {
693 693 obj->init_mark();
694 694 }
695 695 };
696 696
697 697 void DefNewGeneration::init_assuming_no_promotion_failure() {
698 698 _promotion_failed = false;
699 699 _promotion_failed_info.reset();
700 700 from()->set_next_compaction_space(NULL);
701 701 }
702 702
703 703 void DefNewGeneration::remove_forwarding_pointers() {
704 704 RemoveForwardPointerClosure rspc;
705 705 eden()->object_iterate(&rspc);
706 706 from()->object_iterate(&rspc);
707 707
708 708 // Now restore saved marks, if any.
709 709 assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
710 710 "should be the same");
711 711 while (!_objs_with_preserved_marks.is_empty()) {
712 712 oop obj = _objs_with_preserved_marks.pop();
713 713 markOop m = _preserved_marks_of_objs.pop();
714 714 obj->set_mark(m);
715 715 }
716 716 _objs_with_preserved_marks.clear(true);
717 717 _preserved_marks_of_objs.clear(true);
718 718 }
719 719
720 720 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
721 721 assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
722 722 "Oversaving!");
723 723 _objs_with_preserved_marks.push(obj);
724 724 _preserved_marks_of_objs.push(m);
725 725 }
726 726
727 727 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
728 728 if (m->must_be_preserved_for_promotion_failure(obj)) {
729 729 preserve_mark(obj, m);
730 730 }
731 731 }
732 732
733 733 void DefNewGeneration::handle_promotion_failure(oop old) {
734 734 if (PrintPromotionFailure && !_promotion_failed) {
735 735 gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
736 736 old->size());
737 737 }
738 738 _promotion_failed = true;
739 739 _promotion_failed_info.register_copy_failure(old->size());
740 740 preserve_mark_if_necessary(old, old->mark());
741 741 // forward to self
742 742 old->forward_to(old);
743 743
744 744 _promo_failure_scan_stack.push(old);
745 745
746 746 if (!_promo_failure_drain_in_progress) {
747 747 // prevent recursion in copy_to_survivor_space()
748 748 _promo_failure_drain_in_progress = true;
749 749 drain_promo_failure_scan_stack();
750 750 _promo_failure_drain_in_progress = false;
751 751 }
752 752 }
753 753
754 754 oop DefNewGeneration::copy_to_survivor_space(oop old) {
755 755 assert(is_in_reserved(old) && !old->is_forwarded(),
756 756 "shouldn't be scavenging this oop");
757 757 size_t s = old->size();
758 758 oop obj = NULL;
759 759
760 760 // Try allocating obj in to-space (unless too old)
761 761 if (old->age() < tenuring_threshold()) {
762 762 obj = (oop) to()->allocate(s);
763 763 }
764 764
765 765 // Otherwise try allocating obj tenured
766 766 if (obj == NULL) {
767 767 obj = _next_gen->promote(old, s);
768 768 if (obj == NULL) {
769 769 handle_promotion_failure(old);
770 770 return old;
771 771 }
772 772 } else {
773 773 // Prefetch beyond obj
774 774 const intx interval = PrefetchCopyIntervalInBytes;
775 775 Prefetch::write(obj, interval);
776 776
777 777 // Copy obj
778 778 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
779 779
780 780 // Increment age if obj still in new generation
781 781 obj->incr_age();
782 782 age_table()->add(obj, s);
783 783 }
784 784
785 785 // Done, insert forward pointer to obj in this header
786 786 old->forward_to(obj);
787 787
788 788 return obj;
789 789 }
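
In short, the scavenge above copies an object to one of two destinations, with a fallback: objects below the tenuring threshold are copied into to-space, anything else (or anything that does not fit) is promoted to the next generation, and a failed promotion self-forwards the object so the promotion-failure machinery can deal with it later. A minimal decision sketch follows, using simplified stand-in types rather than the real oop/Generation classes.

#include <cstdio>

// Simplified stand-ins; not the real HotSpot types.
enum class CopyDestination { ToSpace, NextGeneration, PromotionFailed };

// Mirrors the control flow of copy_to_survivor_space(): try to-space for
// young objects, fall back to promotion, and report failure if both fail.
CopyDestination choose_destination(unsigned age, unsigned tenuring_threshold,
                                   bool to_space_has_room, bool next_gen_has_room) {
  if (age < tenuring_threshold && to_space_has_room) {
    return CopyDestination::ToSpace;          // copied and aged within the young gen
  }
  if (next_gen_has_room) {
    return CopyDestination::NextGeneration;   // promoted (tenured)
  }
  return CopyDestination::PromotionFailed;    // self-forwarded, handled later
}

int main() {
  // A young object with room in to-space stays in the young generation.
  printf("%d\n", static_cast<int>(choose_destination(1, 15, true, true)));
  return 0;
}
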
790 790
791 791 void DefNewGeneration::drain_promo_failure_scan_stack() {
792 792 while (!_promo_failure_scan_stack.is_empty()) {
793 793 oop obj = _promo_failure_scan_stack.pop();
794 794 obj->oop_iterate(_promo_failure_scan_stack_closure);
795 795 }
796 796 }
797 797
798 798 void DefNewGeneration::save_marks() {
799 799 eden()->set_saved_mark();
800 800 to()->set_saved_mark();
801 801 from()->set_saved_mark();
802 802 }
803 803
804 804
805 805 void DefNewGeneration::reset_saved_marks() {
806 806 eden()->reset_saved_mark();
807 807 to()->reset_saved_mark();
808 808 from()->reset_saved_mark();
809 809 }
810 810
811 811
812 812 bool DefNewGeneration::no_allocs_since_save_marks() {
813 813 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
814 814 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
815 815 return to()->saved_mark_at_top();
816 816 }
817 817
818 818 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
819 819 \
820 820 void DefNewGeneration:: \
821 821 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
822 822 cl->set_generation(this); \
823 823 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
824 824 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
825 825 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
826 826 cl->reset_generation(); \
827 827 save_marks(); \
828 828 }
829 829
830 830 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
831 831
832 832 #undef DefNew_SINCE_SAVE_MARKS_DEFN
833 833
834 834 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
835 835 size_t max_alloc_words) {
836 836 if (requestor == this || _promotion_failed) return;
837 837 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
838 838
839 839 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
840 840 if (to_space->top() > to_space->bottom()) {
841 841 trace("to_space not empty when contribute_scratch called");
842 842 }
843 843 */
844 844
845 845 ContiguousSpace* to_space = to();
846 846 assert(to_space->end() >= to_space->top(), "pointers out of order");
847 847 size_t free_words = pointer_delta(to_space->end(), to_space->top());
848 848 if (free_words >= MinFreeScratchWords) {
849 849 ScratchBlock* sb = (ScratchBlock*)to_space->top();
850 850 sb->num_words = free_words;
851 851 sb->next = list;
852 852 list = sb;
853 853 }
854 854 }
855 855
856 856 void DefNewGeneration::reset_scratch() {
857 857 // If contributing scratch in to_space, mangle all of
858 858 // to_space if ZapUnusedHeapArea. This is needed because
859 859 // top is not maintained while using to-space as scratch.
860 860 if (ZapUnusedHeapArea) {
861 861 to()->mangle_unused_area_complete();
862 862 }
863 863 }
864 864
865 865 bool DefNewGeneration::collection_attempt_is_safe() {
866 866 if (!to()->is_empty()) {
867 867 if (Verbose && PrintGCDetails) {
868 868 gclog_or_tty->print(" :: to is not empty :: ");
869 869 }
870 870 return false;
871 871 }
872 872 if (_next_gen == NULL) {
873 873 GenCollectedHeap* gch = GenCollectedHeap::heap();
874 874 _next_gen = gch->next_gen(this);
875 875 assert(_next_gen != NULL,
876 876 "This must be the youngest gen, and not the only gen");
877 877 }
878 878 return _next_gen->promotion_attempt_is_safe(used());
879 879 }
880 880
881 881 void DefNewGeneration::gc_epilogue(bool full) {
882 882 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
883 883
884 884 assert(!GC_locker::is_active(), "We should not be executing here");
885 885 // Check if the heap is approaching full after a collection has
886 886 // been done. Generally the young generation is empty at
887 887 // a minimum at the end of a collection. If it is not, then
888 888 // the heap is approaching full.
889 889 GenCollectedHeap* gch = GenCollectedHeap::heap();
890 890 if (full) {
891 891 DEBUG_ONLY(seen_incremental_collection_failed = false;)
892 892 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
893 893 if (Verbose && PrintGCDetails) {
894 894 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
895 895 GCCause::to_string(gch->gc_cause()));
896 896 }
897 897 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
898 898 set_should_allocate_from_space(); // we seem to be running out of space
899 899 } else {
900 900 if (Verbose && PrintGCDetails) {
901 901 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
902 902 GCCause::to_string(gch->gc_cause()));
903 903 }
904 904 gch->clear_incremental_collection_failed(); // We just did a full collection
905 905 clear_should_allocate_from_space(); // if set
906 906 }
907 907 } else {
908 908 #ifdef ASSERT
909 909 // It is possible that incremental_collection_failed() == true
910 910 // here, because an attempted scavenge did not succeed. The policy
911 911 // is normally expected to cause a full collection which should
912 912 // clear that condition, so we should not be here twice in a row
913 913 // with incremental_collection_failed() == true without having done
914 914 // a full collection in between.
915 915 if (!seen_incremental_collection_failed &&
916 916 gch->incremental_collection_failed()) {
917 917 if (Verbose && PrintGCDetails) {
918 918 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
919 919 GCCause::to_string(gch->gc_cause()));
920 920 }
921 921 seen_incremental_collection_failed = true;
922 922 } else if (seen_incremental_collection_failed) {
923 923 if (Verbose && PrintGCDetails) {
924 924 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
925 925 GCCause::to_string(gch->gc_cause()));
926 926 }
927 927 assert(gch->gc_cause() == GCCause::_scavenge_alot ||
928 928 (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
929 929 !gch->incremental_collection_failed(),
930 930 "Twice in a row");
931 931 seen_incremental_collection_failed = false;
932 932 }
933 933 #endif // ASSERT
934 934 }
935 935
936 936 if (ZapUnusedHeapArea) {
937 937 eden()->check_mangled_unused_area_complete();
938 938 from()->check_mangled_unused_area_complete();
939 939 to()->check_mangled_unused_area_complete();
940 940 }
941 941
942 942 if (!CleanChunkPoolAsync) {
943 943 Chunk::clean_chunk_pool();
944 944 }
945 945
946 946 // update the generation and space performance counters
947 947 update_counters();
948 948 gch->collector_policy()->counters()->update_counters();
949 949 }
950 950
951 951 void DefNewGeneration::record_spaces_top() {
952 952 assert(ZapUnusedHeapArea, "Not mangling unused space");
953 953 eden()->set_top_for_allocations();
954 954 to()->set_top_for_allocations();
955 955 from()->set_top_for_allocations();
956 956 }
957 957
958 958 void DefNewGeneration::ref_processor_init() {
959 959 Generation::ref_processor_init();
960 960 }
961 961
962 962
963 963 void DefNewGeneration::update_counters() {
964 964 if (UsePerfData) {
965 965 _eden_counters->update_all();
966 966 _from_counters->update_all();
967 967 _to_counters->update_all();
968 968 _gen_counters->update_all();
969 969 }
970 970 }
971 971
972 972 void DefNewGeneration::verify() {
973 973 eden()->verify();
974 974 from()->verify();
975 975 to()->verify();
976 976 }
977 977
978 978 void DefNewGeneration::print_on(outputStream* st) const {
979 979 Generation::print_on(st);
980 980 st->print(" eden");
981 981 eden()->print_on(st);
982 982 st->print(" from");
983 983 from()->print_on(st);
984 984 st->print(" to ");
985 985 to()->print_on(st);
986 986 }
987 987
988 988
989 989 const char* DefNewGeneration::name() const {
990 990 return "def new generation";
991 991 }
992 992
993 993 // Moved from inline file as they are not called inline
994 994 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
995 995 return eden();
996 996 }
997 997
998 998 HeapWord* DefNewGeneration::allocate(size_t word_size,
999 999 bool is_tlab) {
1000 1000 // This is the slow-path allocation for the DefNewGeneration.
1001 1001 // Most allocations are fast-path in compiled code.
1002 1002 // We try to allocate from the eden. If that works, we are happy.
1003 1003 // Note that since DefNewGeneration supports lock-free allocation, we
1004 1004 // have to use it here, as well.
1005 1005 HeapWord* result = eden()->par_allocate(word_size);
1006 1006 if (result != NULL) {
1007 + if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1008 + _next_gen->sample_eden_chunk();
1009 + }
1007 1010 return result;
1008 1011 }
1009 1012 do {
1010 1013 HeapWord* old_limit = eden()->soft_end();
1011 1014 if (old_limit < eden()->end()) {
1012 1015 // Tell the next generation we reached a limit.
1013 1016 HeapWord* new_limit =
1014 1017 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
1015 1018 if (new_limit != NULL) {
1016 1019 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
1017 1020 } else {
1018 1021 assert(eden()->soft_end() == eden()->end(),
1019 1022 "invalid state after allocation_limit_reached returned null");
1020 1023 }
1021 1024 } else {
1022 1025 // The allocation failed and the soft limit is equal to the hard limit,
1023 1026 // so there is no reason to attempt another allocation
1024 1027 assert(old_limit == eden()->end(), "sanity check");
1025 1028 break;
1026 1029 }
1027 1030 // Try to allocate until succeeded or the soft limit can't be adjusted
1028 1031 result = eden()->par_allocate(word_size);
1029 1032 } while (result == NULL);
1030 1033
1031 1034 // If the eden is full and the last collection bailed out, we are running
1032 1035 // out of heap space, and we try to allocate in the from-space, too.
1033 1036 // allocate_from_space can't be inlined because that would introduce a
1034 1037 // circular dependency at compile time.
1035 1038 if (result == NULL) {
1036 1039 result = allocate_from_space(word_size);
1040 + } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1041 + _next_gen->sample_eden_chunk();
1037 1042 }
1038 1043 return result;
1039 1044 }
1040 1045
1041 1046 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1042 1047 bool is_tlab) {
1043 - return eden()->par_allocate(word_size);
1048 + HeapWord* res = eden()->par_allocate(word_size);
1049 + if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1050 + _next_gen->sample_eden_chunk();
1051 + }
1052 + return res;
1044 1053 }
1045 1054
1046 1055 void DefNewGeneration::gc_prologue(bool full) {
1047 1056 // Ensure that _end and _soft_end are the same in eden space.
1048 1057 eden()->set_soft_end(eden()->end());
1049 1058 }
1050 1059
1051 1060 size_t DefNewGeneration::tlab_capacity() const {
1052 1061 return eden()->capacity();
1053 1062 }
1054 1063
1055 1064 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1056 1065 return unsafe_max_alloc_nogc();
1057 1066 }