Source file                      | Conditionals | Statements | Methods | TOTAL
AbstractConcurrentReadCache.java | 60.3%        | 62.8%      | 52.8%   | 61%
/*
 * Copyright (c) 2002-2003 by OpenSymphony
 * All rights reserved.
 */
/*
  File: AbstractConcurrentReadCache

  Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java,
  which carries the following copyright:

    * Copyright 1997 by Sun Microsystems, Inc.,
    * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
    * All rights reserved.
    *
    * This software is the confidential and proprietary information
    * of Sun Microsystems, Inc. ("Confidential Information"). You
    * shall not disclose such Confidential Information and shall use
    * it only in accordance with the terms of the license agreement
    * you entered into with Sun.

  This class is a modified version of ConcurrentReaderHashMap, which was written
  by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications were done
  by Pyxis Technologies. This is a base class for the OSCache module of the
  OpenSymphony project (www.opensymphony.com).

  History:
  Date       Who                       What
  28oct1999  dl                        Created
  14dec1999  dl                        jmm snapshot
  19apr2000  dl                        use barrierLock
  12jan2001  dl                        public release
  Oct2001    abergevin@pyxis-tech.com  Integrated persistence and outer algorithm support
*/
package com.opensymphony.oscache.base.algorithm;


/** OpenSymphony BEGIN */
import com.opensymphony.oscache.base.CacheEntry;
import com.opensymphony.oscache.base.persistence.CachePersistenceException;
import com.opensymphony.oscache.base.persistence.PersistenceListener;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import java.io.IOException;
import java.io.Serializable;

import java.util.*;

/**
 * A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
 * Because reads are not limited to periods
 * without writes, a concurrent reader policy is weaker than a classic
 * reader/writer policy, but is generally faster and allows more
 * concurrency. This class is a good choice especially for tables that
 * are mainly created by one thread during the start-up phase of a
 * program, and from then on are mainly read (with perhaps occasional
 * additions or removals) in many threads. If you also need concurrency
 * among writes, consider instead using ConcurrentHashMap.
 * <p>
 *
 * Successful retrievals using get(key) and containsKey(key) usually
 * run without locking. Unsuccessful ones (i.e., when the key is not
 * present) do involve brief synchronization (locking). Also, the
 * size and isEmpty methods are always synchronized.
 *
 * <p> Because retrieval operations can ordinarily overlap with
 * writing operations (i.e., put, remove, and their derivatives),
 * retrievals can only be guaranteed to return the results of the most
 * recently <em>completed</em> operations holding upon their
 * onset. Retrieval operations may or may not return results
 * reflecting in-progress writing operations. However, the retrieval
 * operations do always return consistent results -- either those
 * holding before any single modification or after it, but never a
 * nonsense result. For aggregate operations such as putAll and
 * clear, concurrent reads may reflect insertion or removal of only
 * some entries. In those rare contexts in which you use a hash table
 * to synchronize operations across threads (for example, to prevent
 * reads until after clears), you should either encase operations
 * in synchronized blocks, or instead use java.util.Hashtable.
 *
 * <p>
 *
 * This class also supports optional guaranteed
 * exclusive reads, simply by surrounding a call within a synchronized
 * block, as in <br>
 * <code>AbstractConcurrentReadCache t; ... Object v; <br>
 * synchronized(t) { v = t.get(k); } </code> <br>
 *
 * But this is not usually necessary in practice. For
 * example, it is generally inefficient to write:
 *
 * <pre>
 *   AbstractConcurrentReadCache t; ...            // Inefficient version
 *   Object key; ...
 *   Object value; ...
 *   synchronized(t) {
 *     if (!t.containsKey(key)) {
 *       t.put(key, value);
 *       // other code if not previously present
 *     }
 *     else {
 *       // other code if it was previously present
 *     }
 *   }
 * </pre>
 * Instead, just take advantage of the fact that put returns
 * null if the key was not previously present:
 * <pre>
 *   AbstractConcurrentReadCache t; ...            // Use this instead
 *   Object key; ...
 *   Object value; ...
 *   Object oldValue = t.put(key, value);
 *   if (oldValue == null) {
 *     // other code if not previously present
 *   }
 *   else {
 *     // other code if it was previously present
 *   }
 * </pre>
 * <p>
 *
 * Iterators and Enumerations (i.e., those returned by
 * keySet().iterator(), entrySet().iterator(), values().iterator(),
 * keys(), and elements()) return elements reflecting the state of the
 * hash table at some point at or since the creation of the
 * iterator/enumeration. They will return at most one instance of
 * each element (via next()/nextElement()), but might or might not
 * reflect puts and removes that have been processed since they were
 * created. They do <em>not</em> throw ConcurrentModificationException.
 * However, these iterators are designed to be used by only one
 * thread at a time. Sharing an iterator across multiple threads may
 * lead to unpredictable results if the table is being concurrently
 * modified. Again, you can ensure interference-free iteration by
 * enclosing the iteration in a synchronized block. <p>
 *
 * This class may be used as a direct replacement for any use of
 * java.util.Hashtable that does not depend on readers being blocked
 * during updates. Like Hashtable but unlike java.util.HashMap,
 * this class does NOT allow <tt>null</tt> to be used as a key or
 * value. This class is also typically faster than ConcurrentHashMap
 * when there is usually only one thread updating the table, but
 * possibly many retrieving values from it.
 * <p>
 *
 * Implementation note: A slightly faster implementation of
 * this class will be possible once planned Java Memory Model
 * revisions are in place.
 *
 * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
 **/
public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable {
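    // Illustrative sketch (added for clarity, not part of the original source): the
    // class Javadoc above recommends wrapping iteration in a synchronized block to
    // keep concurrent writers from interleaving with a traversal. Assuming a
    // hypothetical concrete subclass named UnlimitedCache, that pattern looks like:
    //
    //   AbstractConcurrentReadCache cache = new UnlimitedCache();
    //   synchronized (cache) {
    //       for (Iterator it = cache.keySet().iterator(); it.hasNext();) {
    //           Object key = it.next();
    //           System.out.println(key + " -> " + cache.get(key));
    //       }
    //   }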
    /**
     * The default initial number of table slots for this table (32).
     * Used when not otherwise specified in constructor.
     **/
    public static int DEFAULT_INITIAL_CAPACITY = 32;

    /**
     * The minimum capacity.
     * Used if a lower value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two.
     */
    private static final int MINIMUM_CAPACITY = 4;

    /**
     * The maximum capacity.
     * Used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default load factor for this table.
     * Used when not otherwise specified in constructor; the default is 0.75f.
     **/
    public static final float DEFAULT_LOAD_FACTOR = 0.75f;

    //OpenSymphony BEGIN (pretty long!)
    protected static final String NULL = "_nul!~";
    protected static Log log = LogFactory.getLog(AbstractConcurrentReadCache.class);

    /*
      The basic strategy is an optimistic-style scheme based on
      the guarantee that the hash table and its lists are always
      kept in a consistent enough state to be read without locking:

      * Read operations first proceed without locking, by traversing the
        apparently correct list of the apparently correct bin. If an
        entry is found, but not invalidated (value field null), it is
        returned. If not found, operations must recheck (after a memory
        barrier) to make sure they are using both the right list and
        the right table (which can change under resizes). If
        invalidated, reads must acquire main update lock to wait out
        the update, and then re-traverse.

      * All list additions are at the front of each bin, making it easy
        to check changes, and also fast to traverse. Entry next
        pointers are never assigned. Remove() builds new nodes when
        necessary to preserve this.

      * Remove() (also clear()) invalidates removed nodes to alert read
        operations that they must wait out the full modifications.
    */

    /**
     * Lock used only for its memory effects. We use a Boolean
     * because it is serializable, and we create a new one because
     * we need a unique object for each cache instance.
     **/
    protected final Boolean barrierLock = new Boolean(true);

    /**
     * Field written to only to guarantee lock ordering.
     **/
    protected transient Object lastWrite;

    /**
     * The hash table data.
     */
    protected transient Entry[] table;

    /**
     * The total number of mappings in the hash table.
     */
    protected transient int count;

    /**
     * Persistence listener.
     */
    protected PersistenceListener persistenceListener = null;

    /**
     * Use memory cache or not.
     */
    protected boolean memoryCaching = true;

    /**
     * Use unlimited disk caching.
     */
    protected boolean unlimitedDiskCache = false;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    protected float loadFactor;

    /**
     * Default cache capacity (number of entries).
     */
    protected final int DEFAULT_MAX_ENTRIES = 100;

    /**
     * Max number of entries in the cache when it is considered unlimited.
     */
    protected final int UNLIMITED = 2147483646;
    protected transient Collection values = null;

    /**
     * A HashMap containing the group information.
     * Each entry uses the group name as the key, and holds a
     * <code>Set</code> containing the keys of all
     * the cache entries that belong to that particular group.
     */
    protected HashMap groups = null;
    protected transient Set entrySet = null;

    // Views
    protected transient Set keySet = null;

    /**
     * Cache capacity (number of entries).
     */
    protected int maxEntries = DEFAULT_MAX_ENTRIES;

    /**
     * The table is rehashed when its size exceeds this threshold.
     * (The value of this field is always (int)(capacity * loadFactor).)
     *
     * @serial
     */
    protected int threshold;

    /**
     * Use overflow persistence caching.
     */
    private boolean overflowPersistence = false;

    /**
     * Constructs a new, empty map with the specified initial capacity and the specified load factor.
     *
     * @param initialCapacity the initial capacity.
     *  The actual initial capacity is rounded up to the nearest power of two.
     * @param loadFactor the load factor of the AbstractConcurrentReadCache
     * @throws IllegalArgumentException if the initial maximum number
     *  of elements is less
     *  than zero, or if the load factor is nonpositive.
     */
    public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) {
        if (loadFactor <= 0) {
            throw new IllegalArgumentException("Illegal Load factor: " + loadFactor);
        }

        this.loadFactor = loadFactor;

        int cap = p2capacity(initialCapacity);
        table = new Entry[cap];
        threshold = (int) (cap * loadFactor);
    }
316 |
|
|
317 |
/**
|
|
318 |
* Constructs a new, empty map with the specified initial capacity and default load factor.
|
|
319 |
*
|
|
320 |
* @param initialCapacity the initial capacity of the
|
|
321 |
* AbstractConcurrentReadCache.
|
|
322 |
* @throws IllegalArgumentException if the initial maximum number
|
|
323 |
* of elements is less
|
|
324 |
* than zero.
|
|
325 |
*/
|
|
326 | 0 |
public AbstractConcurrentReadCache(int initialCapacity) { |
327 | 0 |
this(initialCapacity, DEFAULT_LOAD_FACTOR);
|
328 |
} |
|
329 |
|
|
330 |
/**
|
|
331 |
* Constructs a new, empty map with a default initial capacity and load factor.
|
|
332 |
*/
|
|
333 | 116 |
public AbstractConcurrentReadCache() {
|
334 | 116 |
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
|
335 |
} |
|
336 |
|
|
337 |
/**
|
|
338 |
* Constructs a new map with the same mappings as the given map.
|
|
339 |
* The map is created with a capacity of twice the number of mappings in
|
|
340 |
* the given map or 11 (whichever is greater), and a default load factor.
|
|
341 |
*/
|
|
342 | 0 |
public AbstractConcurrentReadCache(Map t) {
|
343 | 0 |
this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
|
344 | 0 |
putAll(t); |
345 |
} |
|
346 |
|
|
347 |
/**
|
|
348 |
* Returns <tt>true</tt> if this map contains no key-value mappings.
|
|
349 |
*
|
|
350 |
* @return <tt>true</tt> if this map contains no key-value mappings.
|
|
351 |
*/
|
|
352 | 0 |
public synchronized boolean isEmpty() { |
353 | 0 |
return count == 0;
|
354 |
} |
|
355 |
|
|
356 |
/**
|
|
357 |
* Returns a set of the cache keys that reside in a particular group.
|
|
358 |
*
|
|
359 |
* @param groupName The name of the group to retrieve.
|
|
360 |
* @return a set containing all of the keys of cache entries that belong
|
|
361 |
* to this group, or <code>null</code> if the group was not found.
|
|
362 |
* @exception NullPointerException if the groupName is <code>null</code>.
|
|
363 |
*/
|
|
364 | 36 |
public Set getGroup(String groupName) {
|
365 | 36 |
if (log.isDebugEnabled()) {
|
366 | 0 |
log.debug("getGroup called (group=" + groupName + ")"); |
367 |
} |
|
368 |
|
|
369 | 36 |
Set groupEntries = null;
|
370 |
|
|
371 | 36 |
if (memoryCaching && (groups != null)) { |
372 | 24 |
groupEntries = (Set) getGroupForReading(groupName); |
373 |
} |
|
374 |
|
|
375 | 36 |
if (groupEntries == null) { |
376 |
// Not in the map, try the persistence layer
|
|
377 | 12 |
groupEntries = persistRetrieveGroup(groupName); |
378 |
} |
|
379 |
|
|
380 | 36 |
return groupEntries;
|
381 |
} |
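    // Usage sketch (added for clarity; "articles" is a hypothetical group name):
    //
    //   Set keys = cache.getGroup("articles");
    //   if (keys != null) {
    //       // every key whose CacheEntry listed "articles" among its groups
    //   }
    //
    // The in-memory lookup is only consulted when memoryCaching is on; otherwise
    // the group set is fetched through the configured PersistenceListener.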
    /**
     * Set the cache capacity.
     */
    public void setMaxEntries(int newLimit) {
        if (newLimit > 0) {
            maxEntries = newLimit;

            synchronized (this) { // because remove() isn't synchronized
                while (size() > maxEntries) {
                    remove(removeItem(), false);
                }
            }
        } else {
            // Capacity must be at least 1
            throw new IllegalArgumentException("Cache maximum number of entries must be at least 1");
        }
    }
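    // Behavioural note with a worked example (added for clarity): shrinking the
    // limit evicts immediately. If the cache holds 50 entries and an LRU subclass
    // backs removeItem(), then
    //
    //   cache.setMaxEntries(10);
    //
    // synchronously removes the 40 least-recently-used entries, while
    // cache.setMaxEntries(0) throws IllegalArgumentException.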
    /**
     * Retrieve the cache capacity (number of entries).
     */
    public int getMaxEntries() {
        return maxEntries;
    }

    /**
     * Sets the memory caching flag.
     */
    public void setMemoryCaching(boolean memoryCaching) {
        this.memoryCaching = memoryCaching;
    }

    /**
     * Check if memory caching is used.
     */
    public boolean isMemoryCaching() {
        return memoryCaching;
    }

    /**
     * Set the persistence listener to use.
     */
    public void setPersistenceListener(PersistenceListener listener) {
        this.persistenceListener = listener;
    }

    /**
     * Get the persistence listener.
     */
    public PersistenceListener getPersistenceListener() {
        return persistenceListener;
    }

    /**
     * Sets the unlimited disk caching flag.
     */
    public void setUnlimitedDiskCache(boolean unlimitedDiskCache) {
        this.unlimitedDiskCache = unlimitedDiskCache;
    }

    /**
     * Check if we use unlimited disk cache.
     */
    public boolean isUnlimitedDiskCache() {
        return unlimitedDiskCache;
    }

    /**
     * Check if we use overflowPersistence.
     *
     * @return Returns the overflowPersistence.
     */
    public boolean isOverflowPersistence() {
        return this.overflowPersistence;
    }

    /**
     * Sets the overflowPersistence flag.
     *
     * @param overflowPersistence The overflowPersistence to set.
     */
    public void setOverflowPersistence(boolean overflowPersistence) {
        this.overflowPersistence = overflowPersistence;
    }

    /**
     * Return the number of slots in this table.
     **/
    public synchronized int capacity() {
        return table.length;
    }

    /**
     * Removes all mappings from this map.
     */
    public synchronized void clear() {
        Entry[] tab = table;

        for (int i = 0; i < tab.length; ++i) {
            // must invalidate all to force concurrent get's to wait and then retry
            for (Entry e = tab[i]; e != null; e = e.next) {
                e.value = null;

                /** OpenSymphony BEGIN */
                itemRemoved(e.key);

                /** OpenSymphony END */
            }

            tab[i] = null;
        }

        // Clean out the entire disk cache
        persistClear();

        count = 0;
        recordModification(tab);
    }

    /**
     * Returns a shallow copy of this
     * <tt>AbstractConcurrentReadCache</tt> instance: the keys and
     * values themselves are not cloned.
     *
     * @return a shallow copy of this map.
     */
    public synchronized Object clone() {
        try {
            AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
            t.keySet = null;
            t.entrySet = null;
            t.values = null;

            Entry[] tab = table;
            t.table = new Entry[tab.length];

            Entry[] ttab = t.table;

            for (int i = 0; i < tab.length; ++i) {
                Entry first = tab[i];

                if (first != null) {
                    ttab[i] = (Entry) (first.clone());
                }
            }

            return t;
        } catch (CloneNotSupportedException e) {
            // this shouldn't happen, since we are Cloneable
            throw new InternalError();
        }
    }

    /**
     * Tests if some key maps into the specified value in this table.
     * This operation is more expensive than the <code>containsKey</code>
     * method.<p>
     *
     * Note that this method is identical in functionality to containsValue
     * (which is part of the Map interface in the collections framework).
     *
     * @param value a value to search for.
     * @return <code>true</code> if and only if some key maps to the
     *  <code>value</code> argument in this table as
     *  determined by the <tt>equals</tt> method;
     *  <code>false</code> otherwise.
     * @exception NullPointerException if the value is <code>null</code>.
     * @see #containsKey(Object)
     * @see #containsValue(Object)
     * @see Map
     */
    public boolean contains(Object value) {
        return containsValue(value);
    }

    /**
     * Tests if the specified object is a key in this table.
     *
     * @param key possible key.
     * @return <code>true</code> if and only if the specified object
     *  is a key in this table, as determined by the
     *  <tt>equals</tt> method; <code>false</code> otherwise.
     * @exception NullPointerException if the key is
     *  <code>null</code>.
     * @see #contains(Object)
     */
    public boolean containsKey(Object key) {
        return get(key) != null;

        /** OpenSymphony BEGIN */

        // TODO: Also check the persistence?

        /** OpenSymphony END */
    }

    /**
     * Returns <tt>true</tt> if this map maps one or more keys to the
     * specified value. Note: This method requires a full internal
     * traversal of the hash table, and so is much slower than
     * method <tt>containsKey</tt>.
     *
     * @param value value whose presence in this map is to be tested.
     * @return <tt>true</tt> if this map maps one or more keys to the
     *  specified value.
     * @exception NullPointerException if the value is <code>null</code>.
     */
    public boolean containsValue(Object value) {
        if (value == null) {
            throw new NullPointerException();
        }

        Entry[] tab = getTableForReading();

        for (int i = 0; i < tab.length; ++i) {
            for (Entry e = tab[i]; e != null; e = e.next) {
                Object v = e.value;

                if ((v != null) && value.equals(v)) {
                    return true;
                }
            }
        }

        return false;
    }

    /**
     * Returns an enumeration of the values in this table.
     * Use the Enumeration methods on the returned object to fetch the elements
     * sequentially.
     *
     * @return an enumeration of the values in this table.
     * @see java.util.Enumeration
     * @see #keys()
     * @see #values()
     * @see Map
     */
    public Enumeration elements() {
        return new ValueIterator();
    }

    /**
     * Returns a collection view of the mappings contained in this map.
     * Each element in the returned collection is a <tt>Map.Entry</tt>. The
     * collection is backed by the map, so changes to the map are reflected in
     * the collection, and vice-versa. The collection supports element
     * removal, which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a collection view of the mappings contained in this map.
     */
    public Set entrySet() {
        Set es = entrySet;

        if (es != null) {
            return es;
        } else {
            return entrySet = new AbstractSet() {
                    public Iterator iterator() {
                        return new HashIterator();
                    }

                    public boolean contains(Object o) {
                        if (!(o instanceof Map.Entry)) {
                            return false;
                        }

                        Map.Entry entry = (Map.Entry) o;
                        Object key = entry.getKey();
                        Object v = AbstractConcurrentReadCache.this.get(key);

                        return (v != null) && v.equals(entry.getValue());
                    }

                    public boolean remove(Object o) {
                        if (!(o instanceof Map.Entry)) {
                            return false;
                        }

                        return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o);
                    }

                    public int size() {
                        return AbstractConcurrentReadCache.this.size();
                    }

                    public void clear() {
                        AbstractConcurrentReadCache.this.clear();
                    }
                };
        }
    }

    /**
     * Returns the value to which the specified key is mapped in this table.
     *
     * @param key a key in the table.
     * @return the value to which the key is mapped in this table;
     *  <code>null</code> if the key is not mapped to any value in
     *  this table.
     * @exception NullPointerException if the key is
     *  <code>null</code>.
     * @see #put(Object, Object)
     */
    public Object get(Object key) {
        if (log.isDebugEnabled()) {
            log.debug("get called (key=" + key + ")");
        }

        // throw null pointer exception if key null
        int hash = hash(key);

        /*
          Start off at the apparently correct bin. If entry is found, we
          need to check after a barrier anyway. If not found, we need a
          barrier to check if we are actually in right bin. So either
          way, we encounter only one barrier unless we need to retry.
          And we only need to fully synchronize if there have been
          concurrent modifications.
        */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // If key apparently not there, check to
                // make sure this was a valid read
                tab = getTableForReading();

                if (first == tab[index]) {
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    return null;*/

                    // Not in the table, try persistence
                    Object value = persistRetrieve(key);

                    if (value != null) {
                        // Update the map, but don't persist the data
                        put(key, value, false);
                    }

                    return value;

                    /** OpenSymphony END */
                } else {
                    // Wrong list -- must restart traversal at new first
                    e = first = tab[index = hash & (tab.length - 1)];
                }
            }
            // checking for pointer equality first wins in most applications
            else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                Object value = e.value;

                if (value != null) {
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    return value;*/
                    if (NULL.equals(value)) {
                        // Memory cache disabled, use disk
                        value = persistRetrieve(e.key);

                        if (value != null) {
                            itemRetrieved(key);
                        }

                        return value; // fix [CACHE-13]
                    } else {
                        itemRetrieved(key);

                        return value;
                    }

                    /** OpenSymphony END */
                }

                // Entry was invalidated during deletion. But it could
                // have been re-inserted, so we must retraverse.
                // To avoid useless contention, get lock to wait out modifications
                // before retraversing.
                synchronized (this) {
                    tab = table;
                }

                e = first = tab[index = hash & (tab.length - 1)];
            } else {
                e = e.next;
            }
        }
    }
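    // Read-path summary (added for clarity): get() has three outcomes.
    //   1. Memory hit: the entry's value is a live object; itemRetrieved(key) fires.
    //   2. NULL-marker hit (memoryCaching == false): the value is the NULL sentinel,
    //      so the real object is loaded from disk via persistRetrieve(e.key).
    //   3. Miss: after a barrier recheck, persistRetrieve(key) is consulted, and a
    //      hit is re-inserted with put(key, value, false) so it is not written back
    //      to the persistent store.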
    /**
     * Returns a set view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are reflected in the set, and
     * vice-versa. The set supports element removal, which removes the
     * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
     * <tt>clear</tt> operations. It does not support the <tt>add</tt> or
     * <tt>addAll</tt> operations.
     *
     * @return a set view of the keys contained in this map.
     */
    public Set keySet() {
        Set ks = keySet;

        if (ks != null) {
            return ks;
        } else {
            return keySet = new AbstractSet() {
                    public Iterator iterator() {
                        return new KeyIterator();
                    }

                    public int size() {
                        return AbstractConcurrentReadCache.this.size();
                    }

                    public boolean contains(Object o) {
                        return AbstractConcurrentReadCache.this.containsKey(o);
                    }

                    public boolean remove(Object o) {
                        return AbstractConcurrentReadCache.this.remove(o) != null;
                    }

                    public void clear() {
                        AbstractConcurrentReadCache.this.clear();
                    }
                };
        }
    }

    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return an enumeration of the keys in this table.
     * @see Enumeration
     * @see #elements()
     * @see #keySet()
     * @see Map
     */
    public Enumeration keys() {
        return new KeyIterator();
    }

    /**
     * Return the load factor.
     **/
    public float loadFactor() {
        return loadFactor;
    }

    /**
     * Maps the specified <code>key</code> to the specified <code>value</code> in this table.
     * Neither the key nor the
     * value can be <code>null</code>. <p>
     *
     * The value can be retrieved by calling the <code>get</code> method
     * with a key that is equal to the original key.
     *
     * @param key the table key.
     * @param value the value.
     * @return the previous value of the specified key in this table,
     *  or <code>null</code> if it did not have one.
     * @exception NullPointerException if the key or value is
     *  <code>null</code>.
     * @see Object#equals(Object)
     * @see #get(Object)
     */
    /** OpenSymphony BEGIN */
    public Object put(Object key, Object value) {
        // Call the internal put using persistence
        return put(key, value, true);
    }

    /**
     * Copies all of the mappings from the specified map to this one.
     *
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified Map.
     *
     * @param t Mappings to be stored in this map.
     */
    public synchronized void putAll(Map t) {
        for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
            Map.Entry entry = (Map.Entry) it.next();
            Object key = entry.getKey();
            Object value = entry.getValue();
            put(key, value);
        }
    }

    /**
     * Removes the key (and its corresponding value) from this table.
     * This method does nothing if the key is not in the table.
     *
     * @param key the key that needs to be removed.
     * @return the value to which the key had been mapped in this table,
     *  or <code>null</code> if the key did not have a mapping.
     * @exception NullPointerException if the key is
     *  <code>null</code>.
     */
    /** OpenSymphony BEGIN */
    public Object remove(Object key) {
        return remove(key, true);
    }

    /**
     * Returns the total number of cache entries held in this map.
     *
     * @return the number of key-value mappings in this map.
     */
    public synchronized int size() {
        return count;
    }

    /**
     * Returns a collection view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are reflected in
     * the collection, and vice-versa. The collection supports element
     * removal, which removes the corresponding mapping from this map, via the
     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a collection view of the values contained in this map.
     */
    public Collection values() {
        Collection vs = values;

        if (vs != null) {
            return vs;
        } else {
            return values = new AbstractCollection() {
                    public Iterator iterator() {
                        return new ValueIterator();
                    }

                    public int size() {
                        return AbstractConcurrentReadCache.this.size();
                    }

                    public boolean contains(Object o) {
                        return AbstractConcurrentReadCache.this.containsValue(o);
                    }

                    public void clear() {
                        AbstractConcurrentReadCache.this.clear();
                    }
                };
        }
    }

    /**
     * Get ref to group.
     * CACHE-127 Synchronized copying of the group entry set since
     * the new HashSet(Collection c) constructor uses the iterator.
     * This may slow things down, but it is better than a
     * ConcurrentModificationException. We might have to revisit the
     * code if performance is too adversely impacted.
     **/
    protected synchronized final Set getGroupForReading(String groupName) {
        Set group = (Set) getGroupsForReading().get(groupName);
        return new HashSet(group);
    }

    /**
     * Get ref to groups.
     * The reference and the cells it
     * accesses will be at least as fresh as from last
     * use of barrierLock.
     **/
    protected final Map getGroupsForReading() {
        synchronized (barrierLock) {
            return groups;
        }
    }

    /**
     * Get ref to table; the reference and the cells it
     * accesses will be at least as fresh as from last
     * use of barrierLock.
     **/
    protected final Entry[] getTableForReading() {
        synchronized (barrierLock) {
            return table;
        }
    }

    /**
     * Force a memory synchronization that will cause
     * all readers to see table. Call only when already
     * holding main synch lock.
     **/
    protected final void recordModification(Object x) {
        synchronized (barrierLock) {
            lastWrite = x;
        }
    }
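    // Note on the idiom (added for clarity): recordModification() and the
    // getXxxForReading() methods form a pre-JSR-133 memory-barrier pair. A writer
    // publishes under synchronized (barrierLock) { lastWrite = x; } and a reader
    // re-reads the shared field under the same lock, e.g.
    //
    //   synchronized (barrierLock) {
    //       return table;
    //   }
    //
    // Entering and exiting the monitor flushes and refreshes the thread's view of
    // memory, making the new table/groups contents visible without locking the map.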
    /**
     * Helper method for entrySet remove.
     **/
    protected synchronized boolean findAndRemoveEntry(Map.Entry entry) {
        Object key = entry.getKey();
        Object v = get(key);

        if ((v != null) && v.equals(entry.getValue())) {
            remove(key);

            return true;
        } else {
            return false;
        }
    }

    /**
     * Remove an object from the persistence.
     * @param key The key of the object to remove
     */
    protected void persistRemove(Object key) {
        if (log.isDebugEnabled()) {
            log.debug("persistRemove called (key=" + key + ")");
        }

        if (persistenceListener != null) {
            try {
                persistenceListener.remove((String) key);
            } catch (CachePersistenceException e) {
                log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e);
            }
        }
    }

    /**
     * Removes a cache group using the persistence listener.
     * @param groupName The name of the group to remove
     */
    protected void persistRemoveGroup(String groupName) {
        if (log.isDebugEnabled()) {
            log.debug("persistRemoveGroup called (groupName=" + groupName + ")");
        }

        if (persistenceListener != null) {
            try {
                persistenceListener.removeGroup(groupName);
            } catch (CachePersistenceException e) {
                log.error("[oscache] Exception removing group " + groupName, e);
            }
        }
    }

    /**
     * Retrieve an object from the persistence listener.
     * @param key The key of the object to retrieve
     */
    protected Object persistRetrieve(Object key) {
        if (log.isDebugEnabled()) {
            log.debug("persistRetrieve called (key=" + key + ")");
        }

        Object entry = null;

        if (persistenceListener != null) {
            try {
                entry = persistenceListener.retrieve((String) key);
            } catch (CachePersistenceException e) {
                /**
                 * It is normal that we get an exception occasionally.
                 * It happens when the item is invalidated (written or removed)
                 * during read. The logic is constructed so that read is retried.
                 */
            }
        }

        return entry;
    }

    /**
     * Retrieves a cache group using the persistence listener.
     * @param groupName The name of the group to retrieve
     */
    protected Set persistRetrieveGroup(String groupName) {
        if (log.isDebugEnabled()) {
            log.debug("persistRetrieveGroup called (groupName=" + groupName + ")");
        }

        if (persistenceListener != null) {
            try {
                return persistenceListener.retrieveGroup(groupName);
            } catch (CachePersistenceException e) {
                log.error("[oscache] Exception retrieving group " + groupName, e);
            }
        }

        return null;
    }

    /**
     * Store an object in the cache using the persistence listener.
     * @param key The object key
     * @param obj The object to store
     */
    protected void persistStore(Object key, Object obj) {
        if (log.isDebugEnabled()) {
            log.debug("persistStore called (key=" + key + ")");
        }

        if (persistenceListener != null) {
            try {
                persistenceListener.store((String) key, obj);
            } catch (CachePersistenceException e) {
                log.error("[oscache] Exception persisting " + key, e);
            }
        }
    }

    /**
     * Creates or updates a cache group using the persistence listener.
     * @param groupName The name of the group to update
     * @param group The entries for the group
     */
    protected void persistStoreGroup(String groupName, Set group) {
        if (log.isDebugEnabled()) {
            log.debug("persistStoreGroup called (groupName=" + groupName + ")");
        }

        if (persistenceListener != null) {
            try {
                if ((group == null) || group.isEmpty()) {
                    persistenceListener.removeGroup(groupName);
                } else {
                    persistenceListener.storeGroup(groupName, group);
                }
            } catch (CachePersistenceException e) {
                log.error("[oscache] Exception persisting group " + groupName, e);
            }
        }
    }

    /**
     * Removes the entire cache from persistent storage.
     */
    protected void persistClear() {
        if (log.isDebugEnabled()) {
            log.debug("persistClear called");
        }

        if (persistenceListener != null) {
            try {
                persistenceListener.clear();
            } catch (CachePersistenceException e) {
                log.error("[oscache] Exception clearing persistent cache", e);
            }
        }
    }

    /**
     * Notify the underlying implementation that an item was put in the cache.
     *
     * @param key The cache key of the item that was put.
     */
    protected abstract void itemPut(Object key);

    /**
     * Notify any underlying algorithm that an item has been retrieved from the cache.
     *
     * @param key The cache key of the item that was retrieved.
     */
    protected abstract void itemRetrieved(Object key);

    /**
     * Notify the underlying implementation that an item was removed from the cache.
     *
     * @param key The cache key of the item that was removed.
     */
    protected abstract void itemRemoved(Object key);

    /**
     * The cache has reached its capacity and an item needs to be removed
     * (typically according to an algorithm such as LRU or FIFO).
     *
     * @return The key of whichever item was removed.
     */
    protected abstract Object removeItem();

    /**
     * Reconstitute the <tt>AbstractConcurrentReadCache</tt>
     * instance from a stream (i.e.,
     * deserialize it).
     */
    private synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
        // Read in the threshold, loadfactor, and any hidden stuff
        s.defaultReadObject();

        // Read in number of buckets and allocate the bucket array;
        int numBuckets = s.readInt();
        table = new Entry[numBuckets];

        // Read in size (number of Mappings)
        int size = s.readInt();

        // Read the keys and values, and put the mappings in the table
        for (int i = 0; i < size; i++) {
            Object key = s.readObject();
            Object value = s.readObject();
            put(key, value);
        }
    }

    /**
     * Rehashes the contents of this map into a new table with a larger capacity.
     * This method is called automatically when the
     * number of keys in this map exceeds its capacity and load factor.
     */
    protected void rehash() {
        Entry[] oldMap = table;
        int oldCapacity = oldMap.length;

        if (oldCapacity >= MAXIMUM_CAPACITY) {
            return;
        }

        int newCapacity = oldCapacity << 1;
        Entry[] newMap = new Entry[newCapacity];
        threshold = (int) (newCapacity * loadFactor);

        /*
          We need to guarantee that any existing reads of oldMap can
          proceed. So we cannot yet null out each oldMap bin.

          Because we are using power-of-two expansion, the elements
          from each bin must either stay at same index, or move
          to oldCapacity+index. We also minimize new node creation by
          catching cases where old nodes can be reused because their
          .next fields won't change. (This is checked only for sequences
          of one and two. It is not worth checking longer ones.)
        */
        for (int i = 0; i < oldCapacity; ++i) {
            Entry l = null;
            Entry h = null;
            Entry e = oldMap[i];

            while (e != null) {
                int hash = e.hash;
                Entry next = e.next;

                if ((hash & oldCapacity) == 0) {
                    // stays at newMap[i]
                    if (l == null) {
                        // try to reuse node
                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) {
                            l = e;

                            break;
                        }
                    }

                    l = new Entry(hash, e.key, e.value, l);
                } else {
                    // moves to newMap[oldCapacity+i]
                    if (h == null) {
                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) {
                            h = e;

                            break;
                        }
                    }

                    h = new Entry(hash, e.key, e.value, h);
                }

                e = next;
            }

            newMap[i] = l;
            newMap[oldCapacity + i] = h;
        }

        table = newMap;
        recordModification(newMap);
    }
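    // Worked example (added for clarity): with oldCapacity == 32, entries with
    // hashes 11 and 43 share bin 11 (11 & 31 == 43 & 31 == 11). After doubling,
    // (11 & 32) == 0 so hash 11 stays in bin 11, while (43 & 32) != 0 so hash 43
    // moves to bin 32 + 11 == 43. Each old bin therefore splits into at most two
    // lists, which is what lets readers keep traversing the old table during the copy.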
    /**
     * Continuation of put(), called only when synch lock is
     * held and interference has been detected.
     **/
    /** OpenSymphony BEGIN */

    /* Previous code
    protected Object sput(Object key, Object value, int hash) {*/
    protected Object sput(Object key, Object value, int hash, boolean persist) {
        /** OpenSymphony END */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                /** OpenSymphony BEGIN */

                // Previous code
                // Entry newEntry = new Entry(hash, key, value, first);
                Entry newEntry;

                if (memoryCaching) {
                    newEntry = new Entry(hash, key, value, first);
                } else {
                    newEntry = new Entry(hash, key, NULL, first);
                }

                itemPut(key);

                // Persist if required
                if (persist && !overflowPersistence) {
                    persistStore(key, value);
                }

                // If we have a CacheEntry, update the group lookups
                if (value instanceof CacheEntry) {
                    updateGroups(null, (CacheEntry) value, persist);
                }

                /** OpenSymphony END */
                tab[index] = newEntry;

                if (++count >= threshold) {
                    rehash();
                } else {
                    recordModification(newEntry);
                }

                return null;
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                Object oldValue = e.value;

                /** OpenSymphony BEGIN */

                /* Previous code
                e.value = value; */
                if (memoryCaching) {
                    e.value = value;
                }

                // Persist if required
                if (persist && overflowPersistence) {
                    persistRemove(key);
                } else if (persist) {
                    persistStore(key, value);
                }

                updateGroups(oldValue, value, persist);

                itemPut(key);

                /** OpenSymphony END */
                return oldValue;
            } else {
                e = e.next;
            }
        }
    }

    /**
     * Continuation of remove(), called only when synch lock is
     * held and interference has been detected.
     **/
    /** OpenSymphony BEGIN */

    /* Previous code
    protected Object sremove(Object key, int hash) { */
    protected Object sremove(Object key, int hash, boolean invokeAlgorithm) {
        /** OpenSymphony END */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                return null;
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                Object oldValue = e.value;
                e.value = null;
                count--;

                /** OpenSymphony BEGIN */
                if (!unlimitedDiskCache && !overflowPersistence) {
                    persistRemove(e.key);
                }

                if (overflowPersistence && ((size() + 1) >= maxEntries)) {
                    persistStore(key, oldValue);
                }

                if (invokeAlgorithm) {
                    itemRemoved(key);
                }

                /** OpenSymphony END */
                Entry head = e.next;

                for (Entry p = first; p != e; p = p.next) {
                    head = new Entry(p.hash, p.key, p.value, head);
                }

                tab[index] = head;
                recordModification(head);

                return oldValue;
            } else {
                e = e.next;
            }
        }
    }

    /**
     * Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream
     * (i.e., serialize it).
     *
     * @serialData The <i>capacity</i> of the
     * AbstractConcurrentReadCache (the length of the
     * bucket array) is emitted (int), followed by the
     * <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
     * mappings), followed by the key (Object) and value (Object)
     * for each key-value mapping represented by the AbstractConcurrentReadCache.
     * The key-value mappings are emitted in no particular order.
     */
    private synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException {
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();

        // Write out number of buckets
        s.writeInt(table.length);

        // Write out size (number of Mappings)
        s.writeInt(count);

        // Write out keys and values (alternating)
        for (int index = table.length - 1; index >= 0; index--) {
            Entry entry = table[index];

            while (entry != null) {
                s.writeObject(entry.key);
                s.writeObject(entry.value);
                entry = entry.next;
            }
        }
    }
1442 |
|
|
1443 |
/**
|
|
1444 |
* Return hash code for Object x.
|
|
1445 |
* Since we are using power-of-two
|
|
1446 |
* tables, it is worth the effort to improve hashcode via
|
|
1447 |
* the same multiplicative scheme as used in IdentityHashMap.
|
|
1448 |
*/
|
|
1449 | 2001559 |
private static int hash(Object x) { |
1450 | 2001562 |
int h = x.hashCode();
|
1451 |
|
|
1452 |
// Multiply by 127 (quickly, via shifts), and mix in some high
|
|
1453 |
// bits to help guard against bunching of codes that are
|
|
1454 |
// consecutive or equally spaced.
|
|
1455 | 2001540 |
return ((h << 7) - h + (h >>> 9) + (h >>> 17));
|
1456 |
} |
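    // Worked example (added for clarity): (h << 7) - h equals 127 * h, so for
    // h == 1 the spreader returns 127 and for h == 2 it returns 254 -- consecutive
    // hashCodes no longer land in consecutive bins once masked with
    // index = hash & (table.length - 1). The (h >>> 9) and (h >>> 17) terms fold
    // high-order bits into the low bits that the power-of-two mask actually uses.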
    /**
     * Add this cache key to the specified groups.
     * We have to treat the
     * memory and disk group mappings separately so they remain valid for their
     * corresponding memory/disk caches. (E.g. if mem is limited to 100 entries
     * and disk is unlimited, the group mappings will be different.)
     *
     * @param key The cache key that we are adding to the groups.
     * @param newGroups the set of groups we want to add this cache entry to.
     * @param persist A flag to indicate whether the keys should be added to
     * the persistent cache layer.
     */
    private void addGroupMappings(String key, Set newGroups, boolean persist) {
        // Add this CacheEntry to the groups that it is now a member of
        for (Iterator it = newGroups.iterator(); it.hasNext();) {
            String groupName = (String) it.next();

            // Update the in-memory groups
            if (memoryCaching) {
                if (groups == null) {
                    groups = new HashMap();
                }

                Set memoryGroup = (Set) groups.get(groupName);

                if (memoryGroup == null) {
                    memoryGroup = new HashSet();
                    groups.put(groupName, memoryGroup);
                }

                memoryGroup.add(key);
            }

            // Update the persistent group maps
            if (persist) {
                Set persistentGroup = persistRetrieveGroup(groupName);

                if (persistentGroup == null) {
                    persistentGroup = new HashSet();
                }

                persistentGroup.add(key);
                persistStoreGroup(groupName, persistentGroup);
            }
        }
    }

    /** OpenSymphony END (pretty long!) */
    /**
     * Returns the appropriate capacity (power of two) for the specified
     * initial capacity argument.
     */
    private int p2capacity(int initialCapacity) {
        int cap = initialCapacity;

        // Compute the appropriate capacity
        int result;

        if ((cap > MAXIMUM_CAPACITY) || (cap < 0)) {
            result = MAXIMUM_CAPACITY;
        } else {
            result = MINIMUM_CAPACITY;

            while (result < cap) {
                result <<= 1;
            }
        }

        return result;
    }
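    // Worked example (added for clarity): p2capacity(33) doubles MINIMUM_CAPACITY
    // (4 -> 8 -> 16 -> 32 -> 64) until it reaches at least 33, returning 64;
    // p2capacity(32) returns 32 exactly, p2capacity(0) returns the minimum of 4,
    // and a negative or over-large request falls back to MAXIMUM_CAPACITY (1 << 30).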
|
1528 |
|
|
1529 | /* Previous code
1530 |    public Object put(Object key, Object value)*/
1531 | 647 | private Object put(Object key, Object value, boolean persist) {
1532 |     /** OpenSymphony END */
1533 | 647 |     if (value == null) {
1534 | 24 |         throw new NullPointerException();
1535 |     }
1536 |
1537 | 623 |     int hash = hash(key);
1538 | 623 |     Entry[] tab = table;
1539 | 623 |     int index = hash & (tab.length - 1);
1540 | 623 |     Entry first = tab[index];
1541 | 623 |     Entry e = first;
1542 |
1543 | 623 |     for (;;) {
1544 | 699 |         if (e == null) {
1545 | 516 |             synchronized (this) {
1546 | 516 |                 tab = table;
1547 |
1548 |                 /** OpenSymphony BEGIN */
1549 |
1550 |                 // Previous code
1551 |
1552 |                 /* if (first == tab[index]) {
1553 |                        // Add to front of list
1554 |                        Entry newEntry = new Entry(hash, key, value, first);
1555 |                        tab[index] = newEntry;
1556 |                        if (++count >= threshold) rehash();
1557 |                        else recordModification(newEntry);
1558 |                        return null; */
1559 |
1560 |                 // Remove an item if the cache is full
1561 | 516 |                 if (size() >= maxEntries) {
1562 | 24 |                     remove(removeItem(), false);
1563 |                 }
1564 |
1565 | 516 |                 if (first == tab[index]) {
1566 |                     // Add to front of list
1567 | 505 |                     Entry newEntry = null;
1568 |
1569 | 505 |                     if (memoryCaching) {
1570 | 447 |                         newEntry = new Entry(hash, key, value, first);
1571 |                     } else {
1572 | 58 |                         newEntry = new Entry(hash, key, NULL, first);
1573 |                     }
1574 |
1575 | 505 |                     tab[index] = newEntry;
1576 | 505 |                     itemPut(key);
1577 |
1578 |                     // Persist if required
1579 | 505 |                     if (persist && !overflowPersistence) {
1580 | 333 |                         persistStore(key, value);
1581 |                     }
1582 |
1583 |                     // If we have a CacheEntry, update the group lookups
1584 | 505 |                     if (value instanceof CacheEntry) {
1585 | 249 |                         updateGroups(null, (CacheEntry) value, persist);
1586 |                     }
1587 |
1588 | 505 |                     if (++count >= threshold) {
1589 | 4 |                         rehash();
1590 |                     } else {
1591 | 501 |                         recordModification(newEntry);
1592 |                     }
1593 |
1594 | 505 |                     return newEntry;
1595 |
1596 |                     /** OpenSymphony END */
1597 |                 } else {
1598 |                     // wrong list -- retry
1599 |
1600 |                     /** OpenSymphony BEGIN */
1601 |
1602 |                     /* Previous code
1603 |                        return sput(key, value, hash);*/
1604 | 11 |                     return sput(key, value, hash, persist);
1605 |
1606 |                     /** OpenSymphony END */
1607 |                 }
1608 |             }
1609 | 183 |         } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
1610 |             // synch to avoid race with remove and to
1611 |             // ensure proper serialization of multiple replaces
1612 | 107 |             synchronized (this) {
1613 | 107 |                 tab = table;
1614 |
1615 | 107 |                 Object oldValue = e.value;
1616 |
1617 |                 // [CACHE-118] - get the old cache entry even if there's no memory cache
1618 | 107 |                 if (persist && (oldValue == NULL)) {
1619 | 31 |                     oldValue = persistRetrieve(key);
1620 |                 }
1621 |
1622 | 107 |                 if ((first == tab[index]) && (oldValue != null)) {
1623 |                     /** OpenSymphony BEGIN */
1624 |
1625 |                     /* Previous code
1626 |                        e.value = value;
1627 |                        return oldValue; */
1628 | 107 |                     if (memoryCaching) {
1629 | 76 |                         e.value = value;
1630 |                     }
1631 |
1632 |                     // Persist if required
1633 | 107 |                     if (persist && overflowPersistence) {
1634 | 22 |                         persistRemove(key);
1635 | 85 |                     } else if (persist) {
1636 | 85 |                         persistStore(key, value);
1637 |                     }
1638 |
1639 | 107 |                     updateGroups(oldValue, value, persist);
1640 | 107 |                     itemPut(key);
1641 |
1642 | 107 |                     return oldValue;
1643 |
1644 |                     /** OpenSymphony END */
1645 |                 } else {
1646 |                     // retry if wrong list or lost race against concurrent remove
1647 |
1648 |                     /** OpenSymphony BEGIN */
1649 |
1650 |                     /* Previous code
1651 |                        return sput(key, value, hash);*/
1652 | 0 |                     return sput(key, value, hash, persist);
1653 |
1654 |                     /** OpenSymphony END */
1655 |                 }
1656 |             }
1657 |         } else {
1658 | 76 |             e = e.next;
1659 |         }
1660 |     }
1661 | }
1662 |
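The shape of put() is the heart of the concurrent-read design: traverse the bucket without locking, and only take the monitor to link a new head entry, re-checking first == tab[index] to detect a racing writer. A stripped-down hypothetical sketch of that idiom (not the real class, which also handles eviction, persistence, and groups):

    class MiniBucketMap {
        static final class Node {
            final Object key;
            volatile Object value;  // volatile so unlocked readers see updates
            final Node next;        // immutable link: safe to traverse unlocked
            Node(Object k, Object v, Node n) { key = k; value = v; next = n; }
        }

        private final Node[] table = new Node[16];

        Object put(Object key, Object value) {
            int i = key.hashCode() & (table.length - 1);
            Node first = table[i];      // optimistic, unlocked read
            synchronized (this) {
                if (first != table[i]) {
                    first = table[i];   // lost a race; re-read under the lock
                }
                table[i] = new Node(key, value, first); // link new head
            }
            return null;
        }
    }

The real method delegates to sput() when it detects the race rather than re-reading the head; the sketch keeps only the recheck itself.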
1663 | 64 | private synchronized Object remove(Object key, boolean invokeAlgorithm)
1664 | /* Previous code
1665 |    public Object remove(Object key) */
1666 |
1667 | /** OpenSymphony END */ {
1668 |     /*
1669 |       Strategy:
1670 |
1671 |       Find the entry, then
1672 |         1. Set value field to null, to force get() to retry
1673 |         2. Rebuild the list without this entry.
1674 |       All entries following the removed node can stay in the list, but
1675 |       all preceding ones need to be cloned. Traversals rely
1676 |       on this strategy to ensure that elements will not be
1677 |       repeated during iteration.
1678 |     */
1679 |
1680 |     /** OpenSymphony BEGIN */
1681 | 64 |     if (key == null) {
1682 | 0 |         return null;
1683 |     }
1684 |
1685 |     /** OpenSymphony END */
1686 | 64 |     int hash = hash(key);
1687 | 64 |     Entry[] tab = table;
1688 | 64 |     int index = hash & (tab.length - 1);
1689 | 64 |     Entry first = tab[index];
1690 | 64 |     Entry e = first;
1691 |
1692 | 64 |     for (;;) {
1693 | 64 |         if (e == null) {
1694 | 0 |             tab = getTableForReading();
1695 |
1696 | 0 |             if (first == tab[index]) {
1697 | 0 |                 return null;
1698 |             } else {
1699 |                 // Wrong list -- must restart traversal at new first
1700 |
1701 |                 /** OpenSymphony BEGIN */
1702 |
1703 |                 /* Previous Code
1704 |                    return sremove(key, hash); */
1705 | 0 |                 return sremove(key, hash, invokeAlgorithm);
1706 |
1707 |                 /** OpenSymphony END */
1708 |             }
1709 | 64 |         } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
1710 | 64 |             synchronized (this) {
1711 | 64 |                 tab = table;
1712 |
1713 | 64 |                 Object oldValue = e.value;
1714 |
1715 |                 // re-find under synch if wrong list
1716 | 64 |                 if ((first != tab[index]) || (oldValue == null)) {
1717 |                     /** OpenSymphony BEGIN */
1718 |
1719 |                     /* Previous Code
1720 |                        return sremove(key, hash); */
1721 | 0 |                     return sremove(key, hash, invokeAlgorithm);
1722 |                 }
1723 |
1724 |                 /** OpenSymphony END */
1725 | 64 |                 e.value = null;
1726 | 64 |                 count--;
1727 |
1728 |                 /** OpenSymphony BEGIN */
1729 | 64 |                 if (!unlimitedDiskCache && !overflowPersistence) {
1730 | 24 |                     persistRemove(e.key);
1731 |                 }
1732 |
1733 | 64 |                 if (overflowPersistence && ((size() + 1) >= maxEntries)) {
1734 | 16 |                     persistStore(key, oldValue);
1735 |                 }
1736 |
1737 | 64 |                 if (invokeAlgorithm) {
1738 | 24 |                     itemRemoved(key);
1739 |                 }
1740 |
1741 |                 /** OpenSymphony END */
1742 | 64 |                 Entry head = e.next;
1743 |
1744 | 64 |                 for (Entry p = first; p != e; p = p.next) {
1745 | 0 |                     head = new Entry(p.hash, p.key, p.value, head);
1746 |                 }
1747 |
1748 | 64 |                 tab[index] = head;
1749 | 64 |                 recordModification(head);
1750 |
1751 | 64 |                 return oldValue;
1752 |             }
1753 |         } else {
1754 | 0 |             e = e.next;
1755 |         }
1756 |     }
1757 | }
1758 |
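The "clone the prefix, share the suffix" rebuild described in the Strategy comment fits in a few lines. A hypothetical sketch (Node is a stand-in for Entry); note that, exactly as in the loop above, the cloned prefix comes out in reverse order, which is harmless for a hash bucket:

    class RemoveDemo {
        static final class Node {
            final Object key;
            final Object value;
            final Node next;
            Node(Object k, Object v, Node n) { key = k; value = v; next = n; }
        }

        // Rebuild a bucket list without 'victim'. Nodes after it are reused;
        // nodes before it are copied, so a reader mid-traversal still sees a
        // complete, consistent chain and never repeats an element.
        static Node removeNode(Node first, Node victim) {
            Node head = victim.next;
            for (Node p = first; p != victim; p = p.next) {
                head = new Node(p.key, p.value, head);
            }
            return head;
        }
    }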
1759 | /**
1760 |  * Remove this CacheEntry from the groups it no longer belongs to.
1761 |  * We have to treat the memory and disk group mappings separately so they remain
1762 |  * valid for their corresponding memory/disk caches. (e.g. if mem is limited
1763 |  * to 100 entries and disk is unlimited, the group mappings will be
1764 |  * different).
1765 |  *
1766 |  * @param key The cache key that we are removing from the groups.
1767 |  * @param oldGroups the set of groups we want to remove the cache entry
1768 |  * from.
1769 |  * @param persist A flag to indicate whether the keys should be removed
1770 |  * from the persistent cache layer.
1771 |  */
1772 | 80 | private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
1773 | 80 |     for (Iterator it = oldGroups.iterator(); it.hasNext();) {
1774 | 24 |         String groupName = (String) it.next();
1775 |
1776 |         // Update the in-memory groups
1777 | 24 |         if (memoryCaching && (this.groups != null)) {
1778 | 18 |             Set memoryGroup = (Set) groups.get(groupName);
1779 |
1780 | 18 |             if (memoryGroup != null) {
1781 | 18 |                 memoryGroup.remove(key);
1782 |
1783 | 18 |                 if (memoryGroup.isEmpty()) {
1784 | 0 |                     groups.remove(groupName);
1785 |                 }
1786 |             }
1787 |         }
1788 |
1789 |         // Update the persistent group maps
1790 | 24 |         if (persist) {
1791 | 24 |             Set persistentGroup = persistRetrieveGroup(groupName);
1792 |
1793 | 24 |             if (persistentGroup != null) {
1794 | 18 |                 persistentGroup.remove(key);
1795 |
1796 | 18 |                 if (persistentGroup.isEmpty()) {
1797 | 0 |                     persistRemoveGroup(groupName);
1798 |                 } else {
1799 | 18 |                     persistStoreGroup(groupName, persistentGroup);
1800 |                 }
1801 |             }
1802 |         }
1803 |     }
1804 | }
1805 |
1806 | /**
1807 |  * Updates the groups to reflect the differences between the old and new
1808 |  * cache entries. Either of the old or new values can be <code>null</code>
1809 |  * or contain a <code>null</code> group list, in which case the entry's
1810 |  * groups will all be added or removed respectively.
1811 |  *
1812 |  * @param oldValue The old CacheEntry that is being replaced.
1813 |  * @param newValue The new CacheEntry that is being inserted.
1814 |  */
1815 | 107 | private void updateGroups(Object oldValue, Object newValue, boolean persist) {
1816 |     // If we have/had a CacheEntry, update the group lookups
1817 | 107 |     boolean oldIsCE = oldValue instanceof CacheEntry;
1818 | 107 |     boolean newIsCE = newValue instanceof CacheEntry;
1819 |
1820 | 107 |     if (newIsCE && oldIsCE) {
1821 | 107 |         updateGroups((CacheEntry) oldValue, (CacheEntry) newValue, persist);
1822 | 0 |     } else if (newIsCE) {
1823 | 0 |         updateGroups(null, (CacheEntry) newValue, persist);
1824 | 0 |     } else if (oldIsCE) {
1825 | 0 |         updateGroups((CacheEntry) oldValue, null, persist);
1826 |     }
1827 | }
1828 |
1829 | /**
1830 |  * Updates the groups to reflect the differences between the old and new cache entries.
1831 |  * Either of the old or new values can be <code>null</code>
1832 |  * or contain a <code>null</code> group list, in which case the entry's
1833 |  * groups will all be added or removed respectively.
1834 |  *
1835 |  * @param oldValue The old CacheEntry that is being replaced.
1836 |  * @param newValue The new CacheEntry that is being inserted.
1837 |  */
1838 | 367 | private void updateGroups(CacheEntry oldValue, CacheEntry newValue, boolean persist) {
1839 | 367 |     Set oldGroups = null;
1840 | 367 |     Set newGroups = null;
1841 |
1842 | 367 |     if (oldValue != null) {
1843 | 107 |         oldGroups = oldValue.getGroups();
1844 |     }
1845 |
1846 | 367 |     if (newValue != null) {
1847 | 367 |         newGroups = newValue.getGroups();
1848 |     }
1849 |
1850 |     // Get the names of the groups to remove
1851 | 367 |     if (oldGroups != null) {
1852 | 80 |         Set removeFromGroups = new HashSet();
1853 |
1854 | 80 |         for (Iterator it = oldGroups.iterator(); it.hasNext();) {
1855 | 134 |             String groupName = (String) it.next();
1856 |
1857 | 134 |             if ((newGroups == null) || !newGroups.contains(groupName)) {
1858 |                 // We need to remove this group
1859 | 24 |                 removeFromGroups.add(groupName);
1860 |             }
1861 |         }
1862 |
1863 | 80 |         removeGroupMappings(oldValue.getKey(), removeFromGroups, persist);
1864 |     }
1865 |
1866 |     // Get the names of the groups to add
1867 | 367 |     if (newGroups != null) {
1868 | 152 |         Set addToGroups = new HashSet();
1869 |
1870 | 152 |         for (Iterator it = newGroups.iterator(); it.hasNext();) {
1871 | 252 |             String groupName = (String) it.next();
1872 |
1873 | 252 |             if ((oldGroups == null) || !oldGroups.contains(groupName)) {
1874 |                 // We need to add this group
1875 | 142 |                 addToGroups.add(groupName);
1876 |             }
1877 |         }
1878 |
1879 | 152 |         addGroupMappings(newValue.getKey(), addToGroups, persist);
1880 |     }
1881 | }
1882 |
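The two hand-rolled loops above compute set differences (the class predates java.util generics). A hypothetical modern restatement of the same diff, with null group sets treated as empty:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    class GroupDiff {
        final Set<String> toRemove; // in old but not new: removeGroupMappings targets
        final Set<String> toAdd;    // in new but not old: addGroupMappings targets

        GroupDiff(Set<String> oldGroups, Set<String> newGroups) {
            Set<String> olds = (oldGroups == null) ? Collections.<String>emptySet() : oldGroups;
            Set<String> news = (newGroups == null) ? Collections.<String>emptySet() : newGroups;
            toRemove = new HashSet<>(olds);
            toRemove.removeAll(news); // old \ new
            toAdd = new HashSet<>(news);
            toAdd.removeAll(olds);    // new \ old
        }
    }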
1883 | /**
1884 |  * AbstractConcurrentReadCache collision list entry.
1885 |  */
1886 | protected static class Entry implements Map.Entry {
1887 |     protected final Entry next;
1888 |     protected final Object key;
1889 |
1890 |     /*
1891 |       The use of volatile for the value field ensures that
1892 |       we can detect status changes without synchronization.
1893 |       The other fields are never changed, and are
1894 |       marked as final.
1895 |     */
1896 |     protected final int hash;
1897 |     protected volatile Object value;
1898 |
1899 | 550 |     Entry(int hash, Object key, Object value, Entry next) {
1900 | 550 |         this.hash = hash;
1901 | 550 |         this.key = key;
1902 | 550 |         this.next = next;
1903 | 550 |         this.value = value;
1904 |     }
1905 |
1906 |     // Map.Entry Ops
1907 | 0 |     public Object getKey() {
1908 | 0 |         return key;
1909 |     }
1910 |
1911 |     /**
1912 |      * Set the value of this entry.
1913 |      * Note: In an entrySet or
1914 |      * entrySet.iterator(), unless the set or iterator is used under
1915 |      * synchronization of the table as a whole (or you can otherwise
1916 |      * guarantee lack of concurrent modification), <tt>setValue</tt>
1917 |      * is not strictly guaranteed to actually replace the value field
1918 |      * obtained via the <tt>get</tt> operation of the underlying hash
1919 |      * table in multithreaded applications. If iterator-wide
1920 |      * synchronization is not used, and any other concurrent
1921 |      * <tt>put</tt> or <tt>remove</tt> operations occur, sometimes
1922 |      * even to <em>other</em> entries, then this change is not
1923 |      * guaranteed to be reflected in the hash table. (It might, or it
1924 |      * might not. There are no assurances either way.)
1925 |      *
1926 |      * @param value the new value.
1927 |      * @return the previous value, or null if entry has been detectably
1928 |      * removed.
1929 |      * @exception NullPointerException if the value is <code>null</code>.
1930 |      *
1931 |      **/
1932 | 0 |     public Object setValue(Object value) {
1933 | 0 |         if (value == null) {
1934 | 0 |             throw new NullPointerException();
1935 |         }
1936 |
1937 | 0 |         Object oldValue = this.value;
1938 | 0 |         this.value = value;
1939 |
1940 | 0 |         return oldValue;
1941 |     }
1942 |
1943 |     /**
1944 |      * Get the value.
1945 |      * Note: In an entrySet or entrySet.iterator,
1946 |      * unless the set or iterator is used under synchronization of the
1947 |      * table as a whole (or you can otherwise guarantee lack of
1948 |      * concurrent modification), <tt>getValue</tt> <em>might</em>
1949 |      * return null, reflecting the fact that the entry has been
1950 |      * concurrently removed. However, there are no assurances that
1951 |      * concurrent removals will be reflected using this method.
1952 |      *
1953 |      * @return the current value, or null if the entry has been
1954 |      * detectably removed.
1955 |      **/
1956 | 0 |     public Object getValue() {
1957 | 0 |         return value;
1958 |     }
1959 |
1960 | 0 |     public boolean equals(Object o) {
1961 | 0 |         if (!(o instanceof Map.Entry)) {
1962 | 0 |             return false;
1963 |         }
1964 |
1965 | 0 |         Map.Entry e = (Map.Entry) o;
1966 |
1967 | 0 |         if (!key.equals(e.getKey())) {
1968 | 0 |             return false;
1969 |         }
1970 |
1971 | 0 |         Object v = value;
1972 |
1973 | 0 |         return (v == null) ? (e.getValue() == null) : v.equals(e.getValue());
1974 |     }
1975 |
1976 | 0 |     public int hashCode() {
1977 | 0 |         Object v = value;
1978 |
1979 | 0 |         return hash ^ ((v == null) ? 0 : v.hashCode());
1980 |     }
1981 |
1982 | 0 |     public String toString() {
1983 | 0 |         return key + "=" + value;
1984 |     }
1985 |
1986 | 0 |     protected Object clone() {
1987 | 0 |         return new Entry(hash, key, value, ((next == null) ? null : (Entry) next.clone()));
1988 |     }
1989 | }
1990 |
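The comment on the value field is the crux of the lock-free read path: because value is volatile, an unlocked reader reliably observes the null written by remove() and knows to retry. A small hypothetical demonstration (thread timing is illustrative only; this is not OSCache code):

    class VolatileValueDemo {
        static class Node {
            volatile Object value = "live"; // volatile: readers see the null promptly
        }

        public static void main(String[] args) throws InterruptedException {
            final Node n = new Node();
            Thread reader = new Thread(() -> {
                // Unlocked polling read, like get() checking entry.value.
                while (n.value != null) { /* spin until the removal is visible */ }
                System.out.println("removal detected without locking");
            });
            reader.start();
            Thread.sleep(10); // let the reader start polling (timing illustrative)
            n.value = null;   // simulate remove(): a null value forces readers to retry
            reader.join();
        }
    }

Without volatile, the spinning read could legally never observe the write and the loop could hang.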
1991 | protected class HashIterator implements Iterator, Enumeration {
1992 |     protected final Entry[] tab; // snapshot of table
1993 |     protected Entry entry = null; // current node of slot
1994 |     protected Entry lastReturned = null; // last node returned by next
1995 |     protected Object currentKey; // key for current node
1996 |     protected Object currentValue; // value for current node
1997 |     protected int index; // current slot
1998 |
1999 | 48 |     protected HashIterator() {
2000 | 48 |         tab = AbstractConcurrentReadCache.this.getTableForReading();
2001 | 48 |         index = tab.length - 1;
2002 |     }
2003 |
2004 | 0 |     public boolean hasMoreElements() {
2005 | 0 |         return hasNext();
2006 |     }
2007 |
2008 | 120 |     public boolean hasNext() {
2009 |         /*
2010 |           currentKey and currentValue are set here to ensure that next()
2011 |           returns normally if hasNext() returns true. This avoids
2012 |           surprises especially when the final element is removed during
2013 |           traversal -- instead, we just ignore the removal during
2014 |           the current traversal.
2015 |         */
2016 | 120 |         for (;;) {
2017 | 192 |             if (entry != null) {
2018 | 72 |                 Object v = entry.value;
2019 |
2020 | 72 |                 if (v != null) {
2021 | 72 |                     currentKey = entry.key;
2022 | 72 |                     currentValue = v;
2023 |
2024 | 72 |                     return true;
2025 |                 } else {
2026 | 0 |                     entry = entry.next;
2027 |                 }
2028 |             }
2029 |
2030 | 120 |             while ((entry == null) && (index >= 0)) {
2031 | 1536 |                 entry = tab[index--];
2032 |             }
2033 |
2034 | 120 |             if (entry == null) {
2035 | 48 |                 currentKey = currentValue = null;
2036 |
2037 | 48 |                 return false;
2038 |             }
2039 |         }
2040 |     }
2041 |
2042 | 72 |     public Object next() {
2043 | 72 |         if ((currentKey == null) && !hasNext()) {
2044 | 0 |             throw new NoSuchElementException();
2045 |         }
2046 |
2047 | 72 |         Object result = returnValueOfNext();
2048 | 72 |         lastReturned = entry;
2049 | 72 |         currentKey = currentValue = null;
2050 | 72 |         entry = entry.next;
2051 |
2052 | 72 |         return result;
2053 |     }
2054 |
2055 | 0 |     public Object nextElement() {
2056 | 0 |         return next();
2057 |     }
2058 |
2059 | 0 |     public void remove() {
2060 | 0 |         if (lastReturned == null) {
2061 | 0 |             throw new IllegalStateException();
2062 |         }
2063 |
2064 | 0 |         AbstractConcurrentReadCache.this.remove(lastReturned.key);
2065 |     }
2066 |
2067 | 0 |     protected Object returnValueOfNext() {
2068 | 0 |         return entry;
2069 |     }
2070 | }
2071 |
2072 | protected class KeyIterator extends HashIterator {
2073 | 72 |     protected Object returnValueOfNext() {
2074 | 72 |         return currentKey;
2075 |     }
2076 | }
2077 |
2078 | protected class ValueIterator extends HashIterator {
2079 | 0 |     protected Object returnValueOfNext() {
2080 | 0 |         return currentValue;
2081 |     }
2082 | }
2083 | }
2084 |
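Finally, the hasNext()/next() contract above relies on a pre-fetch idiom: hasNext() caches the next key and value, so a removal that lands between hasNext() and next() cannot make next() throw. A self-contained hypothetical sketch of the same idiom over a plain array snapshot (not the OSCache iterator itself):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    class PrefetchIterator implements Iterator<Object> {
        private final Object[] slots; // snapshot, like getTableForReading()
        private int index = 0;
        private Object current;       // element cached by hasNext()

        PrefetchIterator(Object[] slots) { this.slots = slots; }

        public boolean hasNext() {
            while (current == null && index < slots.length) {
                current = slots[index++]; // skip nulls (removed entries)
            }
            return current != null;
        }

        public Object next() {
            if (current == null && !hasNext()) {
                throw new NoSuchElementException();
            }
            Object result = current;
            current = null; // force the next hasNext() to advance
            return result;
        }
    }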