Robert,

Here is something to get you thinking:

package org.iseriestoolkit.util;

// The java.sql imports are used by the collection-building code that was cut
// from this posting (see the note below the class).
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

/**
 * Pools collections created from SQL queries.
 */
public class CollectionCache {

    /** The shared (default) cache instance. */
    private static CollectionCache cc = null;

    /** Holds the collection pool entries, keyed by SQL statement or name. */
    private HashMap collectionPool = new HashMap();

    /** Used to keep the pool from growing to fill all available memory. */
    private int maxCollections = 50;

    /** Creates a new instance of CollectionCache. */
    public CollectionCache() {}

    /** Creates a new instance of CollectionCache with a specified size
     * (default is 50 collections).
     * @param size The number of collections to hold.
     */
    public CollectionCache(int size) {
        this();
        setMaxCollections(size);
    }

    /** Adds an already-built collection to the pool under the given name. */
    public void addToPool(String name, Collection collToAdd) {
        CollectionCacheEntry cce = new CollectionCacheEntry();
        cce.setResultset(collToAdd);
        this.collectionPool.put(name, cce);
    }

    /** Returns the shared cache, creating it on first use. */
    public static synchronized CollectionCache getDefaultCollectionCache() {
        if (null == cc) {
            cc = new CollectionCache();
        }
        return cc;
    }

    /** Removes the pool entry whose CollectionCacheEntry has the given hash code. */
    public void removeCollection(int hashCode) {
        String key;
        Set ks = this.collectionPool.keySet();
        Iterator ccKeys = ks.iterator();
        CollectionCache.CollectionCacheEntry cce;
        boolean found = false;
        while (ccKeys.hasNext() && !found) {
            key = (String)ccKeys.next();
            cce = (CollectionCache.CollectionCacheEntry)this.collectionPool.get(key);
            if (cce.hashCode() == hashCode) {
                found = true;
                // Remove through the iterator so the key set is not modified
                // out from under it.
                ccKeys.remove();
            }
        }
    }

    /** Returns a collection based on a query.
     * If the collection is already in the pool, it gets returned.
     * If the collection is not in the pool, it gets created, added, and returned.
     *
     * @param sqlStatement The query to execute
     * @return The collection (a List (Collection) of HashMaps).
     */
    public List getColl(String sqlStatement) {
        long startTime;
        long endTime;
        startTime = System.currentTimeMillis();
        Collection retVal = null;
        CollectionCacheEntry cce = null;
        if (collectionPool.containsKey(sqlStatement)) {
            cce = (CollectionCacheEntry)collectionPool.get(sqlStatement);
            retVal = cce.getResultset();
        } else {
            cce = new CollectionCacheEntry();
            ArrayList rows = new ArrayList(1);
            // Build the entry ArrayList here (the classes that build the
            // collection and hash the SQL statement were cut from this posting).
            retVal = rows;
            cce.timeToBuild = System.currentTimeMillis() - cce.createTime;
            cce.setResultset(retVal);
            collectionPool.put(sqlStatement, cce);
            // discard any "extra" collections
            trimCollectionCache();
        }
        endTime = System.currentTimeMillis();
        return (List)retVal;
    }

    /** How many collections can this thing hold?
     * @return Duh, how many collections this thing can hold.
     */
    public int getMaxCollections() {
        return maxCollections;
    }

    /** Sets how many collections this thing can hold.
     * This method will also clear any "extra" collections - i.e., if you go
     * from 100 to 50 and there are 85 collections in the pool, 35 will be dropped.
     * @param newVal Duh, how many collections this thing can hold.
     */
    public void setMaxCollections(int newVal) {
        maxCollections = newVal;
        trimCollectionCache();
    }

    /** How many collections are there in the pool now?
     * @return Duh, how many collections are in the pool now.
     */
    public int getCollectionCount() {
        return collectionPool.size();
    }

    /** Removes any extra collections from the pool. */
    private void trimCollectionCache() {
        CollectionCacheEntry cce = null;
        while (getMaxCollections() < getCollectionCount()) {
            // discard the collection with the smallest weight
            Set ks = collectionPool.keySet();
            Iterator ccKeys = ks.iterator();
            String minName = null;
            String curName = null;
            long minWgt = Long.MAX_VALUE;
            while (ccKeys.hasNext()) {
                curName = ccKeys.next().toString();
                cce = (CollectionCacheEntry)collectionPool.get(curName);
                if (minWgt > cce.getWgt()) {
                    minName = curName;
                    // remember the new minimum so later entries are compared against it
                    minWgt = cce.getWgt();
                }
            }
            if (null != minName) {
                collectionPool.remove(minName);
            } else {
                // There was nothing to discard, and we are in an endless loop.
                // This is bad. :-(
                break;
            }
        }
    }

    // Inner class for entries in the pool.
    private class CollectionCacheEntry {

        /** The data for the collection. */
        private Collection resultset;

        /** How many times the collection has been used. */
        public long requests;

        /** How long it took to build the collection. */
        public long timeToBuild;

        /** How many records are in this entry. */
        public long recordcount;

        /** How many fields per record are in this entry. */
        public long fieldcount;

        /** When the collection was created. */
        public long createTime;

        /** When the collection was last requested. */
        public long lastUseTime;

        /** Creates a collection pool entry. */
        CollectionCacheEntry() {
            createTime = System.currentTimeMillis();
            lastUseTime = createTime;
            requests = 1;
        }

        public long getWgt() {
            /* This function is used to determine which entries to remove when
             * the pool gets trimmed. The lower the value, the more likely the
             * entry is to be removed.
             */
            return lastUseTime;
        }

        /** Setter for collection pool entry data.
         * @param c the new collection pool entry data
         */
        public void setResultset(Collection c) {
            resultset = c;
        }

        /** Getter for collection pool entry data.
         * @return The collection pool entry data
         */
        public Collection getResultset() {
            requests++;
            lastUseTime = System.currentTimeMillis();
            return resultset;
        }
    }
}

I cut out some of the stuff that relies on classes I wrote to build the
collection and to get a hash for the SQL statement, but you should get the
idea. The Avalon interface is better, and this should be adapted to that
interface, because there is never one right way to determine what to cut
from the cache.

David

>>> rupshall@psasoft.com 07/16/02 02:15PM >>>
David,

Thanks, I'll look at those places to see what I can find out, and I'd gladly
take any examples you are willing to give me.

Thanks,
Robert

----- Original Message -----
From: "David Morris" <David.Morris@plumcreek.com>

> Robert,
>
> ... I ended up creating my own implementation
> and posting a description of that algorithm to the Avalon developer's
> list.
>
> David Morris
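
[Editor's sketch] David's closing point is that the eviction rule (what getWgt() returns
above) should be pluggable rather than hard-coded, which is what he likes about the Avalon
interface. A minimal sketch of that idea follows; the EvictionPolicy interface and the two
policy classes are hypothetical illustrations, not part of the posted class or of Avalon:

    /** Hypothetical strategy interface: the entry with the lowest weight is trimmed first,
     *  matching the meaning of getWgt() in the posted class. */
    interface EvictionPolicy {
        long weigh(long lastUseTime, long requests, long timeToBuild);
    }

    /** Least-recently-used: what the posted getWgt() effectively does. */
    class LruPolicy implements EvictionPolicy {
        public long weigh(long lastUseTime, long requests, long timeToBuild) {
            return lastUseTime;
        }
    }

    /** Favors keeping entries that are requested often and were expensive to build. */
    class CostAwarePolicy implements EvictionPolicy {
        public long weigh(long lastUseTime, long requests, long timeToBuild) {
            return requests * timeToBuild;
        }
    }

CollectionCache would then hold an EvictionPolicy and trimCollectionCache() would compare
policy.weigh(...) values instead of calling getWgt() on each entry directly, so the trimming
rule can change without touching the cache itself.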