atomic_container.h

/* -*- mode:C++; c-basic-offset:4 -*-
     Shore-MT -- Multi-threaded port of the SHORE storage manager

                       Copyright (c) 2007-2009
      Data Intensive Applications and Systems Laboratory (DIAS)
               Ecole Polytechnique Federale de Lausanne

                         All Rights Reserved.

   Permission to use, copy, modify and distribute this software and
   its documentation is hereby granted, provided that both the
   copyright notice and this permission notice appear in all copies of
   the software, derivative works or modified versions, and any
   portions thereof, and that both notices appear in supporting
   documentation.

   This code is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. THE AUTHORS
   DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
   RESULTING FROM THE USE OF THIS SOFTWARE.
*/
/*<std-header orig-src='shore' incl-file-exclusion='ATOMIC_CONTAINER_H'>

 $Id: atomic_container.h,v 1.3 2010/06/23 23:42:57 nhall Exp $

SHORE -- Scalable Heterogeneous Object REpository

Copyright (c) 1994-99 Computer Sciences Department, University of
                      Wisconsin -- Madison
All Rights Reserved.

Permission to use, copy, modify and distribute this software and its
documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the
software, derivative works or modified versions, and any portions
thereof, and that both notices appear in supporting documentation.

THE AUTHORS AND THE COMPUTER SCIENCES DEPARTMENT OF THE UNIVERSITY
OF WISCONSIN - MADISON ALLOW FREE USE OF THIS SOFTWARE IN ITS
"AS IS" CONDITION, AND THEY DISCLAIM ANY LIABILITY OF ANY KIND
FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.

This software was developed with support by the Advanced Research
Project Agency, ARPA order number 018 (formerly 8230), monitored by
the U.S. Army Research Laboratory under contract DAAB07-91-C-Q518.
Further funding for this work was provided by DARPA through
Rome Research Laboratory Contract No. F30602-97-2-0247.

*/

#ifndef __ATOMIC_CONTAINER
#define __ATOMIC_CONTAINER

#include "atomic_templates.h"

// for placement new support, which users need
#include <new>
#include <cassert>
#include <stdlib.h>

/** \brief A thread-safe, lock-free, almost wait-free atomic
 * container for untyped items.
 *
 * This class takes care of pushing and
 * popping elements from the container for multiple concurrent threads. It
 * is up to the user (client code) to allocate and deallocate the
 * storage for items pushed on this container.
 *
 * The objects being stored here must have an embedded next pointer.
 * The offset given in the constructor tells the container the offset
 * of the "next" pointer within the objects being stored here.  The offset
 * can be + or - relative to the pointer given to push().
 *
 * WARNING: to avoid the so-called "ABA" problem, the
 * container must begin with, and maintain, a reasonably large pool.
 * Otherwise recently-freed objects can be reused very quickly, which
 * opens a race that corrupts the internal list: a thread begins to
 * allocate (pop) an object, but other threads do enough pops and
 * pushes to cycle through all 8 version numbers (4 on 32-bit builds)
 * before the first thread finishes.  It's unlikely, but possible.
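 *
 * Example usage (a sketch only; the \c item type and the names below
 * are illustrative, not part of Shore-MT):
 * \code
 *   #include <cstddef>   // offsetof
 *
 *   struct item {
 *       item* next;      // embedded link used by the container
 *       int   payload;
 *   };
 *
 *   // tell the container where the link lives inside each object
 *   atomic_container pool(offsetof(item, next));
 *
 *   item* it = new item;              // the client owns the storage
 *   pool.push(it);                    // threaded through it->next
 *   item* it2 = (item*) pool.pop();   // NULL when the container is empty
 * \endcode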
 */
class atomic_container {
    /// \cond skipdoc
protected:
    struct ptr { ptr* next; };
    /// Unions for punning, i.e., type conversion
    union vpn { void* v; ptr* p; long n; };
    union pvn { ptr* p; void* v; long n; };
    /// \endcond skipdoc

public:
    typedef long int offset_typ;

    atomic_container(offset_typ offset)
        : _offset(offset), _locked(0), _active(0), _backup(0)
    { }

    /// Pop an item off the stack.
    ///  If there is nothing to pop, return a null pointer.
    ///   We never go to the global heap; the client must do that.
    void* pop() {
        pvn old_value = {*&_active};
        while(pointer(old_value)) {
            // swap if the head's pointer and version are unchanged
            pvn new_value = next_pointer(old_value);
            void* cur_value = atomic_cas_ptr(&_active, old_value.v, new_value.v);
            if(old_value.v == cur_value)
                break;

            // try again...
            old_value.v = cur_value;
        }

        // nothing on the active list? take the slow path
        return pointer(old_value)? prepare(old_value) : slow_pop();
    }

    /// Push an item onto the stack.
    void push(void* v) {
        // move to the embedded link ("next" pointer) for this allocation
        vpn u = {v};
        u.n += _offset;

        // enqueue it
        pvn old_value = { _backup };
        while(1) {
            u.p->next = old_value.p;
            membar_producer();
            void* cur_value = atomic_cas_ptr(&_backup, old_value.v, u.v);
            if(old_value.v == cur_value)
                break;

            // try again...
            old_value.v = cur_value;
        }
    }
    /// Only for debugging.
    offset_typ offset() const { return _offset; }

    ~atomic_container() {  // for shutdown/restart purposes:
        _locked = 0; _active = 0; _backup = 0;
    }

protected:
    /// Strip off the pointer's version number and back out the offset,
    /// yielding the pointer the client originally pushed.
    template<class Union>
    void* prepare(Union rval) {
        rval.n = pointer(rval) - _offset;
        return rval.v;
    }

    /// Return a pointer that prepare() turns into NULL (it holds only the offset).
    void* null() {
        union {
            offset_typ  i;
            void *v;
        } _pun = { _offset };
        return _pun.v;
    }

    offset_typ const _offset;

private:
    unsigned volatile _locked;
    /// The list of active items.
    /// Pop will pull things off this list until it's empty.
    ptr* volatile _active;
    /// The list of recently pushed items.
    ///  Push uses this list to avoid interference with pop.
    ptr* volatile _backup;
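
    // How the two lists cooperate: pop() drains _active lock-free; once it
    // runs dry, slow_pop() takes _locked, atomically detaches the whole
    // _backup chain, and installs it (minus the one item it keeps) as the
    // new _active with an incremented version number.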

#ifdef ARCH_LP64
    enum { VERSION_MASK=0x7 };
#else
    enum { VERSION_MASK=0x3 }; // alas, only 4 version numbers on 32-bit
#endif
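    // The low bits of the _active head double as a small version counter
    // (bumped each time slow_pop() installs a new batch), so the CAS in pop()
    // compares pointer and version together.  This relies on every link
    // address pushed here being at least 8-byte aligned (4-byte on 32-bit
    // builds), so those bits are zero in real addresses.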

    /// Return the pointer with the version bits removed.
    template<class Union>
    static long pointer(Union value) { return value.n &~VERSION_MASK; }

    /// Return the version number that's embedded in the pointer.
    static long version(long value) { return value & VERSION_MASK; }

    /// Return a non-dereferenceable pointer to the item after the given one.
    /// The given value may or may not have a version number embedded in it.
    /// The returned value carries the same version bits as the argument.
    static pvn next_pointer(pvn value) {
        long ver = version(value.n);
        value.n -= ver; // subtract out the version
        value.p = value.p->next; // get ptr to the next item
        value.n += ver; // add back in the version
        return value;
    }

    /// Spin until we either acquire the lock or notice that _active
    ///  has become non-null.  Return true only if we hold the lock
    ///  and _active is still empty.
    bool attempt_lock() {
        bool mine = false;
        pvn active = { *&_active };
        while(!mine) {
            while(*&_locked && !pointer(active)) active.p = *&_active;
            if(pointer(active)) return false;
            mine = !atomic_swap_32(&_locked, true);
        }
        if(mine) {
            membar_enter();
            active.p = *&_active;
            if(!pointer(active))
                return true;

            release_lock();
        }
        return false;
    }
    /// Release the lock.
    void release_lock() {
        membar_exit();
        _locked = false;
    }

    /// Grab the lock, swap the active and backup lists,
    ///  and try again to pop.
    void* slow_pop() {
        if(!attempt_lock())
            return pop(); // try again

        /* At this point (holding the lock) the _active list will
           not change to non-null on us. The _backup list may
           continue to grow, so we atomically cut it loose.
        */
        vpn rval = { atomic_swap_ptr(&_backup, NULL) };
        if(rval.p) {
            // keep the head for ourselves; the rest becomes the new _active
            pvn old_list = { _active };
            pvn new_list = {rval.p->next};
            new_list.n += version(old_list.n+1);
            _active = new_list.p;
        }
        else {
            rval.v = null();
        }

        release_lock();
        return prepare(rval);
    }

};

#endif
