/*
 * JBoss, Home of Professional Open Source
 * Copyright 2010, Red Hat, Inc. and/or its affiliates,
 * and individual contributors as indicated by the @author tags.
 * See the copyright.txt in the distribution for a
 * full listing of individual contributors.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License, v. 2.1.
 * This program is distributed in the hope that it will be useful, but WITHOUT A
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
 * PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public License,
 * v.2.1 along with this distribution; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301, USA.
 *
 * (C) 2010,
 * @author JBoss, by Red Hat.
 */
package com.arjuna.ats.internal.arjuna.objectstore.hornetq;

import java.io.File;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.hornetq.core.journal.*;
import org.hornetq.core.journal.impl.*;

import com.arjuna.ats.arjuna.common.Uid;
import com.arjuna.ats.arjuna.exceptions.ObjectStoreException;
import com.arjuna.ats.arjuna.logging.tsLogger;
import com.arjuna.ats.arjuna.state.*;
import com.arjuna.ats.internal.arjuna.common.UidHelper;
 
 
/**
 * Implementation of the tx store backed by HornetQ's journal.
 * This is a bean suitable for hooking into the app server lifecycle.
 *
 * @author Jonathan Halliday (jonathan.halliday@redhat.com), 2010-03
 */
 public class HornetqJournalStore
 {
     private final Journal journal;
 
    private final ConcurrentMap<String, Map<Uid, RecordInfo>> content = new ConcurrentHashMap<String, Map<Uid, RecordInfo>>();
 
     private final Object uidMappingLock = new Object();
     private final boolean syncWrites;
     private final boolean syncDeletes;
     private long maxID = 0;
 
     private final String storeDirCanonicalPath;
 
     private static final byte RECORD_TYPE = 0x00;
 
     public void stop() throws Exception {
        journal.stop();
     }
 
     public void start() throws Exception {
 
        journal.start();

        List<RecordInfo> committedRecords = new LinkedList<RecordInfo>();
        List<PreparedTransactionInfo> preparedTransactions = new LinkedList<PreparedTransactionInfo>();
         TransactionFailureCallback failureCallback = new TransactionFailureCallback() {
            public void failedTransaction(long l, java.util.List<org.hornetq.core.journal.RecordInfo> recordInfos, java.util.List<org.hornetq.core.journal.RecordInfo> recordInfos1) {
                tsLogger.i18NLogger.warn_journal_load_error();
             }
         };
 
        JournalLoadInformation journalLoadInformation = journal.load(committedRecords, preparedTransactions, failureCallback);
        maxID = journalLoadInformation.getMaxID();
 
         if(!preparedTransactions.isEmpty()) {
            tsLogger.i18NLogger.warn_journal_load_error();
         }
 
         for(RecordInfo record : committedRecords) {
             InputBuffer inputBuffer = new InputBuffer(record.data);
             Uid uid = UidHelper.unpackFrom(inputBuffer);
             String typeName = inputBuffer.unpackString();
            getContentForType(typeName).put(uid, record);
            // don't unpack the rest yet, we may never need it. read_committed does it on demand.
        }
    }
    public HornetqJournalStore(HornetqJournalEnvironmentBean envBean) throws IOException {

        syncWrites = envBean.isSyncWrites();
        syncDeletes = envBean.isSyncDeletes();

        File storeDir = new File(envBean.getStoreDir());
        if(!storeDir.exists() && !storeDir.mkdirs()) {
            throw new IOException(tsLogger.i18NLogger.get_dir_create_failed(storeDir.getCanonicalPath()));
        }
        storeDirCanonicalPath = storeDir.getCanonicalPath();
        SequentialFileFactory sequentialFileFactory;
        if(envBean.isAsyncIO() && AIOSequentialFileFactory.isSupported()) {
            sequentialFileFactory = new AIOSequentialFileFactory(
                    envBean.getStoreDir(),
                    envBean.getBufferSize(),
                    (int)(1000000000d / envBean.getBufferFlushesPerSecond()), // bufferTimeout nanos .000000001 second
                    envBean.isLogRates());
        } else {
            sequentialFileFactory = new NIOSequentialFileFactory(
                    envBean.getStoreDir(),
                    true,
                    envBean.getBufferSize(),
                    (int)(1000000000d / envBean.getBufferFlushesPerSecond()), // bufferTimeout nanos .000000001 second
                    envBean.isLogRates());
        }
        journal = new JournalImpl(envBean.getFileSize(), envBean.getMinFiles(), envBean.getCompactMinFiles(),
                        envBean.getCompactPercentage(), sequentialFileFactory, envBean.getFilePrefix(),
                        envBean.getFileExtension(), envBean.getMaxIO());
    }


    
    /**
     * Remove the object's committed state.
     *
     * @param uid The object to work on.
     * @param typeName The type of the object to work on.
     * @return true if no errors occurred, false otherwise.
     * @throws com.arjuna.ats.arjuna.exceptions.ObjectStoreException if things go wrong.
     */
    public boolean remove_committed(Uid uid, String typeName) throws ObjectStoreException
    {
        try {
            long id = getId(uid, typeName); // look up the id *before* doing the remove from state, or it won't be there any more.
            getContentForType(typeName).remove(uid);
            journal.appendDeleteRecord(id, syncDeletes);
            return true;
        } catch (IllegalStateException e) {
            return false;
        } catch(Exception e) {
            throw new ObjectStoreException(e);
        }
    }

    
    /**
     * Write a new copy of the object's committed state.
     *
     * @param uid The object to work on.
     * @param typeName The type of the object to work on.
     * @param txData The state to write.
     * @return true if no errors occurred, false otherwise.
     * @throws com.arjuna.ats.arjuna.exceptions.ObjectStoreException if things go wrong.
     */
    public boolean write_committed(Uid uid, String typeName, OutputObjectState txData) throws ObjectStoreException
    {
        try {
            OutputBuffer outputBuffer = new OutputBuffer();
            UidHelper.packInto(uid, outputBuffer);
            outputBuffer.packString(typeName);
            outputBuffer.packBytes(txData.buffer());
            long id = getId(uid, typeName);
            byte[] data = outputBuffer.buffer();
            // yup, there is a race condition here.
            if(getContentForType(typeName).containsKey(uid)) {
                journal.appendUpdateRecord(id, RECORD_TYPE, data, syncWrites);
            } else {
                journal.appendAddRecord(id, RECORD_TYPE, data, syncWrites);
            }
            RecordInfo record = new RecordInfo(id, RECORD_TYPE, data, false, (short)0);
            getContentForType(typeName).put(uid, record);
        } catch(Exception e) {
            throw new ObjectStoreException(e);
        }
        return true;
    }

    
    /**
     * Read the object's committed state.
     *
     * @param uid The object to work on.
     * @param typeName The type of the object to work on.
     * @return the state of the object.
     * @throws com.arjuna.ats.arjuna.exceptions.ObjectStoreException if things go wrong.
     */
    public InputObjectState read_committed(Uid uid, String typeName) throws ObjectStoreException
    {
        RecordInfo record = getContentForType(typeName).get(uid);
        if(record == null) {
            return null;
        }
        // this repeated unpacking is a little inefficient - subclass RecordInfo to hold unpacked form too?
        // not too much of an issue as log reads are done for recovery only.
        try {
            InputBuffer inputBuffer = new InputBuffer(record.data);
            Uid unpackedUid = UidHelper.unpackFrom(inputBuffer);
            String unpackedTypeName = inputBuffer.unpackString();
            InputObjectState inputObjectState = new InputObjectState(uid, typeName, inputBuffer.unpackBytes());
            return inputObjectState;
        } catch(Exception e) {
            throw new ObjectStoreException(e);
        }
    }

    public boolean contains(Uid uid, String typeName) {
        RecordInfo record = getContentForType(typeName).get(uid);
        return record != null;
    }

    

    /**
     * @return the "name" of the object store. Where in the hierarchy it appears, e.g., /ObjectStore/MyName/...
     */
    public String getStoreName()
    {
        return this.getClass().getSimpleName()+":"+storeDirCanonicalPath;
    }
    public String[] getKnownTypes() {
        return content.keySet().toArray(new String[content.size()]);
    }
    public Uid[] getUidsForType(String typeName) {
        Set<Uid> keySet = getContentForType(typeName).keySet();
        return keySet.toArray(new Uid[keySet.size()]);
    }
    /////////////////////////////////
    private Map<Uid, RecordInfo> getContentForType(String typeName) {
        Map<Uid, RecordInfo> result = content.get(typeName);
        if(result == null) {
            content.putIfAbsent(typeName, new ConcurrentHashMap<Uid, RecordInfo>());
            result = content.get(typeName);
        }
        return result;
    }
    private long getId(Uid uid, String typeName) {
        synchronized (uidMappingLock) {
            RecordInfo record = getContentForType(typeName).get(uid);
            if(record != null) {
                return record.id;
            } else {
                maxID++;
                return maxID;
            }
        }
    }
}
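
The class Javadoc above describes this store as a lifecycle bean: it is built from an environment bean, started (which starts the journal and rebuilds the in-memory record map from committed records), used through write_committed/read_committed/remove_committed, and finally stopped. The following is a minimal usage sketch, not part of the original file: the no-arg HornetqJournalEnvironmentBean constructor, its setStoreDir(...) setter, the store directory path, the type name string and the packed payload are assumptions made purely for illustration; only the getters on the bean and the store's own methods appear in the listing above.

// Usage sketch only - not part of HornetqJournalStore.java.
// Assumes HornetqJournalEnvironmentBean (same package) has a no-arg constructor
// and a setStoreDir(...) setter paired with the getStoreDir() seen above.
package com.arjuna.ats.internal.arjuna.objectstore.hornetq;

import com.arjuna.ats.arjuna.common.Uid;
import com.arjuna.ats.arjuna.state.InputObjectState;
import com.arjuna.ats.arjuna.state.OutputObjectState;

public class HornetqJournalStoreUsageSketch {
    public static void main(String[] args) throws Exception {
        HornetqJournalEnvironmentBean envBean = new HornetqJournalEnvironmentBean();
        envBean.setStoreDir("/var/tmp/hornetq-journal-store"); // illustrative path, assumed setter

        HornetqJournalStore store = new HornetqJournalStore(envBean);
        store.start(); // starts the journal and reloads committed records into memory

        Uid uid = new Uid();
        OutputObjectState txData = new OutputObjectState();
        txData.packString("example state"); // illustrative payload

        String typeName = "/StateManager/BasicAction"; // illustrative type name
        store.write_committed(uid, typeName, txData);                     // appends an add (or update) record
        InputObjectState restored = store.read_committed(uid, typeName); // unpacks the stored state on demand
        System.out.println("found state: " + (restored != null));
        store.remove_committed(uid, typeName);                            // appends a delete record

        store.stop(); // stops the journal
    }
}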