Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
  
  
  package org.apache.hadoop.hbase.regionserver;
  
  import java.io.FileNotFoundException;
  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.Collection;
  import java.util.List;
  import java.util.Map;
  import java.util.UUID;
  
View to an on-disk Region. Provides the set of methods necessary to interact with the on-disk region data.
  
  public class HRegionFileSystem {
    public static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);

  
Name of the region info file that resides just under the region directory.
  
    public final static String REGION_INFO_FILE = ".regioninfo";

  
Temporary subdirectory of the region directory used for merges.
  
    public static final String REGION_MERGES_DIR = ".merges";

  
Temporary subdirectory of the region directory used for splits.
  
    public static final String REGION_SPLITS_DIR = ".splits";

  
Temporary subdirectory of the region directory used for compaction output.
  
    private static final String REGION_TEMP_DIR = ".tmp";
  
    private final HRegionInfo regionInfo;
    private final Configuration conf;
    private final Path tableDir;
    private final FileSystem fs;

  
In order to handle NN connectivity hiccups, one needs to retry non-idempotent operations at the client level.
  
    private final int hdfsClientRetriesNumber;
    private final int baseSleepBeforeRetries;
    private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
    private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
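These defaults can be overridden through the org.apache.hadoop.conf.Configuration handed to the constructor below. A hedged fragment (not part of this class; the values are arbitrary examples, not recommendations):

    // Tune the retry knobs this class reads at construction time.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hdfs.client.retries.number", 15);         // default: 10
    conf.setInt("hdfs.client.sleep.before.retries", 2000); // default: 1000 ms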

  
Create a view to the on-disk region

Parameters:
conf the org.apache.hadoop.conf.Configuration to use
fs org.apache.hadoop.fs.FileSystem that contains the region
tableDir org.apache.hadoop.fs.Path to where the table is being stored
regionInfo org.apache.hadoop.hbase.HRegionInfo for region
  
    HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
        final HRegionInfo regionInfo) {
      this.fs = fs;
      this.conf = conf;
      this.tableDir = tableDir;
      this.regionInfo = regionInfo;
      this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
          DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
      this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
          DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    }

  

Returns:
the underlying org.apache.hadoop.fs.FileSystem
 
   public FileSystem getFileSystem() {
     return this.fs;
   }

  

Returns:
the org.apache.hadoop.hbase.HRegionInfo that describe this on-disk region view
 
   public HRegionInfo getRegionInfo() {
     return this.regionInfo;
   }

  

Returns:
org.apache.hadoop.fs.Path to the table directory that contains this region
 
   public Path getTableDir() {
     return this.tableDir;
   }

  

Returns:
org.apache.hadoop.fs.Path to the region directory.
 
   public Path getRegionDir() {
     return new Path(this.tableDir, this.regionInfo.getEncodedName());
   }
 
   // ===========================================================================
   //  Temp Helpers
   // ===========================================================================
   

Returns:
org.apache.hadoop.fs.Path to the region's temp directory, used for file creations
 
   Path getTempDir() {
     return new Path(getRegionDir(), REGION_TEMP_DIR);
   }

  
Clean up any temp detritus that may have been left around from previous operation attempts.
 
   void cleanupTempDir() throws IOException {
     deleteDir(getTempDir());
   }
 
   // ===========================================================================
   //  Store/StoreFile Helpers
   // ===========================================================================
   
Returns the directory path of the specified family

Parameters:
familyName Column Family Name
Returns:
org.apache.hadoop.fs.Path to the directory of the specified family
 
   public Path getStoreDir(final String familyName) {
     return new Path(this.getRegionDir(), familyName);
   }

  
Create the store directory for the specified family name

Parameters:
familyName Column Family Name
Returns:
org.apache.hadoop.fs.Path to the directory of the specified family
Throws:
java.io.IOException if the directory creation fails.
 
   Path createStoreDir(final String familyName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     if (!fs.exists(storeDir) && !createDir(storeDir))
       throw new IOException("Failed creating " + storeDir);
     return storeDir;
   }

  
Returns the store files available for the family. This method performs filtering based on the valid store files.

Parameters:
familyName Column Family Name
Returns:
a set of StoreFileInfo for the specified family.
 
   public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
     return getStoreFiles(Bytes.toString(familyName));
   }
 
   public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
     return getStoreFiles(familyName, true);
   }

  
Returns the store files available for the family. This method performs filtering based on the valid store files.

Parameters:
familyName Column Family Name
Returns:
a set of StoreFileInfo for the specified family.
 
   public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
       throws IOException {
     Path familyDir = getStoreDir(familyName);
     FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
     if (files == null) {
       LOG.debug("No StoreFiles for: " + familyDir);
       return null;
     }
 
     ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
     for (FileStatus status : files) {
       if (validate && !StoreFileInfo.isValid(status)) {
         LOG.warn("Invalid StoreFile: " + status.getPath());
         continue;
       }
 
       storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
     }
     return storeFiles;
   }

  
Return Qualified Path of the specified family/file

Parameters:
familyName Column Family Name
fileName File Name
Returns:
The qualified Path for the specified family/file
 
   Path getStoreFilePath(final String familyName, final String fileName) {
     Path familyDir = getStoreDir(familyName);
     return new Path(familyDir, fileName).makeQualified(this.fs);
   }

  
Return the store file information of the specified family/file.

Parameters:
familyName Column Family Name
fileName File Name
Returns:
The StoreFileInfo for the specified family/file
 
   StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
       throws IOException {
     Path familyDir = getStoreDir(familyName);
     FileStatus status = fs.getFileStatus(new Path(familyDir, fileName));
     return new StoreFileInfo(this.conf, this.fs, status);
   }

  
Returns true if the specified family has reference files

Parameters:
familyName Column Family Name
Returns:
true if family contains reference files
Throws:
java.io.IOException
 
   public boolean hasReferences(final String familyName) throws IOException {
     FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
         new FSUtils.ReferenceFileFilter(fs));
     return files != null && files.length > 0;
   }

  
Check whether region has Reference file

Parameters:
htd table descriptor of the region
Returns:
true if region has reference file
Throws:
java.io.IOException
 
   public boolean hasReferences(final HTableDescriptor htd) throws IOException {
     for (HColumnDescriptor family : htd.getFamilies()) {
       if (hasReferences(family.getNameAsString())) {
         return true;
       }
     }
     return false;
   }
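
Since daughter regions keep Reference files pointing into their parent until a compaction rewrites them, this check is a natural way to tell whether a region's data is fully self-contained. A minimal sketch, assuming a regionFs view and the table's HTableDescriptor are available:

    // Hedged sketch: a region still carrying reference files is not yet
    // self-contained; compaction must rewrite the references first.
    static boolean isSelfContained(HRegionFileSystem regionFs, HTableDescriptor htd)
        throws IOException {
      return !regionFs.hasReferences(htd);
    }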

  

Returns:
the set of families present on disk
Throws:
java.io.IOException
 
   public Collection<String> getFamilies() throws IOException {
     FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
     if (fds == null) return null;
 
     ArrayList<String> families = new ArrayList<String>(fds.length);
     for (FileStatus status : fds) {
       families.add(status.getPath().getName());
     }
 
     return families;
   }
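
Combined with getStoreFiles(), this supports a walk of the region's on-disk layout. An illustrative sketch (regionFs is an assumed, already-open view); note both methods return null rather than an empty collection when nothing is found:

    static void listStoreFiles(HRegionFileSystem regionFs) throws IOException {
      Collection<String> families = regionFs.getFamilies();
      if (families == null) return;
      for (String family : families) {
        Collection<StoreFileInfo> files = regionFs.getStoreFiles(family);
        if (files == null) continue; // family dir present but no valid files
        for (StoreFileInfo storeFile : files) {
          LOG.info("family=" + family + " file=" + storeFile.getPath());
        }
      }
    }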

  
Remove the region family from disk, archiving the store files.

Parameters:
familyName Column Family Name
Throws:
java.io.IOException if an error occurs during the archiving
 
   public void deleteFamily(final String familyName) throws IOException {
     // archive family store files
     HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));
 
     // delete the family folder
     Path familyDir = getStoreDir(familyName);
     if (fs.exists(familyDir) && !deleteDir(familyDir))
       throw new IOException("Could not delete family " + familyName
           + " from FileSystem for region " + regionInfo.getRegionNameAsString() + "("
           + regionInfo.getEncodedName() + ")");
   }

  
Generate a unique file name, used by createTempName() and commitStoreFile()

Parameters:
suffix extra information to append to the generated name
Returns:
Unique file name
 
   private static String generateUniqueName(final String suffix) {
     String name = UUID.randomUUID().toString().replaceAll("-", "");
     if (suffix != null) name += suffix;
     return name;
   }

  
Generate a unique temporary Path. Used in conjunction with commitStoreFile() to get a safer file creation. Path file = fs.createTempName(); ...StoreFile.Writer(file)... fs.commitStoreFile("family", file);

Returns:
Unique org.apache.hadoop.fs.Path of the temporary file
 
   public Path createTempName() {
     return createTempName(null);
   }

  
Generate a unique temporary Path. Used in conjunction with commitStoreFile() to get a safer file creation. Path file = fs.createTempName(); ...StoreFile.Writer(file)... fs.commitStoreFile("family", file);

Parameters:
suffix extra information to append to the generated name
Returns:
Unique org.apache.hadoop.fs.Path of the temporary file
 
   public Path createTempName(final String suffix) {
     return new Path(getTempDir(), generateUniqueName(suffix));
   }
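
Putting the two methods together, the temp-then-commit pattern from the Javadoc above looks roughly like this (a sketch only: the hfile-writing step is elided and "f1" is a hypothetical family name):

    static Path writeAndCommit(HRegionFileSystem regionFs) throws IOException {
      // Write into the region's .tmp directory first, then atomically
      // rename the finished file into the family directory.
      Path tmpFile = regionFs.createTempName();
      // ... write store file contents to tmpFile, e.g. via a StoreFile.Writer ...
      return regionFs.commitStoreFile("f1", tmpFile);
    }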

  
Move the file from a build/temp location to the main family store directory.

Parameters:
familyName Family that will gain the file
buildPath org.apache.hadoop.fs.Path to the file to commit.
Returns:
The new org.apache.hadoop.fs.Path of the committed file
Throws:
java.io.IOException
 
   public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
     return commitStoreFile(familyName, buildPath, -1, false);
   }

  
Move the file from a build/temp location to the main family store directory.

Parameters:
familyName Family that will gain the file
buildPath org.apache.hadoop.fs.Path to the file to commit.
seqNum Sequence Number to append to the file name (less than 0 if no sequence number)
generateNewName False if you want to keep the buildPath name
Returns:
The new org.apache.hadoop.fs.Path of the committed file
Throws:
java.io.IOException
 
   private Path commitStoreFile(final String familyName, final Path buildPath,
       final long seqNum, final boolean generateNewName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     if (!fs.exists(storeDir) && !createDir(storeDir))
       throw new IOException("Failed creating " + storeDir);
 
     String name = buildPath.getName();
     if (generateNewName) {
       name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
     }
     Path dstPath = new Path(storeDir, name);
     if (!fs.exists(buildPath)) {
       throw new FileNotFoundException(buildPath.toString());
     }
     LOG.debug("Committing store file " + buildPath + " as " + dstPath);
     // buildPath exists, therefore not doing an exists() check.
     if (!rename(buildPath, dstPath)) {
       throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
     }
     return dstPath;
   }


  
Moves multiple store files into the corresponding family store directories of the region.

Parameters:
storeFiles list of store files divided by family
Throws:
java.io.IOException
 
   void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
     for (Map.Entry<byte[], List<StoreFile>> es : storeFiles.entrySet()) {
       String familyName = Bytes.toString(es.getKey());
       for (StoreFile sf : es.getValue()) {
         commitStoreFile(familyName, sf.getPath());
       }
     }
   }

  
Archives the specified store file from the specified family.

Parameters:
familyName Family that contains the store files
filePath org.apache.hadoop.fs.Path to the store file to remove
Throws:
java.io.IOException if the archiving fails
 
   public void removeStoreFile(final String familyName, final Path filePath)
       throws IOException {
     HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfo,
         this.tableDir, Bytes.toBytes(familyName), filePath);
   }

  
Closes and archives the specified store files from the specified family.

Parameters:
familyName Family that contains the store files
storeFiles set of store files to remove
Throws:
java.io.IOException if the archiving fails
 
   public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
       throws IOException {
     HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfo,
         this.tableDir, Bytes.toBytes(familyName), storeFiles);
   }

  
Bulk load: Add a specified store file to the specified family. If the source file is on the same file-system as the destination store, it is moved from the source location to the destination location; otherwise it is copied over.

Parameters:
familyName Family that will gain the file
srcPath org.apache.hadoop.fs.Path to the file to import
seqNum Bulk Load sequence number
Returns:
The destination org.apache.hadoop.fs.Path of the bulk loaded file
Throws:
java.io.IOException
 
   Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
       throws IOException {
     // Copy the file if it's on another filesystem
     FileSystem srcFs = srcPath.getFileSystem(conf);
     FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;
 
     // We can't compare FileSystem instances as equals() includes UGI instance
     // as part of the comparison and won't work when doing SecureBulkLoad
     // TODO deal with viewFS
     if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
       LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
           "the destination store. Copying file over to destination filesystem.");
       Path tmpPath = createTempName();
       FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
       LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
       srcPath = tmpPath;
     }
 
     return commitStoreFile(familyName, srcPath, seqNum, true);
   }
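
A hedged usage sketch (the method is package-private, so this only compiles inside org.apache.hadoop.hbase.regionserver; "f1" and the arguments are assumptions for illustration):

    static Path importHFile(HRegionFileSystem regionFs, Path hfileToLoad, long seqNum)
        throws IOException {
      // The file is copied first if it lives on a different HDFS, then
      // committed with a "_SeqId_<seqNum>_" suffix appended to its name.
      return regionFs.bulkLoadStoreFile("f1", hfileToLoad, seqNum);
    }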
 
   // ===========================================================================
   //  Splits Helpers
   // ===========================================================================
   

Returns:
org.apache.hadoop.fs.Path to the temp directory used during split operations
 
   Path getSplitsDir() {
     return new Path(getRegionDir(), REGION_SPLITS_DIR);
   }
 
   Path getSplitsDir(final HRegionInfo hri) {
     return new Path(getSplitsDir(), hri.getEncodedName());
   }

  
Clean up any split detritus that may have been left around from previous split attempts.
 
   void cleanupSplitsDir() throws IOException {
     deleteDir(getSplitsDir());
   }

  
Clean up any split detritus that may have been left around from previous split attempts. Call this method on initial region deploy.

 
   void cleanupAnySplitDetritus() throws IOException {
     Path splitdir = this.getSplitsDir();
     if (!fs.exists(splitdir)) return;
     // Look at the splitdir.  It could have the encoded names of the daughter
     // regions we tried to make.  See if the daughter regions actually got made
     // out under the tabledir.  If here under splitdir still, then the split did
     // not complete.  Try and do cleanup.  This code WILL NOT catch the case
     // where we successfully created daughter a but regionserver crashed during
     // the creation of region b.  In this case, there'll be an orphan daughter
     // dir in the filesystem.  TODO: Fix.
     FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
     if (daughters != null) {
       for (FileStatus daughter : daughters) {
         Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
         if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
           throw new IOException("Failed delete of " + daughterDir);
         }
       }
     }
     cleanupSplitsDir();
     .info("Cleaned up old failed split transaction detritus: " + splitdir);
   }

  
Remove daughter region

Parameters:
regionInfo daughter org.apache.hadoop.hbase.HRegionInfo
Throws:
java.io.IOException
 
   void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
     Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
     if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
       throw new IOException("Failed delete of " + regionDir);
     }
   }

  
Commit a daughter region, moving it from the split temporary directory to the proper location in the filesystem.

Parameters:
regionInfo daughter org.apache.hadoop.hbase.HRegionInfo
Throws:
java.io.IOException
 
   Path commitDaughterRegion(final HRegionInfo regionInfo)
       throws IOException {
     Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
     Path daughterTmpDir = this.getSplitsDir(regionInfo);
 
     if (fs.exists(daughterTmpDir)) {
 
       // Write HRI to a file in case we need to recover hbase:meta
       Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
       byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
       writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
 
       // Move the daughter temp dir to the table dir
       if (!rename(daughterTmpDir, regionDir)) {
         throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
       }
     }
 
     return regionDir;
   }

  
Create the region splits directory.
 
   void createSplitsDir() throws IOException {
     Path splitdir = getSplitsDir();
     if (fs.exists(splitdir)) {
       LOG.info("The " + splitdir + " directory exists.  Hence deleting it to recreate it");
       if (!deleteDir(splitdir)) {
         throw new IOException("Failed deletion of " + splitdir
             + " before creating them again.");
       }
     }
     // splitdir doesn't exist now. No need to do an exists() call for it.
     if (!createDir(splitdir)) {
       throw new IOException("Failed create of " + splitdir);
     }
   }

  
Write out a split reference. Package local so it doesn't leak out of regionserver.

Parameters:
hri org.apache.hadoop.hbase.HRegionInfo of the destination
familyName Column Family Name
f File to split.
splitRow Split Row
top True if we are referring to the top half of the hfile.
splitPolicy
Returns:
Path to created reference.
Throws:
java.io.IOException
 
   Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
       final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy) throws IOException {
 
     if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck()) {
       // Check whether the split row lies in the range of the store file
       // If it is outside the range, return directly.
       if (top) {
         //check if larger than last key.
         KeyValue splitKey = KeyValue.createFirstOnRow(splitRow);
         byte[] lastKey = f.createReader().getLastKey();      
         // If lastKey is null means storefile is empty.
         if (lastKey == null) return null;
         if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
           splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
           return null;
         }
       } else {
         //check if smaller than first key
         KeyValue splitKey = KeyValue.createLastOnRow(splitRow);
         byte[] firstKey = f.createReader().getFirstKey();
         // If firstKey is null means storefile is empty.
         if (firstKey == null) return null;
         if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
           splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
           return null;
         }
       }
     }
 
     f.closeReader(true);
 
     Path splitDir = new Path(getSplitsDir(hri), familyName);
     // A reference to the top or bottom half of the store file, depending on the top flag.
     Reference r =
       top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
     // Add the referred-to regions name as a dot separated suffix.
     // See REF_NAME_REGEX regex above.  The referred-to regions name is
     // up in the path of the passed in <code>f</code> -- parentdir is family,
     // then the directory above is the region name.
     String parentRegionName = regionInfo.getEncodedName();
     // Write reference with same file id only with the other region name as
     // suffix and into the new region location (under same family).
     Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
     return r.write(fs, p);
   }
 
   // ===========================================================================
   //  Merge Helpers
   // ===========================================================================
   

Returns:
org.apache.hadoop.fs.Path to the temp directory used during merge operations
 
   Path getMergesDir() {
     return new Path(getRegionDir(), REGION_MERGES_DIR);
   }
 
   Path getMergesDir(final HRegionInfo hri) {
     return new Path(getMergesDir(), hri.getEncodedName());
   }

  
Clean up any merge detritus that may have been left around from previous merge attempts.
 
   void cleanupMergesDir() throws IOException {
     deleteDir(getMergesDir());
   }

  
Remove merged region

 
   void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
     Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
     if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
       throw new IOException("Failed delete of " + regionDir);
     }
   }

  
Create the region merges directory.

Throws:
java.io.IOException If merges dir already exists or we fail to create it.
See also:
cleanupMergesDir()
 
   void createMergesDir() throws IOException {
     Path mergesdir = getMergesDir();
     if (fs.exists(mergesdir)) {
       LOG.info("The " + mergesdir
           + " directory exists.  Hence deleting it to recreate it");
       if (!fs.delete(mergesdir, true)) {
         throw new IOException("Failed deletion of " + mergesdir
             + " before creating them again.");
       }
     }
     if (!fs.mkdirs(mergesdir))
       throw new IOException("Failed create of " + mergesdir);
   }

  
Write out a merge reference under the given merges directory. Package local so it doesn't leak out of regionserver.

Parameters:
mergedRegion org.apache.hadoop.hbase.HRegionInfo of the merged region
familyName Column Family Name
f File to create reference.
mergedDir
Returns:
Path to created reference.
Throws:
java.io.IOException
 
   Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
       final StoreFile f, final Path mergedDir)
       throws IOException {
     Path referenceDir = new Path(new Path(mergedDir,
         mergedRegion.getEncodedName()), familyName);
     // A whole reference to the store file.
     Reference r = Reference.createTopReference(regionInfo.getStartKey());
     // Add the referred-to regions name as a dot separated suffix.
     // See REF_NAME_REGEX regex above. The referred-to regions name is
     // up in the path of the passed in <code>f</code> -- parentdir is family,
     // then the directory above is the region name.
     String mergingRegionName = regionInfo.getEncodedName();
     // Write reference with same file id only with the other region name as
     // suffix and into the new region location (under same family).
     Path p = new Path(referenceDir, f.getPath().getName() + "."
         + mergingRegionName);
     return r.write(fs, p);
   }

  
Commit a merged region, moving it from the merges temporary directory to the proper location in the filesystem.

Parameters:
mergedRegionInfo merged region org.apache.hadoop.hbase.HRegionInfo
Throws:
java.io.IOException
 
   void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
     Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
     Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
     // Move the tmp dir in the expected location
     if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
       if (!fs.rename(mergedRegionTmpDir, regionDir)) {
         throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
             + regionDir);
       }
     }
   }
 
   // ===========================================================================
   //  Create/Open/Delete Helpers
   // ===========================================================================
   
Log the current state of the region

Parameters:
LOG log to output information
Throws:
java.io.IOException if an unexpected exception occurs
 
   void logFileSystemState(final Log LOG) throws IOException {
     FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
   }

  

Parameters:
hri
Returns:
Content of the file we write out to the filesystem under a region
Throws:
java.io.IOException
 
   private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
     return hri.toDelimitedByteArray();
   }

  
Create a org.apache.hadoop.hbase.HRegionInfo from the serialized version on-disk.

Parameters:
fs org.apache.hadoop.fs.FileSystem that contains the Region Info file
regionDir org.apache.hadoop.fs.Path to the Region Directory that contains the Info file
Returns:
An org.apache.hadoop.hbase.HRegionInfo instance gotten from the Region Info file.
Throws:
java.io.IOException if an error occurred during file open/read operation.
 
   public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
       throws IOException {
     FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
     try {
       return HRegionInfo.parseFrom(in);
     } finally {
       in.close();
     }
   }
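
Because this helper is public and static, it can be used for offline inspection or recovery without constructing a region view. An illustrative sketch (fs and regionDir are assumed):

    static HRegionInfo recoverRegionInfo(FileSystem fs, Path regionDir) throws IOException {
      // Re-read the .regioninfo file, e.g. while rebuilding hbase:meta
      // from what is actually on disk.
      HRegionInfo recovered = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
      LOG.info("Recovered: " + recovered.getRegionNameAsString());
      return recovered;
    }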

  
Write the .regioninfo file on-disk.
 
   private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
       final Path regionInfoFile, final byte[] content) throws IOException {
     // First check to get the permissions
     FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
     // Write the RegionInfo file content
     FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
     try {
       out.write(content);
     } finally {
       out.close();
     }
   }

  
Write out an info file under the region directory. Useful when recovering mangled regions. If the regionInfo already exists on-disk, then we fast exit.
 
   void checkRegionInfoOnFilesystem() throws IOException {
     // Compose the content of the file so we can compare to length in filesystem. If not same,
     // rewrite it (it may have been written in the old format using Writables instead of pb). The
     // pb version is much shorter -- we write now w/o the toString version -- so checking length
     // only should be sufficient. I don't want to read the file every time to check if it pb
     // serialized.
     byte[] content = getRegionInfoFileContent(regionInfo);
     try {
       Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
 
       FileStatus status = fs.getFileStatus(regionInfoFile);
       if (status != null && status.getLen() == content.length) {
         // Then assume the content good and move on.
         // NOTE: the length alone is not sufficient to verify that the content matches.
         return;
       }
 
       .info("Rewriting .regioninfo file at: " + regionInfoFile);
       if (!.delete(regionInfoFilefalse)) {
         throw new IOException("Unable to remove existing " + regionInfoFile);
       }
     } catch (FileNotFoundException e) {
       .warn( + " file not found for region: " + .getEncodedName());
     }
 
     // Write HRI to a file in case we need to recover hbase:meta
     writeRegionInfoOnFilesystem(content, true);
   }

  
Write out an info file under the region directory. Useful when recovering mangled regions.

Parameters:
useTempDir indicates whether or not to use the region .tmp dir for safer file creation.
 
   private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
     byte[] content = getRegionInfoFileContent(regionInfo);
     writeRegionInfoOnFilesystem(content, useTempDir);
   }

  
Write out an info file under the region directory. Useful when recovering mangled regions.

Parameters:
regionInfoContent serialized version of the org.apache.hadoop.hbase.HRegionInfo
useTempDir indicates whether or not to use the region .tmp dir for safer file creation.
 
   private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
       final boolean useTempDir) throws IOException {
     Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
     if (useTempDir) {
       // Create in tmpDir and then move into place in case we crash after
       // create but before close. If we don't successfully close the file,
       // subsequent region reopens will fail the below because create is
       // registered in NN.
 
       // And then create the file
       Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
 
       // If datanode crashes or if the RS goes down just before the close is called while trying to
       // close the created regioninfo file in the .tmp directory then on next
       // creation we will be getting AlreadyCreatedException.
       // Hence delete and create the file if exists.
       if (FSUtils.isExists(fs, tmpPath)) {
         FSUtils.delete(fs, tmpPath, true);
       }
 
       // Write HRI to a file in case we need to recover hbase:meta
       writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
 
       // Move the created file to the original path
       if (.exists(tmpPath) &&  !rename(tmpPathregionInfoFile)) {
         throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
       }
     } else {
       // Write HRI to a file in case we need to recover hbase:meta
       writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
     }
   }

  
Create a new Region on file-system.

Parameters:
conf the org.apache.hadoop.conf.Configuration to use
fs org.apache.hadoop.fs.FileSystem from which to add the region
tableDir org.apache.hadoop.fs.Path to where the table is being stored
regionInfo org.apache.hadoop.hbase.HRegionInfo for region to be added
Throws:
java.io.IOException if the region creation fails due to a FileSystem exception.
 
   public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
     if (fs.exists(regionDir)) {
       .warn("Trying to create a region that already exists on disk: " + regionDir);
       throw new IOException("The specified region already exists on disk: " + regionDir);
     }
 
     // Create the region directory
     if (!createDirOnFileSystem(fs, conf, regionDir)) {
       LOG.warn("Unable to create the region directory: " + regionDir);
       throw new IOException("Unable to create region directory: " + regionDir);
     }
 
     // Write HRI to a file in case we need to recover hbase:meta
     regionFs.writeRegionInfoOnFilesystem(false);
     return regionFs;
   }

  
Open Region from file-system.

Parameters:
conf the org.apache.hadoop.conf.Configuration to use
fs org.apache.hadoop.fs.FileSystem from which to add the region
tableDir org.apache.hadoop.fs.Path to where the table is being stored
regionInfo org.apache.hadoop.hbase.HRegionInfo for region to be added
readOnly True if you don't want to edit the region data
Throws:
java.io.IOException if the region creation fails due to a FileSystem exception.
 
   public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
       throws IOException {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
     if (!fs.exists(regionDir)) {
       .warn("Trying to open a region that do not exists on disk: " + regionDir);
       throw new IOException("The specified region do not exists on disk: " + regionDir);
     }
 
     if (!readOnly) {
       // Cleanup temporary directories
       regionFs.cleanupTempDir();
       regionFs.cleanupSplitsDir();
       regionFs.cleanupMergesDir();
 
       // if it doesn't exist, write the HRI to a file, in case we need to recover hbase:meta
       regionFs.checkRegionInfoOnFilesystem();
     }
 
     return regionFs;
   }
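
A hedged end-to-end sketch (conf, fs, tableDir and hri are assumed to be supplied by the caller): opening read-only skips the cleanup and .regioninfo checks above, leaving the on-disk state untouched.

    static void inspectRegion(Configuration conf, FileSystem fs, Path tableDir,
        HRegionInfo hri) throws IOException {
      HRegionFileSystem regionFs =
          HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true);
      LOG.info("Region dir: " + regionFs.getRegionDir());
      LOG.info("Families on disk: " + regionFs.getFamilies());
    }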

  
Remove the region from the table directory, archiving the region's hfiles.

Parameters:
conf the org.apache.hadoop.conf.Configuration to use
fs org.apache.hadoop.fs.FileSystem from which to remove the region
tableDir org.apache.hadoop.fs.Path to where the table is being stored
regionInfo org.apache.hadoop.hbase.HRegionInfo for region to be deleted
Throws:
java.io.IOException if the request cannot be completed
 
   public static void deleteRegionFromFileSystem(final Configuration conf,
       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
     if (!fs.exists(regionDir)) {
       .warn("Trying to delete a region that do not exists on disk: " + regionDir);
       return;
     }
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("DELETING region " + regionDir);
     }
 
     // Archive region
     Path rootDir = FSUtils.getRootDir(conf);
     HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
 
     // Delete empty region dir
     if (!fs.delete(regionDir, true)) {
       LOG.warn("Failed delete of " + regionDir);
     }
   }

  
Creates a directory. Assumes the user has already checked for this directory's existence.

Parameters:
dir
Returns:
the result of fs.mkdirs(). In case the underlying fs throws an IOException, it checks whether the directory exists or not, and returns true if it exists.
Throws:
java.io.IOException
 
   boolean createDir(Path dir) throws IOException {
     int i = 0;
     IOException lastIOE = null;
     do {
       try {
         return fs.mkdirs(dir);
       } catch (IOException ioe) {
         lastIOE = ioe;
         if (fs.exists(dir)) return true; // directory is present
         sleepBeforeRetry("Create Directory", i+1);
       }
     } while (++i <= hdfsClientRetriesNumber);
     throw new IOException("Exception in createDir", lastIOE);
   }

  
Renames a directory. Assumes the user has already checked for this directory's existence.

Parameters:
srcpath
dstPath
Returns:
true if rename is successful.
Throws:
java.io.IOException
 
   boolean rename(Path srcpath, Path dstPath) throws IOException {
     IOException lastIOE = null;
     int i = 0;
     do {
       try {
         return fs.rename(srcpath, dstPath);
       } catch (IOException ioe) {
         lastIOE = ioe;
         if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
         // dir is not there, retry after some time.
         sleepBeforeRetry("Rename Directory", i+1);
       }
     } while (++i <= hdfsClientRetriesNumber);
     throw new IOException("Exception in rename", lastIOE);
   }

  
Deletes a directory. Assumes the user has already checked for this directory's existence.

Parameters:
dir
Returns:
true if the directory is deleted.
Throws:
java.io.IOException
 
   boolean deleteDir(Path dir) throws IOException {
     IOException lastIOE = null;
     int i = 0;
     do {
       try {
         return fs.delete(dir, true);
       } catch (IOException ioe) {
         lastIOE = ioe;
         if (!fs.exists(dir)) return true;
         // dir is there, retry deleting after some time.
         sleepBeforeRetry("Delete Directory", i+1);
       }
     } while (++i <= hdfsClientRetriesNumber);
     throw new IOException("Exception in DeleteDir", lastIOE);
  }

  
sleeping logic; handles the interrupt exception.
 
  private void sleepBeforeRetry(String msg, int sleepMultiplier) {
    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
  }

  
Creates a directory for a filesystem and configuration object. Assumes the user has already checked for this directory's existence.

Parameters:
fs
conf
dir
Returns:
the result of fs.mkdirs(). In case the underlying fs throws an IOException, it checks whether the directory exists or not, and returns true if it exists.
Throws:
java.io.IOException
  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
      throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        sleepBeforeRetry("Create Directory", i+1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir"lastIOE);
  }

  
sleeping logic for static methods; handles the interrupt exception. Keeping a static version for this to avoid re-looking for the integer values.
  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
      int hdfsClientRetriesNumber) {
    if (sleepMultiplier > hdfsClientRetriesNumber) {
      LOG.debug(msg + ", retries exhausted");
      return;
    }
    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
    Threads.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
  }