fix hdfs federation #441
base: master
@@ -19,6 +19,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.NotInMountpointException;
import org.apache.log4j.Logger;
import java.io.IOException;

@@ -44,6 +45,22 @@ public static void load() {
      HDFS_BLOCK_SIZE = FileSystem.get(new Configuration()).getDefaultBlockSize(new Path("/"));
    } catch (IOException e) {
      logger.error("Error getting FS Block Size!", e);
    } catch (NotInMountpointException e) {
      logger.warn("Fix hdfs federation : The path / does not exist. Will try /tmp");
      try {
        HDFS_BLOCK_SIZE = FileSystem.get(new Configuration()).getDefaultBlockSize(new Path("/tmp"));
      } catch (IOException e1) {
        e1.printStackTrace();
      } catch (NotInMountpointException e2) {
        logger.warn("Fix hdfs federation : The path /tmp does not exist. Use default block size 128 * 1024 * 1024");
        // Currently most companies use 128M as the default block size
        HDFS_BLOCK_SIZE = 128 * 1024 * 1024;
Review comment on the 128 MB fallback: I think it's better to have 64 MB as the default (as that's the HDFS default block size). There should be a property in the configuration file for the default block size (or for the path that is mounted, instead of looking for / or /tmp). If getDefaultBlockSize throws an exception, the user-provided default size can be taken (or the other way round: check whether the user has provided a default block size; if not, call getDefaultBlockSize, and if that also throws, fall back to 64 MB). A sketch of this approach follows the diff.
      }
    } catch (Exception s) {
      logger.warn("Error getting FS Block Size!", s);
      HDFS_BLOCK_SIZE = 128 * 1024 * 1024;
    }

    logger.info("HDFS BLock size: " + HDFS_BLOCK_SIZE);
Review comment on the e1.printStackTrace() call: Please log the error instead of calling printStackTrace.
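
Below is a minimal sketch of what both reviewers are asking for, assuming a couple of hypothetical configuration keys (the project may use different names): prefer a user-supplied block size, otherwise query the file system on a configurable mount path, log any failure through log4j instead of printStackTrace, and only then fall back to the 64 MB HDFS default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.NotInMountpointException;
import org.apache.log4j.Logger;

import java.io.IOException;

public class HdfsBlockSizeLoader {

  private static final Logger logger = Logger.getLogger(HdfsBlockSizeLoader.class);

  // Hypothetical property names; not keys the project actually defines.
  private static final String BLOCK_SIZE_KEY = "hdfs.default.block.size";
  private static final String MOUNT_PATH_KEY = "hdfs.mount.path";

  // 64 MB, the classic HDFS default suggested in the review.
  private static final long FALLBACK_BLOCK_SIZE = 64L * 1024 * 1024;

  public static long loadBlockSize(Configuration conf) {
    // 1. If the user configured a block size explicitly, use it and skip the FS call.
    long configured = conf.getLong(BLOCK_SIZE_KEY, -1L);
    if (configured > 0) {
      return configured;
    }

    // 2. Otherwise ask the file system, using a configurable mount path instead of
    //    hard-coding "/" or "/tmp" (which may not be mount points under federation/viewfs).
    String mountPath = conf.get(MOUNT_PATH_KEY, "/");
    try {
      return FileSystem.get(conf).getDefaultBlockSize(new Path(mountPath));
    } catch (IOException | NotInMountpointException e) {
      // 3. Log the failure instead of calling printStackTrace, then fall back to 64 MB.
      logger.error("Could not determine block size for " + mountPath
          + ", falling back to " + FALLBACK_BLOCK_SIZE + " bytes", e);
      return FALLBACK_BLOCK_SIZE;
    }
  }
}

With something like this, load() could simply do HDFS_BLOCK_SIZE = HdfsBlockSizeLoader.loadBlockSize(new Configuration());, keeping the fallback logic in one place.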