Description
It seems that IntegrationTestLoadAndVerify now ignores the most important parameters for running it in distributed mode:
hbase org.apache.hadoop.hbase.test.IntegrationTestLoadAndVerify -Dloadmapper.backrefs=50 -Dloadmapper.map.tasks=30 -Dloadmapper.num_to_write=10000000 -Dverify.reduce.tasks=30 -Dverify.scannercaching=10000 loadAndVerify
would still launch a job that writes only 2000 keys and runs with 2 mappers, i.e. the scaled-down defaults intended for the minicluster rather than the -D overrides.
Likely cause: HBASE-11253, which moved the scale-down of the defaults outside the !util.isDistributedCluster() check:
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index 390b894..a1da601 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -123,10 +123,10 @@ public class IntegrationTestLoadAndVerify extends IntegrationTestBase {
     util = getTestingUtil(getConf());
     util.initializeCluster(3);
     this.setConf(util.getConfiguration());
+    getConf().setLong(NUM_TO_WRITE_KEY, NUM_TO_WRITE_DEFAULT / 100);
+    getConf().setInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT / 100);
+    getConf().setInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT / 10);
     if (!util.isDistributedCluster()) {
-      getConf().setLong(NUM_TO_WRITE_KEY, NUM_TO_WRITE_DEFAULT / 100);
-      getConf().setInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT / 100);
-      getConf().setInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT / 10);
       util.startMiniMapReduceCluster();
     }
   }
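With this change the scaled-down overrides are applied unconditionally, after this.setConf(util.getConfiguration()), so they overwrite whatever -D values were passed on the command line even against a distributed cluster (the 2 mappers observed above presumably being NUM_MAP_TASKS_DEFAULT / 100).

A minimal sketch of a fix would be to move the three overrides back inside the !isDistributedCluster() guard, restoring the pre-HBASE-11253 behavior. The enclosing method name (setUpCluster) is not visible in the hunk above and is assumed here; this is a sketch, not a tested patch:

@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(getConf());
  util.initializeCluster(3);
  this.setConf(util.getConfiguration());
  if (!util.isDistributedCluster()) {
    // Scale the workload down only on the local minicluster; in
    // distributed mode the -D overrides from the command line are kept.
    getConf().setLong(NUM_TO_WRITE_KEY, NUM_TO_WRITE_DEFAULT / 100);
    getConf().setInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT / 100);
    getConf().setInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT / 10);
    util.startMiniMapReduceCluster();
  }
}

A friendlier variant would set each key only when it is not already present in the configuration, so that the -D overrides would also take effect against the minicluster.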