Details
-
Bug
-
Status: Patch Available
-
Major
-
Resolution: Unresolved
-
2.7.1
-
None
-
None
-
CentOS 7
Hadoop-2.7.1
Hive-1.2.1
Description
try {
  //increment processed counter only if skipping feature is enabled
  boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job) > 0 &&
      SkipBadRecords.getAutoIncrReducerProcCount(job);
  ReduceValuesIterator<INKEY,INVALUE> values = isSkipping() ?
      new SkippingReduceValuesIterator<INKEY,INVALUE>(rIter, comparator,
          keyClass, valueClass, job, reporter, umbilical) :
      new ReduceValuesIterator<INKEY,INVALUE>(rIter, comparator,
          keyClass, valueClass, job, reporter);
  values.informReduceProgress();
  while (values.more()) {
    reduceInputKeyCounter.increment(1);
    reducer.reduce(values.getKey(), values, collector, reporter);
    if (incrProcCount) {
      reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
          SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1);
    }
    values.nextKey();
    values.informReduceProgress();
  }
  reducer.close();
  reducer = null;
  out.close(reporter);
  out = null;
} finally {
  IOUtils.cleanupWithLogger(LOG, reducer);
  closeQuietly(out, reporter);
}
}
If reducer.close() throws an exception, the subsequent statement reducer = null; never executes, so the finally block calls IOUtils.cleanupWithLogger(LOG, reducer) with a still-non-null reducer.
If that second close attempt also throws, its exception overwrites (masks) the original exception from reducer.close(), hiding the root cause.
We should therefore catch the exception raised during cleanup and log it, so the original failure is preserved and the underlying issue is easier to diagnose.