// Example code is below.
package com.test.FileNameChangeTest;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
/**
 * Mapper that writes its output to a file named after the input split's
 * parent directory and file name ("parentDir/fileName"), bypassing the
 * framework's usual part-r-xxxxx files via a custom {@link RecordWriter}
 * built in {@code setup}.
 *
 * NOTE(review): the declared output types are NullWritable/NullWritable,
 * yet records are emitted through a private Text/Text RecordWriter rather
 * than via context.write() -- presumably intentional (map-only job), but
 * confirm the job runs with zero reducers so the declared types are unused.
 */
public class FileNameOutputsMapper extends
Mapper<LongWritable, Text, NullWritable, NullWritable> {
// "parentDir/fileName" of the current input split; assigned in setup().
protected String filename;
// Writer targeting the per-input-file output path; created in setup(),
// closed in cleanup().
private RecordWriter<Text, Text> recordWriter;
// NOTE(review): outputValue/outputKey are never assigned anywhere in the
// visible code, so map() currently writes nulls -- TODO confirm the
// intended map logic populates them.
private Text outputValue;
private Text outputKey;
/**
 * Emits one record per input key/value through the custom record writer
 * created in {@code setup}; the actual transformation is still a TODO stub.
 *
 * @param key     byte offset of the line within the input file (unused)
 * @param value   the input line (unused by the current stub)
 * @param context task context (unused here; output goes via recordWriter)
 * @throws IOException          if the underlying writer fails
 * @throws InterruptedException if the task is interrupted
 */
@Override
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
//TODO map logic
// write to output to file using Writer
// NOTE(review): outputKey/outputValue are never initialized in the visible
// code, so this writes a null key and null value pair -- confirm the
// intended map logic assigns them before this call.
recordWriter.write(outputKey, outputValue);
}
/**
 * Builds a record writer whose target file mirrors the input split's
 * "parentDir/fileName" path under the job's output directory, instead of
 * the default part-r-xxxxx work file.
 *
 * @param context task context supplying the input split and output path
 * @throws IOException          if the record writer cannot be created
 * @throws InterruptedException if the task is interrupted
 */
@Override
protected void setup(Context context) throws IOException,
InterruptedException {
    // Derive "parentDir/fileName" from the split feeding this mapper.
    final FileSplit fileSplit = (FileSplit) context.getInputSplit();
    final Path inputPath = fileSplit.getPath();
    filename = inputPath.getParent().getName() + "/" + inputPath.getName();

    // Resolve the mirrored location under the job's output directory.
    final Path jobOutputDir = FileOutputFormat.getOutputPath(context);
    final Path targetFile = new Path(jobOutputDir, filename);

    // Anonymous subclass pins the work file to our per-input path.
    final TextOutputFormat<Text, Text> outputFormat =
            new TextOutputFormat<Text, Text>() {
                @Override
                public Path getDefaultWorkFile(TaskAttemptContext context,
                        String extension) throws IOException {
                    return targetFile;
                }
            };

    // Opening the writer also creates the output subfolder.
    recordWriter = outputFormat.getRecordWriter(context);
}
@Override
protected void cleanup(Context context) throws IOException,
InterruptedException {
recordWriter.close(context);