Deprecated API


Contents

          Deprecated Interfaces
          Deprecated Classes
          Deprecated Exceptions
          Deprecated Fields
          Deprecated Methods
          Deprecated Constructors

Deprecated Interfaces
org.apache.hadoop.io.Closeable
          use java.io.Closeable instead. 
org.apache.hadoop.mapred.InputFormat
          Use org.apache.hadoop.mapreduce.InputFormat instead. 
org.apache.hadoop.mapred.InputSplit
          Use org.apache.hadoop.mapreduce.InputSplit instead. 
org.apache.hadoop.mapred.JobConfigurable
           
org.apache.hadoop.mapred.Mapper
          Use org.apache.hadoop.mapreduce.Mapper instead (see the migration sketch after this list). 
org.apache.hadoop.mapred.MapRunnable
          Use org.apache.hadoop.mapreduce.Mapper instead. 
org.apache.hadoop.metrics.MetricsContext
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.MetricsRecord
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.OutputFormat
          Use org.apache.hadoop.mapreduce.OutputFormat instead. 
org.apache.hadoop.mapred.Partitioner
          Use org.apache.hadoop.mapreduce.Partitioner instead. 
org.apache.hadoop.mapred.Reducer
          Use org.apache.hadoop.mapreduce.Reducer instead. 
org.apache.hadoop.metrics.Updater
          in favor of org.apache.hadoop.metrics2 usage. 
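
The interfaces above from the old org.apache.hadoop.mapred API were replaced by
abstract classes in org.apache.hadoop.mapreduce. As a minimal migration sketch
(UpperCaseMapper is a hypothetical example class, not part of Hadoop), a
new-API mapper extends Mapper and receives a single Context in place of the old
OutputCollector and Reporter:

    import java.io.IOException;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Hypothetical example: upper-cases each input line.
    public class UpperCaseMapper extends Mapper<LongWritable, Text, Text, Text> {
      private final Text upper = new Text();

      @Override
      protected void map(LongWritable key, Text value, Context context)
          throws IOException, InterruptedException {
        // Context replaces the old OutputCollector/Reporter pair.
        upper.set(value.toString().toUpperCase());
        context.write(value, upper);
      }
    }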
 

Deprecated Classes
org.apache.hadoop.metrics.spi.AbstractMetricsContext
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.spi.CompositeContext
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.ContextFactory
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.Counters
          Use org.apache.hadoop.mapreduce.Counters instead. 
org.apache.hadoop.log.EventCounter
          use org.apache.hadoop.log.metrics.EventCounter instead. 
org.apache.hadoop.metrics.jvm.EventCounter
          use org.apache.hadoop.log.metrics.EventCounter instead. 
org.apache.hadoop.metrics.file.FileContext
          use org.apache.hadoop.metrics2.sink.FileSink instead. 
org.apache.hadoop.mapred.FileInputFormat
          Use org.apache.hadoop.mapreduce.lib.input.FileInputFormat instead. 
org.apache.hadoop.mapred.FileSplit
          Use org.apache.hadoop.mapreduce.lib.input.FileSplit instead. 
org.apache.hadoop.metrics.ganglia.GangliaContext
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.lib.HashPartitioner
          Use org.apache.hadoop.mapreduce.lib.partition.HashPartitioner instead. 
org.apache.hadoop.mapred.ID
           
org.apache.hadoop.mapred.lib.IdentityMapper
          Use org.apache.hadoop.mapreduce.Mapper instead; its default map implementation is the identity function. 
org.apache.hadoop.mapred.lib.IdentityReducer
          Use org.apache.hadoop.mapreduce.Reducer instead; its default reduce implementation is the identity function. 
org.apache.hadoop.fs.InMemoryFileSystem
           
org.apache.hadoop.mapred.lib.InverseMapper
          Use org.apache.hadoop.mapreduce.lib.map.InverseMapper instead. 
org.apache.hadoop.mapred.JobConf
          Use org.apache.hadoop.conf.Configuration instead (see the job-setup sketch after this list). 
org.apache.hadoop.mapred.JobContext
          Use org.apache.hadoop.mapreduce.JobContext instead. 
org.apache.hadoop.mapred.JobID
           
org.apache.hadoop.metrics.jvm.JvmMetrics
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.LineRecordReader
          Use org.apache.hadoop.mapreduce.lib.input.LineRecordReader instead. 
org.apache.hadoop.mapred.LineRecordReader.LineReader
          Use org.apache.hadoop.util.LineReader instead. 
org.apache.hadoop.mapred.lib.LongSumReducer
          Use org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer instead. 
org.apache.hadoop.mapred.MapReduceBase
           
org.apache.hadoop.metrics.util.MBeanUtil
          in favor of org.apache.hadoop.metrics2.util.MBeans. 
org.apache.hadoop.metrics.util.MetricsBase
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.util.MetricsIntValue
          in favor of org.apache.hadoop.metrics2.lib.MetricMutableGaugeInt. 
org.apache.hadoop.metrics.util.MetricsLongValue
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.spi.MetricsRecordImpl
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.util.MetricsRegistry
          in favor of org.apache.hadoop.metrics2.lib.MetricsRegistry. 
org.apache.hadoop.metrics.MetricsServlet
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.util.MetricsTimeVaryingInt
          in favor of org.apache.hadoop.metrics2.lib.MetricMutableCounterInt. 
org.apache.hadoop.metrics.util.MetricsTimeVaryingLong
          in favor of org.apache.hadoop.metrics2.lib.MetricMutableCounterLong. 
org.apache.hadoop.metrics.util.MetricsTimeVaryingRate
          in favor of org.apache.hadoop.metrics2.lib.MetricMutableGauge. 
org.apache.hadoop.metrics.MetricsUtil
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.spi.MetricValue
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.MultiFileInputFormat
          Use org.apache.hadoop.mapred.lib.CombineFileInputFormat instead. 
org.apache.hadoop.mapred.MultiFileSplit
          Use org.apache.hadoop.mapred.lib.CombineFileSplit instead. 
org.apache.hadoop.metrics.spi.NoEmitMetricsContext
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.spi.NullContext
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.lib.NullOutputFormat
          Use org.apache.hadoop.mapreduce.lib.output.NullOutputFormat instead. 
org.apache.hadoop.mapred.OutputCommitter
          Use org.apache.hadoop.mapreduce.OutputCommitter instead. 
org.apache.hadoop.mapred.OutputLogFilter
          Use Utils.OutputFileUtils.OutputLogFilter instead. 
org.apache.hadoop.metrics.spi.OutputRecord
          in favor of org.apache.hadoop.metrics2 usage. 
org.apache.hadoop.mapred.SequenceFileInputFormat
          Use org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat instead. 
org.apache.hadoop.mapred.SequenceFileOutputFormat
          Use org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat instead. 
org.apache.hadoop.mapred.TaskAttemptContext
          Use org.apache.hadoop.mapreduce.TaskAttemptContext instead. 
org.apache.hadoop.mapred.TaskAttemptID
           
org.apache.hadoop.mapred.TaskID
           
org.apache.hadoop.mapred.TextInputFormat
          Use org.apache.hadoop.mapreduce.lib.input.TextInputFormat instead. 
org.apache.hadoop.mapred.TextOutputFormat
          Use org.apache.hadoop.mapreduce.lib.output.TextOutputFormat instead. 
org.apache.hadoop.mapred.lib.TokenCountMapper
          Use org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper instead. 
org.apache.hadoop.io.UTF8
          replaced by org.apache.hadoop.io.Text. 
org.apache.hadoop.streaming.UTF8ByteArrayUtils
          use org.apache.hadoop.util.UTF8ByteArrayUtils and org.apache.hadoop.streaming.StreamKeyValUtil instead. 
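
Most of the mapred classes above have drop-in replacements under
org.apache.hadoop.mapreduce. A minimal job-setup sketch using the new API,
replacing JobConf with Configuration plus Job (WordCountDriver is a
hypothetical class; TokenCounterMapper and IntSumReducer are library classes
shipped with the new API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;

    public class WordCountDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // replaces new JobConf(...)
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCountDriver.class);
        job.setMapperClass(TokenCounterMapper.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }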
 

Deprecated Exceptions
org.apache.hadoop.fs.permission.AccessControlException
          Use org.apache.hadoop.security.AccessControlException instead (see the sketch after this list). 
org.apache.hadoop.metrics.MetricsException
          in favor of org.apache.hadoop.metrics2.MetricsException. 
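
Migrating past the deprecated exceptions is mostly an import change; both the
old and the new AccessControlException extend IOException, so existing
handlers keep working. A minimal sketch (PermissionCheck and tryDelete are
hypothetical names):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    // New location; previously org.apache.hadoop.fs.permission.AccessControlException.
    import org.apache.hadoop.security.AccessControlException;

    public class PermissionCheck {
      // Returns false instead of propagating a permission failure.
      public static boolean tryDelete(FileSystem fs, Path p) throws IOException {
        try {
          return fs.delete(p, false);
        } catch (AccessControlException ace) {
          return false;
        }
      }
    }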
 

Deprecated Fields
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_ENV
          Use JobConf.MAPRED_MAP_TASK_ENV or JobConf.MAPRED_REDUCE_TASK_ENV 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_JAVA_OPTS
          Use JobConf.MAPRED_MAP_TASK_JAVA_OPTS or JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS (see the sketch after this list). 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXPMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXVMEM_PROPERTY
          Use JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY and JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_ULIMIT
          Use JobConf.MAPRED_MAP_TASK_ULIMIT or JobConf.MAPRED_REDUCE_TASK_ULIMIT 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG
          Use CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION instead. 
org.apache.hadoop.mapred.JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY
            
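
The combined per-task settings above were split into separate map and reduce
variants. A minimal sketch of the replacements (TaskMemorySetup is a
hypothetical class and the values are illustrative only):

    import org.apache.hadoop.mapred.JobConf;

    public class TaskMemorySetup {
      public static JobConf configure() {
        JobConf conf = new JobConf();
        // Replace the combined MAPRED_TASK_JAVA_OPTS with per-phase opts.
        conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, "-Xmx512m");
        conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx1024m");
        // Replace MAPRED_TASK_MAXVMEM_PROPERTY / setMaxVirtualMemoryForTask(long)
        // with per-phase memory limits, in megabytes.
        conf.setMemoryForMapTask(512);
        conf.setMemoryForReduceTask(1024);
        return conf;
      }
    }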
 

Deprecated Methods
org.apache.hadoop.filecache.DistributedCache.addArchiveToClassPath(Path, Configuration)
          Please use DistributedCache.addArchiveToClassPath(Path, Configuration, FileSystem) instead. The FileSystem should be obtained within an appropriate doAs. 
org.apache.hadoop.filecache.DistributedCache.addFileToClassPath(Path, Configuration)
          Please use DistributedCache.addFileToClassPath(Path, Configuration, FileSystem) instead. The FileSystem should be obtained within an appropriate doAs. 
org.apache.hadoop.http.HttpServer.addInternalServlet(String, String, Class)
          this is a temporary method 
org.apache.hadoop.http.HttpServer.addSslListener(InetSocketAddress, String, String, String)
          Use HttpServer.addSslListener(InetSocketAddress, Configuration, boolean) 
org.apache.hadoop.fs.FsShell.byteDesc(long)
          Consider using StringUtils.byteDesc(long) instead. 
org.apache.hadoop.ipc.RPC.call(Method, Object[][], InetSocketAddress[], Configuration)
          Use RPC.call(Method, Object[][], InetSocketAddress[], UserGroupInformation, Configuration) instead 
org.apache.hadoop.ipc.Client.call(Writable[], InetSocketAddress[])
          Use Client.call(Writable[], InetSocketAddress[], Class, UserGroupInformation, Configuration) instead 
org.apache.hadoop.ipc.Client.call(Writable[], InetSocketAddress[], Class, UserGroupInformation)
          Use Client.call(Writable[], InetSocketAddress[], Class, UserGroupInformation, Configuration) instead 
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress)
          Use Client.call(Writable, ConnectionId) instead 
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress, Class, UserGroupInformation)
          Use Client.call(Writable, ConnectionId) instead 
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress, UserGroupInformation)
          Use Client.call(Writable, ConnectionId) instead 
org.apache.hadoop.ipc.Server.call(Writable, long)
          Use Server.call(Class, Writable, long) instead 
org.apache.hadoop.mapred.TaskLog.captureOutAndError(List, List, File, File, long, boolean, String)
          pid files are no longer used; instead, the pid is exported to the environment variable JVM_PID. 
org.apache.hadoop.mapred.TaskLog.captureOutAndError(List, List, File, File, long, String)
          pid files are no longer used; instead, the pid is exported to the environment variable JVM_PID. 
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext)
           
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
          use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, int) instead 
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
          use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, JobStatus.State) instead 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext)
           
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable)
          use ReflectionUtils.cloneWritableInto instead. 
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable)
           
org.apache.hadoop.filecache.DistributedCache.createAllSymlink(Configuration, File, File)
          Internal to MapReduce framework. Use DistributedCacheManager instead. 
org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner(byte[], byte[])
          Use TFile.Reader.createScannerByKey(byte[], byte[]) instead. 
org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner(RawComparable, RawComparable)
          Use TFile.Reader.createScannerByKey(RawComparable, RawComparable) instead. 
org.apache.hadoop.fs.FileSystem.delete(Path)
          Use delete(Path, boolean) instead (see the sketch after this list). 
org.apache.hadoop.fs.RawLocalFileSystem.delete(Path)
           
org.apache.hadoop.fs.FilterFileSystem.delete(Path)
           
org.apache.hadoop.fs.ftp.FTPFileSystem.delete(Path)
          Use delete(Path, boolean) instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.delete(Path)
           
org.apache.hadoop.fs.s3.S3FileSystem.delete(Path)
           
org.apache.hadoop.fs.s3native.NativeS3FileSystem.delete(Path)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findByte(byte[], int, int, byte)
          use org.apache.hadoop.util.UTF8ByteArrayUtils.findByte(byte[], int, int, byte) instead. 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[])
          use org.apache.hadoop.util.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[]) instead. 
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
            
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], byte, int)
          use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], byte, int) instead. 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int)
          use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int) instead. 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[])
          use StreamKeyValUtil.findTab(byte[]) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[], int, int)
          use StreamKeyValUtil.findTab(byte[], int, int) 
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path)
          Use FileSystem.delete(Path, boolean) 
org.apache.hadoop.io.BytesWritable.get()
          Use BytesWritable.getBytes() instead. 
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
          Use getFileStatus() instead 
org.apache.hadoop.streaming.StreamJob.getClusterNick()
           
org.apache.hadoop.mapred.JobTracker.getClusterStatus()
          use JobTracker.getClusterStatus(boolean) 
org.apache.hadoop.io.SequenceFile.getCompressionType(Configuration)
          Use SequenceFileOutputFormat.getOutputCompressionType(org.apache.hadoop.mapred.JobConf) to get SequenceFile.CompressionType for job-outputs. 
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
          use Counters.Group.getCounter(String) instead 
org.apache.hadoop.mapred.JobClient.getJob(String)
          Applications should rather use JobClient.getJob(JobID). 
org.apache.hadoop.mapred.JobProfile.getJobId()
          use getJobID() instead 
org.apache.hadoop.mapred.JobStatus.getJobId()
          use getJobID instead 
org.apache.hadoop.mapred.RunningJob.getJobID()
          This method is deprecated and will be removed. Applications should rather use RunningJob.getID(). 
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
           
org.apache.hadoop.fs.FileSystem.getLength(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getLength(Path)
           
org.apache.hadoop.mapred.jobcontrol.Job.getMapredJobID()
          use Job.getAssignedJobID() instead 
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
          Applications should rather use JobClient.getMapTaskReports(JobID) 
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
          this variable is deprecated and no longer in use. 
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
          Use JobConf.getMemoryForMapTask() and JobConf.getMemoryForReduceTask() 
org.apache.hadoop.fs.FileSystem.getName()
          call getUri() instead. 
org.apache.hadoop.fs.FilterFileSystem.getName()
          call getUri() instead. 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getName()
           
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
          call get(URI, Configuration) instead. 
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
          Applications should rather use JobClient.getReduceTaskReports(JobID) 
org.apache.hadoop.fs.FileSystem.getReplication(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getReplication(Path)
           
org.apache.hadoop.net.NetUtils.getServerAddress(Configuration, String, String, String)
           
org.apache.hadoop.io.BytesWritable.getSize()
          Use BytesWritable.getLength() instead. 
org.apache.hadoop.fs.FileSystem.getStatistics()
          use FileSystem.getAllStatistics() instead 
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)
           
org.apache.hadoop.mapred.TaskReport.getTaskId()
          use TaskReport.getTaskID() instead 
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
          use TaskCompletionEvent.getTaskAttemptId() instead. 
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
           
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
           
org.apache.hadoop.util.Shell.getUlimitMemoryCommand(Configuration)
          Use Shell.getUlimitMemoryCommand(int) 
org.apache.hadoop.streaming.StreamJob.go()
          use StreamJob.run(String[]) instead. 
org.apache.hadoop.fs.FileSystem.isDirectory(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.isDirectory(Path)
           
org.apache.hadoop.fs.kfs.KosmosFileSystem.isFile(Path)
           
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
          Applications should rather use RunningJob.killTask(TaskAttemptID, boolean) 
org.apache.hadoop.fs.FsShell.limitDecimalTo2(double)
          Consider using StringUtils.limitDecimalTo2(double) instead. 
org.apache.hadoop.fs.kfs.KosmosFileSystem.lock(Path, boolean)
           
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFailed(TaskAttemptID, long, String, String)
          Use JobHistory.MapAttempt.logFailed(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFailed(TaskAttemptID, long, String, String)
          Use JobHistory.ReduceAttempt.logFailed(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFinished(TaskAttemptID, long, long, long, String)
          Use JobHistory.ReduceAttempt.logFinished(TaskAttemptID, long, long, long, String, String, String, Counters) 
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFinished(TaskAttemptID, long, String)
          Use JobHistory.MapAttempt.logFinished(TaskAttemptID, long, String, String, String, Counters) 
org.apache.hadoop.mapred.JobHistory.JobInfo.logJobInfo(JobID, long, long, int)
          Use JobHistory.JobInfo.logJobInfo(JobID, long, long) instead. 
org.apache.hadoop.mapred.JobHistory.MapAttempt.logKilled(TaskAttemptID, long, String, String)
          Use JobHistory.MapAttempt.logKilled(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logKilled(TaskAttemptID, long, String, String)
          Use JobHistory.ReduceAttempt.logKilled(TaskAttemptID, long, String, String, String) 
org.apache.hadoop.mapred.JobHistory.JobInfo.logStarted(JobID, long, int, int)
          Use JobHistory.JobInfo.logInited(JobID, long, int, int) and JobHistory.JobInfo.logStarted(JobID) 
org.apache.hadoop.mapred.JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String)
          Use JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String, int, String) 
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String)
          Use JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String, int, String) 
org.apache.hadoop.mapred.JobHistory.JobInfo.logSubmitted(JobID, JobConf, String, long)
          Use JobHistory.JobInfo.logSubmitted(JobID, JobConf, String, long, boolean) instead. 
org.apache.hadoop.io.SequenceFile.Reader.next(DataOutputBuffer)
          Call SequenceFile.Reader.nextRaw(DataOutputBuffer, SequenceFile.ValueBytes) instead. 
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)
           
org.apache.hadoop.mapred.JobID.read(DataInput)
           
org.apache.hadoop.mapred.TaskID.read(DataInput)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.readLine(LineReader, Text)
          use StreamKeyValUtil.readLine(LineReader, Text) 
org.apache.hadoop.fs.kfs.KosmosFileSystem.release(Path)
           
org.apache.hadoop.io.SequenceFile.setCompressionType(Configuration, SequenceFile.CompressionType)
          Use one of the many SequenceFile.createWriter methods to specify the SequenceFile.CompressionType while creating the SequenceFile, or use SequenceFileOutputFormat.setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType) to specify the SequenceFile.CompressionType for job outputs. 
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
           
org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String)
          use Job.setAssignedJobID(JobID) instead 
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)
           
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
          Use JobConf.setMemoryForMapTask(long) and JobConf.setMemoryForReduceTask(long). 
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
          use TaskCompletionEvent.setTaskID(TaskAttemptID) instead. 
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int)
          use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int, int)
          use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int)
          use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int, int)
          use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int, int) 
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
          Use Submitter.runJob(JobConf) 
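
A recurring pattern in the FileSystem entries above: the single-attribute
calls (getLength, getBlockSize, getReplication, isDirectory) were folded into
getFileStatus(), and delete(Path) gained an explicit recursive flag. A minimal
sketch (FileStatusMigration is a hypothetical class):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FileStatusMigration {
      public static void inspectAndDelete(Path p) throws Exception {
        // FileSystem.get(Configuration) replaces getNamed(String, Configuration).
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus st = fs.getFileStatus(p);
        System.out.println("length=" + st.getLen()              // replaces fs.getLength(p)
            + " blockSize=" + st.getBlockSize()                 // replaces fs.getBlockSize(p)
            + " replication=" + st.getReplication());           // replaces fs.getReplication(p)
        if (!st.isDir()) {                                      // replaces fs.isDirectory(p)
          fs.delete(p, false);  // the boolean is the recursive flag
        }
      }
    }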
 

Deprecated Constructors
org.apache.hadoop.mapred.FileSplit(Path, long, long, JobConf)
            
org.apache.hadoop.fs.FSDataOutputStream(OutputStream)
           
org.apache.hadoop.mapred.JobProfile(String, String, String, String, String)
          use JobProfile(String, JobID, String, String, String) instead 
org.apache.hadoop.io.SetFile.Writer(FileSystem, String, Class)
          pass a Configuration too 
org.apache.hadoop.streaming.StreamJob(String[], boolean)
          use StreamJob() with ToolRunner, or set the Configuration using StreamJob.setConf(Configuration) and run with StreamJob.run(String[]) (see the sketch below). 
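
Per the StreamJob note above, a streaming job is now driven through
ToolRunner, which parses the generic options and invokes
StreamJob.run(String[]). A minimal sketch (StreamingDriver is a hypothetical
class):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.streaming.StreamJob;
    import org.apache.hadoop.util.ToolRunner;

    public class StreamingDriver {
      public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new Configuration(), new StreamJob(), args);
        System.exit(exitCode);
      }
    }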
 


