public class HiveScriptIOSchema extends Object implements HiveInspectors, scala.Product, scala.Serializable
Nested classes/interfaces inherited from interface HiveInspectors: HiveInspectors.typeInfoConversions
| Constructor and Description |
| --- |
HiveScriptIOSchema(scala.collection.Seq<scala.Tuple2<String,String>> inputRowFormat,
scala.collection.Seq<scala.Tuple2<String,String>> outputRowFormat,
scala.Option<String> inputSerdeClass,
scala.Option<String> outputSerdeClass,
scala.collection.Seq<scala.Tuple2<String,String>> inputSerdeProps,
scala.collection.Seq<scala.Tuple2<String,String>> outputSerdeProps,
scala.Option<String> recordReaderClass,
scala.Option<String> recordWriterClass,
boolean schemaLess) |
| Modifier and Type | Method and Description |
| --- | --- |
static HiveScriptIOSchema |
apply(org.apache.spark.sql.catalyst.plans.logical.ScriptInputOutputSchema input) |
scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector>> |
initInputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> input) |
scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector>> |
initOutputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output) |
scala.collection.Seq<scala.Tuple2<String,String>> |
inputRowFormat() |
scala.collection.immutable.Map<String,String> |
inputRowFormatMap() |
scala.Option<String> |
inputSerdeClass() |
scala.collection.Seq<scala.Tuple2<String,String>> |
inputSerdeProps() |
scala.collection.Seq<scala.Tuple2<String,String>> |
outputRowFormat() |
scala.collection.immutable.Map<String,String> |
outputRowFormatMap() |
scala.Option<String> |
outputSerdeClass() |
scala.collection.Seq<scala.Tuple2<String,String>> |
outputSerdeProps() |
scala.Option<org.apache.hadoop.hive.ql.exec.RecordReader> |
recordReader(java.io.InputStream inputStream,
org.apache.hadoop.conf.Configuration conf) |
scala.Option<String> |
recordReaderClass() |
scala.Option<org.apache.hadoop.hive.ql.exec.RecordWriter> |
recordWriter(java.io.OutputStream outputStream,
org.apache.hadoop.conf.Configuration conf) |
scala.Option<String> |
recordWriterClass() |
boolean |
schemaLess() |
Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Methods inherited from interface HiveInspectors: decimalTypeInfoToCatalyst, getBinaryWritable, getBinaryWritableConstantObjectInspector, getBooleanWritable, getBooleanWritableConstantObjectInspector, getByteWritable, getByteWritableConstantObjectInspector, getDateWritable, getDateWritableConstantObjectInspector, getDecimalWritable, getDecimalWritableConstantObjectInspector, getDoubleWritable, getDoubleWritableConstantObjectInspector, getFloatWritable, getFloatWritableConstantObjectInspector, getIntWritable, getIntWritableConstantObjectInspector, getLongWritable, getLongWritableConstantObjectInspector, getPrimitiveNullWritableConstantObjectInspector, getShortWritable, getShortWritableConstantObjectInspector, getStringWritable, getStringWritableConstantObjectInspector, getTimestampWritable, getTimestampWritableConstantObjectInspector, inspectorToDataType, isSubClassOf, javaTypeToDataType, toInspector, toInspector, unwrapperFor, unwrapperFor, withNullSafe, wrap, wrap, wrap, wrapperFor
public HiveScriptIOSchema(scala.collection.Seq<scala.Tuple2<String,String>> inputRowFormat, scala.collection.Seq<scala.Tuple2<String,String>> outputRowFormat, scala.Option<String> inputSerdeClass, scala.Option<String> outputSerdeClass, scala.collection.Seq<scala.Tuple2<String,String>> inputSerdeProps, scala.collection.Seq<scala.Tuple2<String,String>> outputSerdeProps, scala.Option<String> recordReaderClass, scala.Option<String> recordWriterClass, boolean schemaLess)
public static HiveScriptIOSchema apply(org.apache.spark.sql.catalyst.plans.logical.ScriptInputOutputSchema input)
public scala.collection.Seq<scala.Tuple2<String,String>> inputRowFormat()
public scala.collection.Seq<scala.Tuple2<String,String>> outputRowFormat()
public scala.Option<String> inputSerdeClass()
public scala.Option<String> outputSerdeClass()
public scala.collection.Seq<scala.Tuple2<String,String>> inputSerdeProps()
public scala.collection.Seq<scala.Tuple2<String,String>> outputSerdeProps()
public scala.Option<String> recordReaderClass()
public scala.Option<String> recordWriterClass()
public boolean schemaLess()
public scala.collection.immutable.Map<String,String> inputRowFormatMap()
public scala.collection.immutable.Map<String,String> outputRowFormatMap()
public scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector>> initInputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> input)
public scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector>> initOutputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output)
public scala.Option<org.apache.hadoop.hive.ql.exec.RecordReader> recordReader(java.io.InputStream inputStream, org.apache.hadoop.conf.Configuration conf)
public scala.Option<org.apache.hadoop.hive.ql.exec.RecordWriter> recordWriter(java.io.OutputStream outputStream, org.apache.hadoop.conf.Configuration conf)