package demo.Order;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Hadoop key bean carrying an order id and a price.
 *
 * <p>Sort order: ascending by {@code order_id}, then descending by
 * {@code price} within the same order (used for secondary sort).
 * Serialization order in {@link #write} and {@link #readFields} must match.
 */
public class OrderBean implements WritableComparable<OrderBean> {

    /** Order identifier (primary sort key). */
    private int order_id;
    /** Item price (secondary sort key, descending). */
    private double price;

    /** No-arg constructor required by Hadoop's reflection-based instantiation. */
    public OrderBean() {
    }

    public OrderBean(int order_id, double price) {
        this.order_id = order_id;
        this.price = price;
    }

    public int getOrder_id() {
        return order_id;
    }

    public void setOrder_id(int order_id) {
        this.order_id = order_id;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(double price) {
        this.price = price;
    }

    /** Serializes the fields; order must mirror {@link #readFields}. */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(order_id);
        out.writeDouble(price);
    }

    /**
     * Deserializes the fields in the same order as {@link #write}.
     *
     * <p>BUG FIX: the original declared fresh locals
     * ({@code int order_id = in.readInt();}) that shadowed the fields,
     * leaving every deserialized bean at its defaults (0, 0.0). Assign
     * the instance fields directly instead.
     */
    @Override
    public void readFields(DataInput in) throws IOException {
        order_id = in.readInt();
        price = in.readDouble();
    }

    @Override
    public String toString() {
        return order_id + "\t" + price;
    }

    /**
     * Ascending by order_id; for equal ids, descending by price
     * (价格倒序排序).
     *
     * <p>BUG FIX: the original returned 1 when both ids and prices were
     * equal, so {@code x.compareTo(y)} and {@code y.compareTo(x)} could
     * both be positive — violating the Comparable contract. Using
     * {@code Integer.compare}/{@code Double.compare} returns 0 for fully
     * equal keys and also avoids int-subtraction overflow pitfalls.
     */
    @Override
    public int compareTo(OrderBean o) {
        int result = Integer.compare(order_id, o.getOrder_id());
        if (result == 0) {
            // Reversed operand order => descending price.
            result = Double.compare(o.getPrice(), price);
        }
        return result;
    }
}
今天,在测试上面代码的时候,输出到目标文件的数据一直是 0, 0,
没有一个正常的数据。经过多次调试对比,发现问题出在反序列化方法里不能重新定义变量,
只需要将
@Override
public void readFields(DataInput in) throws IOException {
int order_id = in.readInt();
double price = in.readDouble();
}
里面改成
@Override
public void readFields(DataInput in) throws IOException {
order_id = in.readInt();
price = in.readDouble();
}
最后,切记,反序列化顺序和序列化一样,并且不能自己重新定义变量!!!!!