Spring Data MongoDB的聚合函数实现条件统计(数组包含条件和等值条件)

例如 MongoDB 要统计状态是 0、状态是 1 的数据总数,并且统计某个数组字段包含输入数组的数据总数

mongoDB的原始语句如下

// Group alarm-log documents by device imei and compute two conditional counts
// in a single $group stage.
db.term_logs_alaram.aggregate(
    [
        { '$group': {
            // Group key: one output document per device imei.
            '_id': '$imei',
            // Astate: count of documents whose state equals 0.
            // $cond emits 1 when the predicate matches, 0 otherwise; $sum adds them up.
            'Astate': { 
                '$sum': {
                    '$cond': [
                        { '$eq': ['$state', 0]}, 
                        1,0
                    ]
                }
            },
            // Bstate1: count of documents whose bsIds array is a subset of
            // ["031","032"] — i.e. every element of bsIds appears in that list.
            // ($setIsSubset: [A, B] is true when A ⊆ B.)
            'Bstate1': { 
                '$sum': {
                    '$cond': [
                       { "$setIsSubset":[
                   "$bsIds", ["031","032"]
               
                 ]}, 
                        1,0
                    ]
                }
            }
        }}
    ]
)

如果要支持低版本的 Spring Data MongoDB,我们需要实现 AggregationOperation 接口。
建议实现该接口,因为高版本封装的还不是很完善。用这个实现类构造的对象,将操作对象传入,就能作为管道筛选条件使用。

/**
 * Adapter that lets a raw {@code DBObject} pipeline stage (e.g. a hand-built
 * {@code $project} document) be used as a Spring Data {@link AggregationOperation}.
 *
 * <p>Useful on older spring-data-mongodb versions (here 1.9.x) where the fluent
 * API does not cover operators such as {@code $cond} / {@code $setIsSubset}.</p>
 */
public class AlarmLogsAggregationOperation implements AggregationOperation {

    /** Raw pipeline stage, e.g. {@code {"$project": {...}}}; immutable reference. */
    private final DBObject operation;

    /**
     * @param operation the raw pipeline stage document; must not be {@code null}
     */
    public AlarmLogsAggregationOperation(DBObject operation) {
        this.operation = Objects.requireNonNull(operation, "operation");
    }

    /**
     * Maps the raw stage through the current aggregation context so field
     * names are translated exactly like the framework-built stages.
     */
    @Override
    public DBObject toDBObject(AggregationOperationContext context) {
        return context.getMappedObject(operation);
    }
}

下面是我工作中的代码和mongodb的原始语句有点出入

 @Override
    public PageQuery<TermAlarmLogExtend> findTermAlarmLogExtend(PageQuery query) throws ParseException {
        Map map = (HashMap) query.getParas();
        //获取开始时间
        String startDate = (String) map.get("startDate");
        //获取结束时间
        String endDate = (String) map.get("endDate");
        //获取基站的序号的数的获取
        String bsNumber = (String) map.get("bsNumber");
        String[] split = bsNumber.split(",");
        for (int i = 0; i < split.length; i++) {
            StringBuffer buffer = new StringBuffer("0");
            String s = buffer.append(split[i]).toString();
            split[i] = s;
        }
         //用作match管道过滤条件,相当于筛选在这两个时间段之间的数据
        Criteria cri = Criteria.where("upload_time").gte(CalendarUtils.parseDateTime(startDate + "")).lte(CalendarUtils.parseDateTime(endDate + ""));
        //这里构建project条件
        BasicDBObject scond = new BasicDBObject();
        scond.put("astate", new BasicDBObject(
                "$cond", new Object[]{
                new BasicDBObject(
                        "$eq", new Object[]{"$state", 0}
                ),
                1, 0}));
        scond.put("bstate", new BasicDBObject(
                "$cond", new Object[]{
                new BasicDBObject(
                        "$eq", new Object[]{"$state", 1}
                ),
                1, 0}));
        for (int i = 0; i < split.length; i++) {
            String bss = "bs" + i;
            String[] arr = {split[i]};
            scond.put(bss, new BasicDBObject(
                    "$cond", new Object[]{
                    new BasicDBObject(
                            "$setIsSubset", new Object[]{arr, "$bsIds"}
                    ),
                    1, 0}));
        }
        scond.put("imei", 1);
        scond.put("status", 1);
        //这两个语句将相关的语句构造成project管道
        BasicDBObject project = new BasicDBObject("$project", scond);
        AlarmLogsAggregationOperation alarmLogsAggregationOperation = new AlarmLogsAggregationOperation(project);
        //这个是用于聚合统计的管道
        GroupOperation as = Aggregation.group("imei").sum("status").as("failLocationTotal").sum("astate").as("locationTotal").sum("bstate").as("alarmTotal").sum("bs0").as("bs0").sum("bs1").as("bs1").sum("bs2").as("bs2").sum("bs3").as("bs3");
//这个是另外起别名
        ProjectionOperation as1 = Aggregation.project("failLocationTotal", "locationTotal", "alarmTotal", "bs0", "bs1", "bs2", "bs3").and("_id").as("imei");

        Aggregation aggregation = Aggregation.newAggregation(
                Aggregation.match(cri),
                alarmLogsAggregationOperation,
                as,
                as1
        );f
        //将构建的所有管道对象传入aggregate方法查询出所有的对象
        AggregationResults<TermAlarmLogExtend> results1 = super.aggregate(aggregation, "term_logs_alaram", TermAlarmLogExtend.class);
        //这是获取查询的所有数据(只有实体字段名和mongo字段相同才会映射否则可以自己断点调试看在那个字段里放着存储的数据)
        List<TermAlarmLogExtend> mappedResults = results1.getMappedResults();
        //这里做的是数据的分页将每页的数据返回给前端
        int first=query.getPageNumber() > 1 ? Integer.parseInt(((query.getPageNumber() - 1) * query.getPageSize()) + "") : 0;
        Integer last=0;
        if(first+query.getPageSize()>results1.getMappedResults().size()-1){
            last=results1.getMappedResults().size();
        }else{
            last=(int)(first+query.getPageSize());
        }
        List<TermAlarmLogExtend> all = mappedResults.subList(first, last);
        //设置戴数据的总的条数
        query.setTotalRow(results1.getMappedResults().size());
        query.setList(all);
        return query;
    }

这样去做就解决了
我用的mongodb的依赖是

<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-mongodb</artifactId>
    <version>1.9.2.RELEASE</version>
</dependency>
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值