HttpServer Sending Data to Kafka

Contents

1. Requirements

2. Architecture and Flow Diagrams

3. Code Structure

4. Code Listings



1. Requirements

1.1 Path parsing: the last segment of the request path is used as the appkey (a short extraction sketch follows this list).
1.2 Data caching: when Kafka cannot be reached, data is cached in a local cache directory.
1.3 Security validation: each requested appkey is checked for validity (signature verification is still to be decided).
1.4 Automatic appkey refresh: the latest appkey list is fetched from the database at a fixed interval.
1.5 IP enrichment: an ip field is added to every record.
1.6 Logging: basic statistics as well as errors and exceptions are logged.
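
For requirement 1.1, the appkey is simply the last segment of the request path, for example "myappkey" in /mininfo/v1/logs/myappkey. A minimal sketch of that extraction, using a hypothetical helper name (the real project does this inline in KafkaHttpServer below):

// Returns the substring after the last '/', which is used as the appkey / Kafka topic
private static String extractAppkey(String path) {
    return path.substring(path.lastIndexOf("/") + 1);
}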

2. Architecture and Flow Diagrams

[Architecture diagram]

[Flow diagram]

3. Code Structure

[Code structure screenshot]

4. Code Listings

Configuration.java

package com.donews.data;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

/**
 * Created by yuhui on 16-6-23.
 */
public class Configuration {
   public static final Config conf = ConfigFactory.load();
}
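
Configuration simply wraps a Typesafe Config instance loaded from application.conf (listed later in this post). A minimal sketch of how other classes read values from it; the keys match the application.conf listing, but the snippet itself is only illustrative:

import java.util.concurrent.TimeUnit;

// Illustrative reads of keys defined in application.conf
int port = Configuration.conf.getInt("server.port");                                          // 20000
long delayMs = Configuration.conf.getDuration("server.counter.delay", TimeUnit.MILLISECONDS); // "30s" -> 30000
String brokers = Configuration.conf.getString("kafka.bootstrap.servers");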

Counter.java

package com.donews.data;


import io.vertx.core.Vertx;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Created by yuhui on 16-6-22.
 */
public class Counter {
    private Logger LOG = LoggerFactory.getLogger(Counter.class);
    AtomicLong messages = new AtomicLong(0L);
    AtomicLong bytes = new AtomicLong(0L);
    private long start = System.currentTimeMillis();

    private void reset() {
        messages.set(0L);
        bytes.set(0L);
        start = System.currentTimeMillis();
    }

    /**
     * Starts a periodic task that logs throughput and memory statistics. Sample log output:
     * Feb 14, 2017 3:49:53 PM com.donews.data.Counter
     * INFO: start Counter
     * Feb 14, 2017 3:49:54 PM com.donews.data.Counter
     * INFO: start Counter
     * Feb 14, 2017 3:49:55 PM com.donews.data.Counter
     * INFO: start Counter
     * Feb 14, 2017 3:49:56 PM com.donews.data.Counter
     * INFO: start Counter
     *
     * @param vertx the Vert.x instance used to schedule the periodic logging task
     */
    public void start(Vertx vertx) {
        LOG.info("start Counter");
        long delay = Configuration.conf.getDuration("server.counter.delay", TimeUnit.MILLISECONDS);
        vertx.setPeriodic(delay, h -> {
            long time = System.currentTimeMillis() - start;
            double rps = messages.get() * 1000.0 / time;
            double mbps = (bytes.get() * 1000.0 / 1024.0 / 1024.0) / time;
            Runtime runtime = Runtime.getRuntime();
            double totalMem = runtime.totalMemory() * 1.0 / 1024 / 1024;
            double maxMem = runtime.maxMemory() * 1.0 / 1024 / 1024;
            double freeMem = runtime.freeMemory() * 1.0 / 1024 / 1024;
            LOG.info("{0}:Message/S, {1}:MBytes/S", rps, mbps);
            LOG.info("totalMem:{0}MB maxMem:{1}MB freeMem:{2}MB", totalMem, maxMem, freeMem);
            reset();
        });
    }

}

KafkaHttpServer.java

package com.donews.data;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServer;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import io.vertx.ext.web.Router;
import io.vertx.ext.web.RoutingContext;
import io.vertx.ext.web.handler.BodyHandler;

import java.io.*;
import java.sql.*;
import java.time.Instant;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicInteger;

public class KafkaHttpServer {
    private static final Logger LOG = LoggerFactory.getLogger(KafkaHttpServer.class);
    private final Counter statistic = new Counter();
    private static final String DBDRIVER = "com.mysql.jdbc.Driver";
    private static final String URL = Configuration.conf.getString("mysql.url");
    private static final String USER = Configuration.conf.getString("mysql.user");
    private static final String PASSWORD = Configuration.conf.getString("mysql.password");
    private static HashSet<String> appkeys = new HashSet<>();
    private static boolean deleteFile = true;

    private void error(HttpServerResponse response, String message) {
        response.setStatusCode(500).end(new JsonObject()
                .put("code", 3)
                .put("msg", message)
                .encode());
    }

    private void ok(HttpServerResponse response, String message) {
        response.putHeader("Access-Control-Allow-Origin", "*");
        response.setStatusCode(200).end(new JsonObject()
                .put("code", 0)
                .put("msg", message)
                .encode());
    }

    private void startService(int port) {
        KafkaProducerWrapper sender = new KafkaProducerWrapper();
        Vertx vertx = Vertx.vertx();
        HttpServer server = vertx.createHttpServer();
        Router router = Router.router(vertx);
        router.route().handler(BodyHandler.create());
        // POST request example: http://192.168.1.10:10002/mininfo/logs
        // router.route() registers the route here; /mininfo/logs acts like a routing endpoint ("room")
        router.route("/mininfo/logs").handler(ctx -> {
            try {
                JsonArray array = ctx.getBodyAsJsonArray();
                String[] messages = new String[array.size()];
                for (int i = 0; i < array.size(); i++) {
                    JsonObject message = array.getJsonObject(i);
                    message.put("ip", ctx.request().remoteAddress().host());
                    if (!message.containsKey("timestamp")) {
                        message.put("timestamp", Instant.now().toString());
                    }
                    messages[i] = array.getJsonObject(i).encode();
                }
                sendMessages(sender, ctx, "appstatistic_production", messages);
            } catch (Exception e) {
                error(ctx.response(), e.getMessage());
            }
        });
        router.routeWithRegex("/mininfo/v1/logs/[^/]+").handler(routingContext -> {
            String path = routingContext.request().path();
            String topic = path.substring(path.lastIndexOf("/") + 1);
            LOG.info("现在处理的topic(appkey)为:" + topic);
            if (appkeys.contains(topic)) {
                LOG.info("经过验证,该topic(appkey)有效");
                String[] messages = routingContext.getBodyAsString().split("\n");
                //用于执行阻塞任务(有序执行和无序执行),默认顺序执行提交的阻塞任务
                vertx.executeBlocking(future -> {
                    sendMessages(sender, routingContext, topic, messages);
                    future.complete();
                }, result -> {
                });
            } else {
                LOG.info("您的topic(appkey)还没有配置,请在mysql中配置先");
                error(routingContext.response(), "please configurate " + topic + "(appkey) in Mysql first! After 10mins it`ll take action");
            }
        });
        router.route("/mininfo/v1/ip").handler(ctx -> {
            LOG.info("x-real-for" + ctx.request().getHeader("x-real-for"));
            LOG.info("x-forwarded-for" + ctx.request().getHeader("x-forwarded-for"));
            ok(ctx.response(), ctx.request().getHeader("x-forwarded-for"));
        });
        router.route("/*").handler(ctx -> error(ctx.response(), "wrong! check your path..."));
        server.requestHandler(router::accept).listen(port, result -> {
            if (result.succeeded()) {
                LOG.info("listen on port:{0}", String.valueOf(port));
                this.statistic.start(vertx);
            } else {
                LOG.error(result.cause());
                vertx.close();
            }
        });
        // A shutdown hook is useful for running cleanup before the program exits, similar to a finally block
        Runtime.getRuntime().addShutdownHook(new Thread(sender::close));
    }

    private void sendMessages(KafkaProducerWrapper sender, RoutingContext ctx, String topic, String[] messages) {
        AtomicInteger counter = new AtomicInteger(0);
        for (String message : messages) {
            if (message == null || "".equals(message)) {
                ok(ctx.response(), "Success");
                continue;
            }
            // Add the client IP to the record's ip field
            JSONObject jsonObject = JSON.parseObject(message);
            if (jsonObject.get("ip") == null) {
                LOG.info("Adding ip field");
                String ip;
                String header = ctx.request().getHeader("x-forwarded-for");
                if (!(header == null || header.trim().length() == 0 || header.trim().equals("null"))) {
                    ip = header.split(",")[0];
                } else {
                    ip = ctx.request().remoteAddress().host();
                }
                jsonObject.put("ip", ip);
                LOG.info("ip增加成功");
            }
            //topic, message, callback,以匿名函数的形式实现接口中的onCompletion函数
            sender.send(topic, jsonObject.toString(), (metadata, exception) -> {
                if (exception != null) {
                    LOG.warn(exception);
                    String msg = new JsonObject()
                            .put("error", exception.getMessage())
                            .put("commit", counter.get())
                            .encode();
                    error(ctx.response(), msg);
                    cacheLocal(jsonObject.toString(), "/home/lihui/httpkafka/data_bak/" + topic + ".txt");
                    LOG.info("连接kafka失败,写入cache缓存目录以备份数据");
                } else {
                    statistic.messages.incrementAndGet();  // Counter
                    statistic.bytes.addAndGet(message.length());
                    if (counter.incrementAndGet() == messages.length) {
                        ok(ctx.response(), "Success");
                    }
                }
            });
        }
    }

    /**
     * Cache a message locally when it could not be sent to Kafka.
     *
     * @param message   the message to cache
     * @param cachePath path of the local cache file
     */
    private void cacheLocal(String message, String cachePath) {
        try {
            FileWriter fileWriter = new FileWriter(cachePath, true);
            BufferedWriter bw = new BufferedWriter(fileWriter);
            bw.write(message);
            bw.newLine();
            bw.flush();
            bw.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Send cached data to Kafka. On success the cached file is deleted; on failure it is retried after 10 minutes.
     *
     * @param path directory holding the cached backup files
     */
    private static void sendToKafka(String path) {
        String message;
        KafkaProducerWrapper sender = new KafkaProducerWrapper();
        File file = new File(path);
        if (file.isDirectory()) {
            String[] fileList = file.list();
            if (fileList != null && fileList.length != 0) {
                LOG.info("正在将缓存目录中的备份数据发送到kafka中...");
                for (String str : fileList) {
                    String topic = str.split("\\.")[0];
                    try {
                        BufferedReader reader = new BufferedReader(new FileReader(path + str));
                        while ((message = reader.readLine()) != null) {
                            sender.send(topic, message, (metadata, exception) -> {
                                if (metadata != null) {
                                    LOG.info("缓存的备份数据正在一条一条的插入kafka中");
                                } else {
                                    //程序错误重新运行
//                                    exception.printStackTrace();
                                    LOG.error("kafka连接异常为:===> 10分钟后会自动重试," + exception.getMessage(), exception);
                                    deleteFile = false;
                                }
                            });
                        }
                        if (deleteFile) {
                            LOG.info("开始删除已经插入到kafka中的缓存备份数据");
                            deleteFile(path, topic);
                            LOG.info("删除完毕!");
                        }
                        reader.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            } else {
                LOG.info("缓存目录中没有备份文件");
            }
        }
    }

    private static void deleteFile(String path, String appkey) {
        String appkeyPath = path + "/" + appkey + ".txt";
        File file = new File(appkeyPath);
        file.delete();
        LOG.info("成功删除appkey为" + appkey + "的缓存数据");
    }

    private static Set<String> getAppkeys() {
        Set<String> appkeys = new HashSet<>();
        String sql = "select appkey from service_config_yarn_properties_table";
        try {
            Class.forName(DBDRIVER);
            Connection conn = DriverManager.getConnection(URL, USER, PASSWORD);
            PreparedStatement ps = conn.prepareStatement(sql);
            ResultSet rs = ps.executeQuery();
            while (rs.next()) {
                appkeys.add(rs.getString(1));
            }
            rs.close();
            conn.close();
        } catch (ClassNotFoundException | SQLException e) {
            e.printStackTrace();
        }
        return appkeys;
    }

    public static void main(String[] args) throws Exception {
        Timer timer = new Timer();
        // Every 10 minutes: 1. check whether the cache directory contains backup data and resend it; 2. sync appkeys from the database for security validation
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                appkeys.addAll(getAppkeys());
                LOG.info("同步完数据库中的appkey(每隔十分钟)");
                sendToKafka("/home/lihui/httpkafka/data_bak/");
//              sendToKafka("C:\\Dell\\UpdatePackage\\log");
            }
        }, 0L, 10 * 60 * 1000L);

        // Main thread: start the HTTP service
        try {
            int port = Configuration.conf.getInt("server.port");
            KafkaHttpServer front = new KafkaHttpServer();
            front.startService(port);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
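
To exercise the /mininfo/v1/logs/{appkey} endpoint, a client POSTs newline-delimited JSON records in the request body. A minimal Java client sketch, assuming the server runs locally on the configured port 20000 and that "testappkey" already exists in the MySQL appkey table (both values are placeholders):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class LogClientExample {
    public static void main(String[] args) throws Exception {
        // Two records separated by '\n', matching the split("\n") in KafkaHttpServer
        String body = "{\"event\":\"login\",\"timestamp\":\"2017-02-14T07:49:53Z\"}\n"
                + "{\"event\":\"click\"}";
        URL url = new URL("http://localhost:20000/mininfo/v1/logs/testappkey");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/json");
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        // On success the server replies 200 with {"code":0,"msg":"Success"}
        System.out.println("HTTP " + conn.getResponseCode());
    }
}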

KafkaProducerWrapper.java

package com.donews.data;

import com.typesafe.config.Config;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;


import java.util.Properties;

/**
 * Created by yuhui on 16-6-22.
 *
 * Kafka producer wrapper; messages are sent via the send() method.
 */
public class KafkaProducerWrapper {
    private Logger LOG = LoggerFactory.getLogger(KafkaProducerWrapper.class);
    private KafkaProducer<String, String> producer = init();

    private KafkaProducer<String, String> init() {
        Config conf = Configuration.conf.getConfig("kafka");
        Properties props = new Properties();
        props.put("bootstrap.servers", conf.getString("bootstrap.servers"));
        props.put("acks", conf.getString("acks"));
        props.put("retries", conf.getInt("retries"));
        props.put("batch.size", conf.getInt("batch.size"));
        props.put("linger.ms", conf.getInt("linger.ms"));
        props.put("buffer.memory", conf.getLong("buffer.memory"));
        props.put("key.serializer", conf.getString("key.serializer"));
        props.put("value.serializer", conf.getString("value.serializer"));
        LOG.info("KafkaProducer Properties: {0}", props.toString());
        return new KafkaProducer<>(props);
    }

    public void send(String topic, String message, Callback callback) {
        producer.send(new ProducerRecord<>(topic, message), callback);
    }

    public void close() {
        producer.close();
        LOG.info("Kafka Producer Closed");
    }

    public static void main(String[] args) {
        //KafkaProducerWrapper sender=new KafkaProducerWrapper();
        //sender.producer.partitionsFor("xxxxx").forEach(System.out::println);
    }
}
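
Usage of the wrapper is callback-based, mirroring the underlying KafkaProducer API. A minimal sketch (the topic name and message below are placeholders):

KafkaProducerWrapper sender = new KafkaProducerWrapper();
sender.send("appstatistic_production", "{\"event\":\"test\"}", (metadata, exception) -> {
    if (exception != null) {
        // Send failed; KafkaHttpServer falls back to the local cache directory in this case
        exception.printStackTrace();
    } else {
        System.out.println("sent to partition " + metadata.partition() + ", offset " + metadata.offset());
    }
});
sender.close();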

application.conf

server {
  port = 20000
  counter.delay = 30s
}
kafka {
  bootstrap.servers = "XXX"
  acks = all
  retries = 1
  batch.size = 1048576
  linger.ms = 1
  buffer.memory = 33554432
  key.serializer = "org.apache.kafka.common.serialization.StringSerializer"
  value.serializer = "org.apache.kafka.common.serialization.StringSerializer"
}
mysql {
  url = "jdbc:mysql://XXX/user_privileges"
  user = "XXX"
  password = "XXX"
}

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.donews.data</groupId>
    <artifactId>kafkahttp</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>com.typesafe</groupId>
            <artifactId>config</artifactId>
            <version>1.3.0</version>
        </dependency>
        <dependency>
            <groupId>io.vertx</groupId>
            <artifactId>vertx-web</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.9.0.1</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>6.0.2</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.11</version>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
            <version>4.3.3</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.5.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>


</project>