Sessions Mixed Up (会话互串)

This article walks through how to let several users stay logged in from different browser tabs at the same time: ACL-based permission control, request filtering, and creating/invalidating sessions keep user privileges safe and prevent unauthorized operations. It also covers how to restrict an account to a single concurrent login and how to manage sessions for multiple applications on one server.
Scenario:
1. Open a tab and log in to the system as Admin.
2. Open another tab and log in as User.
3. Switch back to the Admin tab and operate on data.
At this point the effective user is no longer Admin but User.
※ Early mainstream browsers had no tabs; tabbed browsing was pioneered by MyIE, a browser a Chinese developer built on top of IE.

If Sessions and Cookies are not yet clear to you, brush up on them first.
[img]http://dl2.iteye.com/upload/attachment/0115/6757/ff90d33c-2e12-3a10-a6d9-6c0cea7ca7f6.png[/img]

All tabs of one browser share a single Session (the session cookie belongs to the browser profile, not to an individual tab), so in this scenario you normally need one of the following ways to access the system:
1) Open different browsers, e.g. IE, Firefox, and Chrome.
2) Open a separate-session window of the same browser (not a window opened with Ctrl+N):
IE: File > New Session
Firefox: File > New Private Window (Ctrl+Shift+P)
Chrome: Menu > New incognito window (Ctrl+Shift+N)

Both approaches are fine for development and testing, but expecting end users to work this way is a stretch.

So how can the system support multiple users logged in simultaneously across tabs? First and most basically, use ACL permission control so that a user can never act above their privilege level; in the scenario above, the ordinary User must not end up executing Admin operations. A small sketch of the idea follows.
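A minimal sketch of that ACL check, not a full implementation: it assumes a hypothetical User.getRole() and that admin pages live under /admin/**; a real system would back this with an ACL table or a framework such as Spring Security or Apache Shiro. It can sit in the same filter/interceptor as the login check shown below, after the user has been read from the Session.

    // returns false when an ordinary user requests an admin page
    private boolean permitted(HttpServletRequest request, User user) {
        boolean adminPage = request.getRequestURI().startsWith("/admin/");
        return !adminPage || "ADMIN".equals(user.getRole());
    }

    // usage inside the filter, once "user" has been loaded from the Session:
    // if (!permitted(request, user)) {
    //     response.sendError(HttpServletResponse.SC_FORBIDDEN);
    //     return false;
    // }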

Second, allow only one user to be logged in across the tabs.
1) Filter every request: if the login page is requested while the Session already contains user information, redirect straight to a designated page.
HttpSession session = request.getSession();

User user = (User) session.getAttribute("LOGIN_USER_INFO");
if (user == null) {
    // not logged in or the session has timed out: go to the login page
    // (LOGIN_URL / HOME_URL stand for your real login and home paths)
    response.sendRedirect(LOGIN_URL);
    return false;
} else {
    // already logged in: a repeated login attempt from another tab
    // is sent straight to the home page instead
    if (LOGIN_URL.equals(request.getRequestURI())) {
        response.sendRedirect(HOME_URL);
        return false;
    }
}
return true;    // all other requests pass through
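The snippet above returns false, which matches the preHandle() of a Spring HandlerInterceptor. For reference, a minimal sketch of the same check as a plain servlet Filter; LOGIN_URL, HOME_URL and the User class are placeholders from your own application.

import java.io.IOException;
import javax.servlet.*;
import javax.servlet.http.*;

public class LoginFilter implements Filter {

    private static final String LOGIN_URL = "/login.jsp";   // assumed paths
    private static final String HOME_URL  = "/home.jsp";

    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest request = (HttpServletRequest) req;
        HttpServletResponse response = (HttpServletResponse) res;

        HttpSession session = request.getSession();
        User user = (User) session.getAttribute("LOGIN_USER_INFO");

        if (user == null && !LOGIN_URL.equals(request.getRequestURI())) {
            response.sendRedirect(LOGIN_URL);   // not logged in / session timed out
            return;
        }
        if (user != null && LOGIN_URL.equals(request.getRequestURI())) {
            response.sendRedirect(HOME_URL);    // repeated login attempt
            return;
        }
        chain.doFilter(req, res);
    }

    public void init(FilterConfig cfg) { }
    public void destroy() { }
}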

2) After a successful login, create a new session and invalidate the old one; this defends against Session Fixation / Hijacking because the previously issued session id becomes useless.
HttpSession oldSession = request.getSession(false);

if (oldSession != null && !oldSession.isNew()) {

    // copy the attributes of the old session
    Map<String, Object> attrs = new HashMap<String, Object>();
    Enumeration<String> attrNames = oldSession.getAttributeNames();
    while (attrNames.hasMoreElements()) {
        String key = attrNames.nextElement();
        attrs.put(key, oldSession.getAttribute(key));
    }

    // invalidate the old session, so a fixated or hijacked id no longer works
    oldSession.invalidate();

    // generate a new session with a fresh id
    HttpSession newSession = request.getSession(true);

    // copy the data of the old session into the new session
    for (Map.Entry<String, Object> entry : attrs.entrySet()) {
        newSession.setAttribute(entry.getKey(), entry.getValue());
    }
}
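For context, a sketch of where this runs: a hypothetical login handler that migrates the session right after authentication succeeds and only then stores the user. authenticate(), renewSession() (the snippet above wrapped in a helper), LOGIN_URL and HOME_URL are assumed names.

protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws IOException {
    // your own credential check, returning null on failure
    User user = authenticate(request.getParameter("name"),
                             request.getParameter("password"));
    if (user == null) {
        response.sendRedirect(LOGIN_URL);
        return;
    }
    renewSession(request);                      // fresh session id
    request.getSession().setAttribute("LOGIN_USER_INFO", user);
    response.sendRedirect(HOME_URL);
}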


Two more related issues:
1) Restrict a user to only one login at a time
This is not about SSO (single sign-on) but about preventing one account from being logged in from several places at once.
Keep a Map<User, Session> of user sessions on the server side (or store it in a DB).
Register an HttpSessionListener to track sessions; when a new session appears for a user who already has one, replace the old session with the new one (see the sketch below).
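A minimal sketch of that registry, assuming user names are unique keys; SessionRegistry is a hypothetical class, not a standard API. Declare it as a <listener> in web.xml and call register() right after a successful login.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.servlet.http.*;

public class SessionRegistry implements HttpSessionListener {

    // one live session per user name (could equally be persisted to a DB)
    private static final Map<String, HttpSession> SESSIONS =
            new ConcurrentHashMap<String, HttpSession>();

    /** Call right after a successful login: the earlier login, if any, is kicked out. */
    public static void register(String userName, HttpSession newSession) {
        HttpSession old = SESSIONS.put(userName, newSession);
        if (old != null && old != newSession) {
            try {
                old.invalidate();
            } catch (IllegalStateException ignored) {
                // the old session had already expired
            }
        }
    }

    public void sessionCreated(HttpSessionEvent se) { }

    public void sessionDestroyed(HttpSessionEvent se) {
        // drop the mapping when a session times out or is invalidated
        SESSIONS.values().remove(se.getSession());
    }
}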

2) Session management for multiple applications on the same server
Tomcat uses JSESSIONID as the default cookie name to identify the Session; the name can be customized per application in server.xml:
<Host name="localhost" appBase="webapps"...>
    <Context path="/appA" sessionCookieName="yourCookieName"/>
</Host>
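On Servlet 3.0+ the same thing can be done from inside each application, either with <session-config><cookie-config><name> in web.xml or programmatically at startup. A minimal sketch of the programmatic variant; the class name and cookie name are placeholders.

import javax.servlet.*;
import javax.servlet.annotation.WebListener;

@WebListener
public class SessionCookieNameInit implements ServletContextListener {

    public void contextInitialized(ServletContextEvent sce) {
        // give this application its own session cookie instead of JSESSIONID
        sce.getServletContext().getSessionCookieConfig().setName("APPA_SESSIONID");
    }

    public void contextDestroyed(ServletContextEvent sce) { }
}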
#include <dirent.h> #include <sys/statfs.h> #include "common.h" #include "storage.h" #include "iot_info.h" #include "device.h" #ifdef HEALTH_INFO_SUPPORT #include "health_info.h" #endif void storage_session_free(STORAGE_SESSION *storage_session); void storage_server_timeout_handle_immediately(STORAGE_SERVER *storage_server); /*---------------------------------------------------------*/ void storage_delete_posted_session(STORAGE_SERVER *storage_server, char *dev_id, int storage_type) { STORAGE_SESSION *session, *nsession; if (!storage_server || !dev_id) { DBG_ERR("arg NULL\n"); return; } if (STORAGE_STREAMING == storage_type) { list_for_each_entry_safe(session, nsession, &storage_server->stream_session, list) { if (!strncmp(session->dev_id, dev_id, LEN_MAX_ID) && !session->uploading_flag && (!session->stream_send_to_cloud_flag || session->delete_flag)) { storage_session_free(session); } } } else if (STORAGE_SNAPSHOT == storage_type) { list_for_each_entry_safe(session, nsession, &storage_server->snapshot_session, list) { DBG_DBG("%s %s %d %d %d\n", dev_id,session->dev_id, session->uploading_flag, session->snapshot_send_to_cloud_flag, session->delete_flag); if (!strncmp(session->dev_id, dev_id, LEN_MAX_ID) && !session->uploading_flag && (!session->snapshot_send_to_cloud_flag || session->delete_flag)) { storage_session_free(session); } } } } static void storage_session_telemetry_failure_count(STORAGE_SESSION *storage_session) { DEV_INFO *dev_info; MEDIACENTER_CTX *mediacenter_ctx; if (!storage_session) { DBG_ERR("arg NULL\n"); return; } mediacenter_ctx = storage_session->storage_server->worker_ctx->top_ctx; dev_info = get_dev_info(mediacenter_ctx, storage_session->dev_id, storage_session->dev_ip); if (dev_info && storage_session->post_success && (dev_info->telemetry.relay_clips_failure_count > 0)) { dev_info->telemetry.relay_clips_failure_count--; #ifdef HEALTH_INFO_SUPPORT dev_update_health_info(dev_info->dev_id); #endif } return; } void storage_delete_sessions_of_device(MEDIACENTER_CTX *mediacenter_ctx, char *dev_id) { WORKER_CTX *worker_ctx = NULL; STORAGE_SERVER *storage_server = NULL; STORAGE_SESSION *session = NULL, *nsession = NULL; if (!mediacenter_ctx || !dev_id) { DBG_ERR("arg NULL\n"); return; } if (list_empty(&mediacenter_ctx->worker_list)) { DBG_ERR("worker list empty\n"); return; } worker_ctx = (WORKER_CTX *)list_first_entry(&mediacenter_ctx->worker_list, STORAGE_SESSION, list); if (worker_ctx) { storage_server = worker_ctx->storage_server; if (storage_server) { list_for_each_entry_safe(session, nsession, &storage_server->stream_session, list) { if (!strncmp(session->dev_id, dev_id, LEN_MAX_ID)) { storage_session_free(session); } } list_for_each_entry_safe(session, nsession, &storage_server->snapshot_session, list) { if (!strncmp(session->dev_id, dev_id, LEN_MAX_ID)) { storage_session_free(session); } } } } } int storage_session_get_push_status(STORAGE_SESSION *session) { DEV_INFO *dev_info; MEDIACENTER_CTX *mediacenter_ctx; if (!session) { DBG_ERR("arg NULL\n"); return -1; } mediacenter_ctx = session->storage_server->worker_ctx->top_ctx; dev_info = get_dev_info(mediacenter_ctx, session->dev_id, session->dev_ip); if (dev_info) { if (STORAGE_STREAMING == session->storage_type) { return dev_info->alarm_stream_push_status; } else { return dev_info->alarm_snapshot_push_status; } } return -1; } void storage_session_set_push_status(STORAGE_SESSION *session, int status) { DEV_INFO *dev_info; MEDIACENTER_CTX *mediacenter_ctx; if (!session) { DBG_ERR("arg NULL\n"); return; } 
mediacenter_ctx = session->storage_server->worker_ctx->top_ctx; dev_info = get_dev_info(mediacenter_ctx, session->dev_id, session->dev_ip); if (dev_info) { DBG_INFO("It is alarm re-upload below\n"); if (STORAGE_STREAMING == session->storage_type) { dev_set_alarm_stream_status(dev_info, status); } else { dev_set_alarm_snapshot_status(dev_info, status); } } return; } void storage_session_reupload_timeout(struct uloop_timeout* tmo) { //MYDEBUG("STORAGE SESSION REUPLOAD CHECK TIMEOUT\n"); unsigned long long sent; STORAGE_SESSION *storage_session = container_of(tmo, STORAGE_SESSION, reupload_tmo); if (!storage_session) { DBG_ERR("arg NULL\n"); return; } uloop_timeout_set(&storage_session->reupload_tmo, storage_session->reupload_idle_time); if (storage_session->sock) { sent = storage_session->reupload_total_load - storage_session->sock->write_buf_length; if (sent <= storage_session->reupload_last_sent) { DBG_ERR("STORAGE SESSION REUPLOAD TIMEOUT\n"); tpsocket_free(storage_session->sock); } else { storage_session->reupload_last_sent = sent; storage_session->reupload_last_load = storage_session->reupload_total_load; } } } bool storage_session_write_list_force(STORAGE_SESSION *storage_session, struct list_head *head, bool force) { struct tpsocket_buf *buf; unsigned long long to_send = 0; if (!storage_session || !head) { DBG_ERR("arg NULL\n"); return false; } /* data length */ list_for_each_entry(buf, head, list) { to_send += tpbuf_data_len(buf); } /* move bufs to sock */ if (false == tpsocket_write_list_force(storage_session->sock, head, force)) { DBG_ERR("tpsocket_write_list error\n"); return false; } /* calculate total load */ storage_session->reupload_total_load += to_send; return true; } void storage_session_stream_add_tail(STORAGE_SESSION *storage_session) { struct list_head head; if (!storage_session) { DBG_ERR("arg NULL\n"); return; } if (storage_session->chunked) { INIT_LIST_HEAD(&head); tpsocket_chunk_encode(&head);//generate chunk end '0\r\n' tpsocket_write_list_force(storage_session->sock, &head, true); } tpsocket_write_list_force(storage_session->sock, &head, true); } void storage_session_snapshot_add_tail(STORAGE_SESSION *storage_session) { struct tpsocket_buf *mybuf = NULL; struct list_head head; if (!storage_session) { DBG_ERR("arg NULL\n"); return; } INIT_LIST_HEAD(&head); mybuf = tpbuf_snprintf(LEN_MAX_128, "\r\n"); if (mybuf) { list_add_tail(&mybuf->list, &head); } if (storage_session->region_buf_len) { mybuf = tpbuf_snprintf(LEN_MAX_128*4, "%s", storage_session->region_buf); if (mybuf) { list_add_tail(&mybuf->list, &head); } } mybuf = tpbuf_snprintf(LEN_MAX_128, "--%s--", SNAPSHOT_FILE_BOUNDARY);//generate snapshot multi-part end if (mybuf) { list_add_tail(&mybuf->list, &head); } if (storage_session->chunked) { tpsocket_chunk_encode(&head); } tpsocket_write_list_force(storage_session->sock, &head, true); } int storage_session_set_region(STORAGE_SESSION *storage_session, TRIGGER_REGION *region) { if (!storage_session || !region) { DBG_ERR("arg NULL\n"); return -1; } if (0 == region->x1 && 0 == region->y1 && 0 == region->x2 && 0 == region->y2) { } else { storage_session->region_buf_len = snprintf(storage_session->region_buf, LEN_MAX_128*4, "--%s\r\n" "Content-Disposition: form-data;name=\"region\"\r\n\r\n" "{\"top\": %d, \"bottom\": %d, \"left\": %d, \"right\": %d}\r\n", SNAPSHOT_FILE_BOUNDARY, region->y1, region->y2, region->x1, region->x2); MYDEBUG("%s", storage_session->region_buf); } return 0; } int storage_session_set_event_timestamp(STORAGE_SESSION *storage_session, time_t 
timestamp) { if (!storage_session) { DBG_ERR("arg NULL\n"); return -1; } //storage_session->event_timestamp = timestamp - common_get_time_diff(); storage_session->event_timestamp = timestamp; return 0; } int storage_session_set_event_type(STORAGE_SESSION *storage_session, enum EVENT_TYPE event_type) { if (!storage_session) { DBG_ERR("arg NULL\n"); return -1; } storage_session->event_type = event_type; return 0; } void storage_update_list(STORAGE_SERVER *storage_server, int flag) { STORAGE_SESSION *storage_session; char list_path[LEN_MAX_PATH] = {0}; FILE *fd; if (!storage_server) { DBG_ERR("arg NULL\n"); return; } if (STORAGE_STREAMING == flag) { memset(list_path, 0, LEN_MAX_PATH); snprintf(list_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, STORAGE_STREAMING_LIST); fd = fopen(list_path, "w"); if (fd) { list_for_each_entry(storage_session, &storage_server->stream_session, list) { fwrite(storage_session, sizeof(*storage_session), 1, fd); } fclose(fd); } } if (STORAGE_SNAPSHOT == flag) { memset(list_path, 0, LEN_MAX_PATH); snprintf(list_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, STORAGE_SNAPSHOT_LIST); fd = fopen(list_path, "w"); if (fd) { list_for_each_entry(storage_session, &storage_server->snapshot_session, list) { fwrite(storage_session, sizeof(*storage_session), 1, fd); } fclose(fd); } } } void storage_session_add_session(STORAGE_SESSION *storage_session) { STORAGE_SERVER *storage_server; STORAGE_SESSION *session, *nsession; if (!storage_session) { MYDEBUG("arg NULL\n"); return; } storage_server = storage_session->storage_server; if (STORAGE_STREAMING == storage_session->storage_type) { MYDEBUG("storage stream session = %p\n", storage_session); if (false == storage_session->storage_success) { storage_session_free(storage_session); return; } list_for_each_entry_safe(session, nsession, &storage_server->stream_session, list) { if (!strncmp(session->dev_id, storage_session->dev_id, LEN_MAX_ID) && !session->uploading_flag) { if (!strncmp(session->stream_path, storage_session->stream_path, LEN_MAX_PATH)) memset(session->stream_path, 0, LEN_MAX_PATH);//if old session's path is the same as new, do not delete file storage_session_free(session); } } if (get_dev_info(storage_server->worker_ctx->top_ctx, storage_session->dev_id, NULL)) { list_add_tail(&storage_session->list, &storage_server->stream_session); storage_update_list(storage_server, STORAGE_STREAMING); } else { //if device not exist, free session storage_session_free(storage_session); } } else { MYDEBUG("storage snapshot session = %p\n", storage_session); if (false == storage_session->storage_success) { storage_session_free(storage_session); return; } list_for_each_entry_safe(session, nsession, &storage_server->snapshot_session, list) { if (!strncmp(session->dev_id, storage_session->dev_id, LEN_MAX_ID) && !session->uploading_flag) { if (!strncmp(session->snapshot_path, storage_session->snapshot_path, LEN_MAX_PATH)) memset(session->snapshot_path, 0, LEN_MAX_PATH); storage_session_free(session); } } if (get_dev_info(storage_server->worker_ctx->top_ctx, storage_session->dev_id, NULL)) { list_add_tail(&storage_session->list, &storage_server->snapshot_session); storage_update_list(storage_server, STORAGE_SNAPSHOT); } else { //if device not exist, free session storage_session_free(storage_session); } } return; } bool storage_session_add_fixed_header(STORAGE_SESSION *storage_session) { struct tpsocket_buf *buf; if (!storage_session) { DBG_ERR("arg NULL\n"); return false; } /* add */ buf = tpbuf_snprintf(512, "%x\r\n" 
"--data-boundary--\r\n" "Content-Length: 218\r\n" "Content-Type: text/http-header\r\n" "\r\n" "HTTP/1.0 200 OK\r\n" "Content-Type: multipart/x-mixed-replace;boundary=data-boundary--\r\n" "X-Encrypt-Type: PLAIN\r\n" "Pragma: no-cache\r\n" "Cache-Control: no-cache\r\n" "Content-Length: -1\r\n" "Transfer-Encoding: chunked\r\n" "Connection: close\r\n\r\n" "\r\n", 292); if (buf) { list_add_tail(&buf->list, &storage_session->headers); } return true; } bool storage_stream_consumer_post(STORAGE_SESSION *storage_session) { struct list_head head; struct tpsocket_buf *buf; DEV_INFO *dev_info; char *request = NULL; if (!storage_session) { DBG_ERR("arg NULL\n"); return false; } dev_info = get_dev_info(storage_session->storage_server->worker_ctx->top_ctx, storage_session->dev_id, NULL); if (!dev_info) { DBG_ERR("get dev info failed\n"); return false; } INIT_LIST_HEAD(&head); char url[256] = {0}; request = strstr(dev_info->relay_storage_url, "/v1"); snprintf(url, 256, "%sactivity?deviceid=%s" "&eventid=%s" "&type=mixed" "&resolution=HD", request, storage_session->dev_id, storage_session->event_id); //MYDEBUG("storage token:%s\n", storage_session->xtoken); //MYDEBUG("storage cookie:%s\n", storage_session->cookie); buf = tpbuf_snprintf(1024, "POST %s HTTP/1.1\r\n" "User-Agent: %s/%s %s\r\n" // MODEL/FW_VERSION PROGRAM_NAME/PROGRAM_VERSION "Host: %s\r\n" "Transfer-Encoding: chunked\r\n" //http chunked transfer encoding "Content-Type: multipart/x-mixed-replace;boundary=%s\r\n" "X-token: %s\r\n" // X-token "X-Audio-Codec: %s\r\n" "Cookie: %s\r\n" "\r\n", url, dev_info->dev_model?dev_info->dev_model:"*", dev_info->sw_ver?dev_info->sw_ver:"*", THIS_PROCESS, storage_session->host, "data-boundary--", storage_session->xtoken, dev_info->audio_fmt, storage_session->cookie); list_add_tail(&buf->list, &head); /* header */ list_splice_tail_init(&storage_session->headers, &head); /* ready to write */ debug_show_tpbuf_list(&head, "STORAGE STREAM POST:", 3); if (false == storage_session_write_list_force(storage_session, &head, false)) { DBG_ERR("storage_session_write_list_force error\n"); tpsocket_free_buf(&head, NULL, 0); return false; } return true; } bool storage_snapshot_consumer_post(STORAGE_SESSION *storage_session) { struct list_head head; struct tpsocket_buf *buf; DEV_INFO *dev_info; char *request = NULL; if (!storage_session) { DBG_ERR("arg NULL\n"); return false; } INIT_LIST_HEAD(&head); dev_info = get_dev_info(storage_session->storage_server->worker_ctx->top_ctx, storage_session->dev_id, NULL); if (!dev_info) { DBG_ERR("get dev info failed\n"); return false; } /* add POST header */ char url[256] = {0}; int sub_header_len; char sub_header[512] = {0}; request = strstr(dev_info->storage_url, "/v1"); snprintf(url, 256, "%sdevice/%s/activity/%s/snapshot", request, storage_session->dev_id, storage_session->event_id); sub_header_len = snprintf(sub_header, 512, "--%s\r\n" "Content-Disposition: form-data;name=\"snapshot\";filename=\"example.jpg\"\r\n" "Content-Type: image/jpeg\r\n\r\n", SNAPSHOT_FILE_BOUNDARY); int content_len = sub_header_len+storage_session->content_len+strlen("\r\n")+storage_session->region_buf_len+ strlen("--")+strlen(SNAPSHOT_FILE_BOUNDARY)+strlen("--"); buf = tpbuf_snprintf(1024, "POST %s HTTP/1.1\r\n" "User-Agent: %s/%s %s\r\n" // MODEL/FW_VERSION PROGRAM_NAME/PROGRAM_VERSION "X-token: %s\r\n" // X-token "Host: %s\r\n" "Content-Length: %d\r\n" "Content-Type: multipart/form-data;boundary=%s\r\n" "\r\n" "%s",//sub_header url, dev_info->dev_model?dev_info->dev_model:"*", 
dev_info->sw_ver?dev_info->sw_ver:"*", THIS_PROCESS, storage_session->xtoken, storage_session->host, content_len, SNAPSHOT_FILE_BOUNDARY, sub_header); list_add_tail(&buf->list, &head); debug_show_tpbuf_list(&head, "STORAGE SNAPSHOT POST:", 3); if (false == storage_session_write_list_force(storage_session, &head, false)) { DBG_ERR("storage_session_write_list_force error\n"); tpsocket_free_buf(&head, NULL, 0); return false; } return true; } bool storage_file_produce(STORAGE_SESSION *storage_session, struct list_head *buf) { if (!storage_session) { DBG_ERR("arg NULL\n"); return false; } if (!storage_session->sock) { DBG_ERR("%p arg NULL, file = %p\n", storage_session, storage_session->file); return false; } if (tpsocket_writable(storage_session->sock)) { if (!list_empty(buf) && storage_session->chunked && !tpsocket_chunk_encode(buf)) { tpsocket_free_buf(buf, "PRODUCE", 0); return false; } storage_session_write_list_force(storage_session, buf, true); } return true; } bool storage_read_file(struct tpsocket_handler*handler, struct list_head*buf, int event) { STORAGE_SESSION *storage_session = handler->priv; //MYDEBUG("storage read file event = %s, sock = %p, consumer = %p\n", tpsocket_event_name(event), sock, storage_session); switch(event) { case TPSOCKET_EVENT_CONNECTED: break; case TPSOCKET_EVENT_REQ_HEAD: break; case TPSOCKET_EVENT_RSP_HEAD: break; case TPSOCKET_EVENT_UPGRADE: break; case TPSOCKET_EVENT_SUB_HEAD: case TPSOCKET_EVENT_STREAM: case TPSOCKET_EVENT_MESSAGE: if (false == storage_file_produce(storage_session, buf)) { return false; } goto out; //break; case TPSOCKET_EVENT_RESET: break; case TPSOCKET_EVENT_CLOSED: if (storage_session) { storage_session->file = NULL; if (storage_session->sock) { if (!list_empty(buf)) { if (storage_session->chunked) { tpsocket_chunk_encode(buf); tpsocket_write_list_force(storage_session->sock, buf, true); } } if (STORAGE_STREAMING == storage_session->storage_type) { storage_session_stream_add_tail(storage_session); } else { storage_session_snapshot_add_tail(storage_session); } } } break; case TPSOCKET_EVENT_ERROR: default: break; } tpsocket_free_buf(buf, tpsocket_event_name(event), 0); out: return true; } bool storage_start_read(STORAGE_SESSION *storage_session) { if (!storage_session) { DBG_ERR("arg NULL\n"); return false; } struct tpsocket_handler client = { .cb = storage_read_file, .flowcontrol = 10*1024, .priv = storage_session, .read_buf_max = STORAGE_MEMORY_CACHE_LIMIT, }; if (STORAGE_STREAMING == storage_session->storage_type) { storage_session->file = tpsocket_new(NULL, "stream", NULL, storage_session->stream_path, TPSOCKET_TYPE_FILE, &client); if (!storage_session->file) { DBG_ERR("open %s failed\n", storage_session->stream_path); return false; } } else { storage_session->file = tpsocket_new(NULL, "read", NULL, storage_session->snapshot_path, TPSOCKET_TYPE_FILE, &client); if (!storage_session->file) { DBG_ERR("open %s failed\n", storage_session->snapshot_path); return false; } } DBG_DBG("session = %p, sock = %p, open file = %p\n", storage_session, storage_session->sock, storage_session->file); return true; } bool tpsocket_event_storage_stream_to_cloud(struct tpsocket_handler *handler, struct list_head *buf, int event) { //debug_show_tpbuf_list(buf, "TEST", 3); struct tpsocket_fd *sock = container_of(handler, struct tpsocket_fd, handler); STORAGE_SESSION *storage_session = handler->priv; int err_code = 0; if (event != TPSOCKET_EVENT_WRITABLE) { MYDEBUG("storage stream to cloud event = %s, sock = %p, session = %p\n", tpsocket_event_name(event), 
sock, storage_session); if (storage_session) { DBG_DBG("Device Id = %s, session sock = %p\n", storage_session->dev_id, storage_session->sock); } } switch(event) { case TPSOCKET_EVENT_LISTEN: break; case TPSOCKET_EVENT_SHUTDOWN: break; case TPSOCKET_EVENT_ACCEPT: break; case TPSOCKET_EVENT_CONNECTED: MYDEBUG("STORAGESTREAM2CLOUD: %s:%s Connected\n", sock->addr, sock->port); if (false == storage_stream_consumer_post(storage_session)) return false; if (false == storage_start_read(storage_session)) { DBG_ERR("read stream file failed\n"); return false; } break; case TPSOCKET_EVENT_REQ_HEAD: break; case TPSOCKET_EVENT_RSP_HEAD: break; case TPSOCKET_EVENT_SUB_HEAD: break; case TPSOCKET_EVENT_UPGRADE: break; case TPSOCKET_EVENT_WRITABLE: if (storage_session->file && !list_empty(&(storage_session->file->parser.buf))) { if (storage_session->chunked && !tpsocket_chunk_encode(&(storage_session->file->parser.buf))) { tpsocket_free_buf(&(storage_session->file->parser.buf), tpsocket_event_name(event), 0); break; } storage_session_write_list_force(storage_session, &(storage_session->file->parser.buf), true); storage_session->file->read_buf_length = 0; } break; case TPSOCKET_EVENT_STREAM: return true; case TPSOCKET_EVENT_MESSAGE: err_code = common_check_error_code(buf); if (!err_code) { DBG_DBG("storage stream push success\n"); storage_session->post_success = 1; #ifndef TAPO_CAMERA } else if (err_code == -98400) { //If the recording/relay server finds out that the device is not eligible for cloud storage, // it returns an error code of -98400 upon which the firmware updates the cloud storage feature of that device. DEV_INFO *dev_info = get_dev_info(storage_session->storage_server->worker_ctx->top_ctx, storage_session->dev_id, NULL); if (dev_info) { dev_info->cloud_storage = false; } // mark as success here to avoid retrying. 
storage_session->post_success = 1; #endif } tpsocket_free_buf(buf, tpsocket_event_name(event), 0); common_tpsocket_close_immediately(sock); break; case TPSOCKET_EVENT_RESET: break; case TPSOCKET_EVENT_KEEPALIVE: common_tpsocket_close_immediately(sock); break; case TPSOCKET_EVENT_CLOSED: if (storage_session) { tpsocket_unbind(sock, &storage_session->sock); uloop_timeout_cancel(&storage_session->reupload_tmo); tpsocket_free2(&storage_session->file, storage_session); if (storage_session->post_success) { storage_session->stream_send_to_cloud_flag = 0; } storage_session->uploading_flag = 0; storage_session_set_push_status(storage_session, 0); /* handle timeout immediately */ storage_server_timeout_handle_immediately(storage_session->storage_server); } break; case TPSOCKET_EVENT_ERROR: default: break; } tpsocket_free_buf(buf, tpsocket_event_name(event), 0); return true; } bool tpsocket_event_storage_snapshot_to_cloud(struct tpsocket_handler *handler, struct list_head *buf, int event) { //debug_show_tpbuf_list(buf, "TEST", 3); struct tpsocket_fd *sock = container_of(handler, struct tpsocket_fd, handler); STORAGE_SESSION *storage_session = handler->priv; if (event != TPSOCKET_EVENT_WRITABLE) { MYDEBUG("storage snapshot to cloud event = %s, sock = %p, session = %p\n", tpsocket_event_name(event), sock, storage_session); if (storage_session) { DBG_DBG("Device Id = %s, session sock = %p\n", storage_session->dev_id, storage_session->sock); } } switch(event) { case TPSOCKET_EVENT_LISTEN: break; case TPSOCKET_EVENT_SHUTDOWN: break; case TPSOCKET_EVENT_ACCEPT: break; case TPSOCKET_EVENT_CONNECTED: MYDEBUG("STORAGESNAPSHOT2CLOUD: %s:%s Connected\n", sock->addr, sock->port); if (false == storage_snapshot_consumer_post(storage_session)) return false; if (false == storage_start_read(storage_session)) { DBG_ERR("read snapshot file failed\n"); return false; } break; case TPSOCKET_EVENT_REQ_HEAD: break; case TPSOCKET_EVENT_RSP_HEAD: break; case TPSOCKET_EVENT_SUB_HEAD: break; case TPSOCKET_EVENT_UPGRADE: break; case TPSOCKET_EVENT_WRITABLE: if (storage_session->file && !list_empty(&(storage_session->file->parser.buf))) { if (storage_session->chunked && !tpsocket_chunk_encode(&(storage_session->file->parser.buf))) { tpsocket_free_buf(&(storage_session->file->parser.buf), tpsocket_event_name(event), 0); break; } storage_session_write_list_force(storage_session, &(storage_session->file->parser.buf), true); storage_session->file->read_buf_length = 0; } break; case TPSOCKET_EVENT_STREAM: return true; case TPSOCKET_EVENT_MESSAGE: if (!common_check_error_code(buf)) { DBG_DBG("storage snapshot push success\n"); storage_session->post_success = 1; } tpsocket_free_buf(buf, tpsocket_event_name(event), 0); common_tpsocket_close_immediately(sock); break; case TPSOCKET_EVENT_RESET: break; case TPSOCKET_EVENT_KEEPALIVE: common_tpsocket_close_immediately(sock); break; case TPSOCKET_EVENT_CLOSED: if (storage_session) { tpsocket_unbind(sock, &storage_session->sock); uloop_timeout_cancel(&storage_session->reupload_tmo); tpsocket_free2(&storage_session->file, storage_session); if (storage_session->post_success) { storage_session->snapshot_send_to_cloud_flag = 0; //storage_session_free(storage_session); } else { storage_session->retry_count++; //list_add_tail(&storage_session->list, &storage_session->storage_server->snapshot_session); } storage_session->uploading_flag = 0; storage_session_set_push_status(storage_session, 0); /* handle timeout immediately */ storage_server_timeout_handle_immediately(storage_session->storage_server); } 
break; case TPSOCKET_EVENT_ERROR: default: break; } tpsocket_free_buf(buf, tpsocket_event_name(event), 0); return true; } bool storage_stream_upload(STORAGE_SESSION *storage_session) { DEV_INFO *dev_info = NULL; STORAGE_SERVER *storage_server; if (!storage_session) { DBG_ERR("arg NULL\n"); return false; } if (storage_session->sock) { DBG_DBG("sock exist\n"); return true; } storage_server = storage_session->storage_server; uloop_timeout_set(&storage_session->reupload_tmo, storage_session->reupload_idle_time); dev_info = get_dev_info(storage_server->worker_ctx->top_ctx, storage_session->dev_id, NULL); if (!dev_info) { DBG_ERR("Not find device\n"); return false; } /* create socket */ struct tpsocket_handler storage_stream_to_cloud = { .cb = tpsocket_event_storage_stream_to_cloud, .cer = MEDIACENTER_CERTFILE, .write_buf_max = STORAGE_MEMORY_CACHE_LIMIT, }; storage_session->retry_count++; if ((storage_session->sock = tpsocket_from_url(dev_info->relay_storage_url, &storage_stream_to_cloud))) { //if (storage_session->sock = tpsocket_from_url("http://192.168.137.103:8010", &storage_stream_to_cloud)) { DBG_DBG("Create session sock = %p, session = %p\n", storage_session->sock, storage_session); storage_session->sock->handler.priv = storage_session; storage_session->uploading_flag = 1; storage_session_set_push_status(storage_session, 1); } return true; } void storage_set_iot_info(void *arg, const char *host, const char *cookie, const char *token, const char *event_id) { STORAGE_SESSION *storage_session; #ifdef TAPO_CAMERA if (!arg || !host || !token || !event_id) { #else if (!arg || !host || !cookie || !token || !event_id) { #endif DBG_ERR("arg NULL\n"); return; } storage_session = (STORAGE_SESSION*)arg; snprintf(storage_session->host, LEN_MAX_HOST, "%s", host); snprintf(storage_session->cookie, LEN_MAX_COOKIE, "%s", cookie); snprintf(storage_session->xtoken, LEN_MAX_TOKEN, "%s", token); snprintf(storage_session->event_id, LEN_MAX_EVENT_ID, "%s", event_id); MYDEBUG("host: %s\n", storage_session->host); //MYDEBUG("cookie: %s\n", storage_session->cookie); //MYDEBUG("xtoken: %s\n", storage_session->xtoken); MYDEBUG("event_id: %s\n", storage_session->event_id); return; } void storage_get_iot_info_cb(void *priv, void *iot_info, char *host, char *cookie, char *token, char *event_id, int err_code) { STORAGE_SESSION *storage_session; if (!priv) { DBG_ERR("arg NULL\n"); return; } storage_session = (STORAGE_SESSION*)priv; if (err_code) { DBG_ERR("storage get iot info failed\n"); goto out; } if (!host || !cookie || !token || !event_id) { DBG_ERR("storage get iot info param error\n"); goto out; } storage_set_iot_info(storage_session, host, cookie, token, event_id); MYDEBUG("Have got iot info, reupload now\n"); if (STORAGE_STREAMING == storage_session->storage_type) { storage_stream_upload(storage_session); return; } out: storage_session->retry_count++; return; } int storage_stream_reupload(STORAGE_SESSION *storage_session) { IOT_INFO *iot_info; if (!storage_session) { DBG_ERR("arg NULL\n"); return -1; } if (storage_session->event_id[0]) { MYDEBUG("Have got iot info, reupload now\n"); if (false == storage_stream_upload(storage_session)) { storage_session->stream_send_to_cloud_flag = 0; } } else { //find event id iot_info = iot_info_find(storage_session->storage_server->worker_ctx->iot_info_server, storage_session->dev_id , storage_session->count_id); if (iot_info) { if (iot_info->ready) { #ifdef TAPO_CAMERA storage_get_iot_info_cb((void*)storage_session, NULL, iot_info->host, iot_info->cookie, iot_info->xtoken, 
iot_info->alarm_id, 0); #else storage_get_iot_info_cb((void*)storage_session, NULL, iot_info->host, iot_info->cookie, iot_info->token, iot_info->event_id, 0); #endif if (IOT_INFO_CONSUMER_NUM == iot_info->consumer_used_count) { iot_info_free(iot_info); } } else { storage_session->retry_count++; } } else { storage_session->stream_send_to_cloud_flag = 0; } } return 0; } void storage_force_delete_session(STORAGE_SERVER *storage_server) { struct statfs diskInfo; STORAGE_SESSION *stream_session, *nstream_session; int force = 0; if (!storage_server) { DBG_ERR("arg NULL\n"); return; } retry: if (force++ < 2 && !statfs(STORAGE_ROOT_PATH, &diskInfo)) { int free_size = (diskInfo.f_bsize * diskInfo.f_bfree) >> 20; if (free_size < 20) { MYDEBUG("FREE MEMORY %u MB!\n", free_size); if (free_size < 8) { list_for_each_entry_safe(stream_session, nstream_session, &storage_server->stream_session, list) { if (!stream_session->iot_consumer) { if ((force == 1 && stream_session->delete_flag) || (force == 2 && !stream_session->uploading_flag)) { storage_session_free(stream_session); } } } goto retry; } } } } void storage_check_list_to_upload(STORAGE_SERVER *storage_server) { STORAGE_SESSION *stream_session, *nstream_session; int ret = 0; if (!storage_server) { DBG_ERR("arg NULL\n"); return; } list_for_each_entry_safe(stream_session, nstream_session, &storage_server->stream_session, list) { if (!stream_session->iot_consumer) {//make sure alarm push finished //MYDEBUG("find storage stream session = %p\n", stream_session); if (stream_session->uploading_flag) { MYDEBUG("stream session %p uploading push busy ...\n", stream_session); break; } ret = storage_session_get_push_status(stream_session); if (1 == ret) { MYDEBUG("this stream session %p is ready, but other session of the same device push busy ...\n", stream_session); break; } else if (-1 == ret) { MYDEBUG("device not found\n"); break; } if (stream_session->stream_send_to_cloud_flag && (stream_session->retry_count < 3)) { MYDEBUG("stream session %p do upload!\n", stream_session); storage_stream_reupload(stream_session); } else { //DBG_DBG("stream session %p can be deleted\n", stream_session); stream_session->delete_flag = true; } } else { MYDEBUG("waitting iot session %p closed\n", stream_session->iot_consumer); } } } void storage_server_timeout_handle_immediately(STORAGE_SERVER *storage_server) { MYDEBUG("storage_server_timeout_handle_immediately ...\n"); if (!storage_server) { DBG_ERR("arg NULL\n"); return; } uloop_timeout_set(&storage_server->tmo, 100); } void storage_server_timeout_cb(struct uloop_timeout *tmo) { //MYDEBUG("storage server timeout cb ...\n"); STORAGE_SERVER *storage_server = container_of(tmo, STORAGE_SERVER, tmo); if (!storage_server) { DBG_ERR("storage_server NULL\n"); return; } storage_force_delete_session(storage_server); storage_check_list_to_upload(storage_server); uloop_timeout_set(tmo, 10*1000); } void storage_session_set_send_flag(void *arg, int flag) { STORAGE_SESSION *storage_session = NULL; if (!arg) { DBG_ERR("arg NULL\n"); return; } storage_session = (STORAGE_SESSION*)arg; if (STORAGE_STREAMING == storage_session->storage_type) { storage_session->stream_send_to_cloud_flag = flag; } else { storage_session->snapshot_send_to_cloud_flag = flag; } return; } void storage_session_set_iot_consumer_null(void *arg) { STORAGE_SESSION *storage_session; if (!arg) { DBG_ERR("arg NULL\n"); return; } storage_session = (STORAGE_SESSION*)arg; storage_session->iot_consumer = NULL; storage_update_list(storage_session->storage_server, 
storage_session->storage_type); return; } int storage_delete_empty_dir(char *filedir) { int count = 0; DIR *dir = NULL; struct dirent *Dirent = NULL; //char cmd[128] = {0}; if(!filedir){ DBG_ERR("arg NULL\n"); return -1; } dir = opendir(filedir); if(dir == NULL){ DBG_NOTICE("opendir:%s error:%s\n", filedir, strerror(errno)); return -1; } while((Dirent = readdir(dir)) != NULL){ if(0 != strncmp(Dirent->d_name, ".", sizeof(".")) && 0 != strncmp(Dirent->d_name, "..", sizeof(".."))) { count++; } } closedir(dir); if(!count){ //memset(cmd, '\0', 128); //snprintf(cmd, 128, "rm -rf %s", filedir); //if (system(cmd) < 0) { if (tpcom_system("rm", ("-rf"), ("%s", filedir)) < 0) { DBG_ERR("remove:%s error:%s\n", filedir, strerror(errno)); return -1; } DBG_DBG("remove dir:%s ok\n", filedir); } else { DBG_DBG("has %d file in %s\n", count, filedir); } return 0; } void storage_delete_file(char *file) { char *pos; char dev_id[LEN_MAX_ID] = {0}; char count_id[LEN_MAX_ID] = {0}; char dir[LEN_MAX_PATH] = {0}; if (!file) { DBG_ERR("arg NULL\n"); return; } if (!file[0]) { DBG_DBG("No file to delete\n"); return; } pos = file+strlen(STORAGE_ROOT_PATH); DBG_DBG("pos = %s\n", pos); sscanf(pos, "/%[^/]/%[^/]/%*s", dev_id, count_id); DBG_DBG("dev_id = %s, count_id = %s\n", dev_id, count_id); /* delete file */ remove(file); /* delete count id dir */ memset(dir, 0, LEN_MAX_PATH); snprintf(dir, LEN_MAX_PATH, "%s/%s/%s", STORAGE_ROOT_PATH, dev_id, count_id); storage_delete_empty_dir(dir); /* delete device dir */ memset(dir, 0, LEN_MAX_PATH); snprintf(dir, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, dev_id); storage_delete_empty_dir(dir); } void storage_move_file(char *file) { char *pos; char cmd[512] = {0}; char dev_id[LEN_MAX_ID] = {0}; char count_id[LEN_MAX_ID] = {0}; char dir[LEN_MAX_PATH] = {0}; char count_path[LEN_MAX_PATH] = {0}; if (!file || !file[0]) { DBG_ERR("arg NULL\n"); return; } pos = file+strlen(STORAGE_ROOT_PATH); MYDEBUG("pos = %s\n", pos); sscanf(pos, "/%[^/]/%[^/]/%*s", dev_id, count_id); MYDEBUG("dev_id = %s, count_id = %s\n", dev_id, count_id); /* remove file */ snprintf(count_path, LEN_MAX_PATH, "%s/%s/%s/", NFS_ROOT, dev_id, count_id); if (access(count_path, F_OK)) { //snprintf(cmd, 512, "mkdir -p %s", count_path); //system(cmd); tpcom_system("mkdir", ("-p"), ("%s", count_path)); } memset(cmd, 0, 512); snprintf(cmd, 512, "mv %s %s", file, count_path); DEBUG("cmd: %s\n", cmd); //system(cmd); tpcom_system("mv", (file), ("%s", count_path)); if (!access(file, F_OK)) { DBG_ERR("%s failed\n", cmd); remove(file); } /* delete count id dir */ memset(dir, 0, LEN_MAX_PATH); snprintf(dir, LEN_MAX_PATH, "%s/%s/%s", STORAGE_ROOT_PATH, dev_id, count_id); storage_delete_empty_dir(dir); /* delete device dir */ memset(dir, 0, LEN_MAX_PATH); snprintf(dir, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, dev_id); storage_delete_empty_dir(dir); } void storage_session_free(STORAGE_SESSION *storage_session) { MYDEBUG("STORAGE SESSION FREE %p\n", storage_session); if (!storage_session) { return; } list_del(&storage_session->list); uloop_timeout_cancel(&storage_session->reupload_tmo); tpsocket_free_buf(&storage_session->headers, NULL, 0); if (storage_session->sock) { storage_session_set_push_status(storage_session, 0); } tpsocket_free2(&storage_session->sock, storage_session); tpsocket_free2(&storage_session->file, storage_session); if (storage_session->iot_consumer) { alarm_consumer_set_storage_session_null(storage_session->iot_consumer); } /* telemetry */ storage_session_telemetry_failure_count(storage_session); /* delete file */ 
if (STORAGE_STREAMING == storage_session->storage_type) { if (STORAGE_DEBUG && !access("/nfsroot", F_OK)) { storage_move_file(storage_session->stream_path); } else { storage_delete_file(storage_session->stream_path); } } else { if (STORAGE_DEBUG && !access("/nfsroot", F_OK)) { storage_move_file(storage_session->snapshot_path); } else { storage_delete_file(storage_session->snapshot_path); } } free(storage_session); } bool tpsocket_event_consumer_to_storage(struct tpsocket_handler *handler, struct list_head *buf, int event) { struct tpsocket_fd *sock = container_of(handler, struct tpsocket_fd, handler); STORAGE_SESSION *storage_session = (STORAGE_SESSION*)handler->priv; if (STORAGE_SNAPSHOT == storage_session->storage_type) { MYDEBUG("storage snpashot cb: sock=%p, event=%d\n", sock, event); } switch(event) { case TPSOCKET_EVENT_CONNECTED: break; case TPSOCKET_EVENT_REQ_HEAD: break; case TPSOCKET_EVENT_RSP_HEAD: break; case TPSOCKET_EVENT_SUB_HEAD: break; case TPSOCKET_EVENT_UPGRADE: break; case TPSOCKET_EVENT_MESSAGE: break; case TPSOCKET_EVENT_STREAM: break; case TPSOCKET_EVENT_RESET: break; case TPSOCKET_EVENT_CLOSED: if (storage_session) { storage_session->file = NULL; list_del_init(&storage_session->list); storage_session_add_session(storage_session); } break; case TPSOCKET_EVENT_ERROR: MYDEBUG("tpsocket_event_consumer_to_storage ERROR %p\n", storage_session); storage_session->storage_success = false; default: break; } tpsocket_free_buf(buf, tpsocket_event_name(event), 0); return true; } STORAGE_SESSION *storage_session_new(char *path, STORAGE_SERVER *storage_server) { STORAGE_SESSION *storage_session; if (!path || ! storage_server) { DBG_ERR("arg NULL\n"); return NULL; } storage_session = (STORAGE_SESSION*)malloc(sizeof(*storage_session)); if (!storage_session) { DBG_ERR("malloc failed\n"); return NULL; } memset(storage_session, 0, sizeof(*storage_session)); INIT_LIST_HEAD(&storage_session->list); INIT_LIST_HEAD(&storage_session->headers); storage_session->storage_server = storage_server; storage_session->reupload_idle_time = 10000; storage_session->reupload_tmo.cb = storage_session_reupload_timeout; storage_session->storage_success = true; storage_session->delete_flag = false; /* create socket */ struct tpsocket_handler client = { .cb = tpsocket_event_consumer_to_storage, .flowcontrol = 500000, .priv = storage_session }; storage_session->file = tpsocket_new(NULL, "write", NULL, path, TPSOCKET_TYPE_FILE, &client); if (!(storage_session->file)) { DBG_ERR("tpsocket_new failed\n"); free(storage_session); return NULL; } //consumer->chunked = 0; //consumer->store = 1; return storage_session; } int storage_find_file_in_list(char *dev_id, char *timestamp_str, struct list_head *head) { STORAGE_SESSION *session; int timestamp; if (!dev_id || !timestamp_str || !head) { DBG_ERR("arg NULL\n"); return -1; } timestamp = atoi(timestamp_str); list_for_each_entry(session, head, list) { if (!strncmp(session->dev_id, dev_id, LEN_MAX_ID) && (session->event_timestamp == timestamp)) { return 0; } } return -1; } void storage_check_file(STORAGE_SERVER *storage_server) { if (!storage_server) { DBG_ERR("arg NULL\n"); return; } DIR *dir = NULL, *dev_dir = NULL; struct dirent *root_dirent = NULL, *dev_dirent = NULL; char dev_id[LEN_MAX_ID] = {0}, timestamp[LEN_MAX_ID] = {0}; char dev_path[LEN_MAX_PATH] = {0}, event_path[LEN_MAX_PATH] = {0}, file_path[LEN_MAX_PATH] = {0}; dir = opendir(STORAGE_ROOT_PATH); if(dir == NULL){ DBG_NOTICE("opendir:%s error:%s\n", STORAGE_ROOT_PATH, strerror(errno)); return; } 
while((root_dirent = readdir(dir)) != NULL){ /* dev */ if(0 == strncmp(root_dirent->d_name, ".", sizeof(".")) || 0 == strncmp(root_dirent->d_name, "..", sizeof(".."))) { continue; } memset(dev_path, 0, LEN_MAX_PATH); snprintf(dev_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, root_dirent->d_name); dev_dir = opendir(dev_path); if(dev_dir == NULL){ DBG_NOTICE("opendir:%s error:%s\n", dev_path, strerror(errno)); continue; } memset(dev_id, 0, LEN_MAX_ID); snprintf(dev_id, LEN_MAX_ID, "%s", root_dirent->d_name); while ((dev_dirent = readdir(dev_dir)) != NULL) { /* event */ if(0 == strncmp(dev_dirent->d_name, ".", sizeof(".")) || 0 == strncmp(dev_dirent->d_name, "..", sizeof(".."))) { continue; } memset(timestamp, 0, LEN_MAX_ID); snprintf(timestamp, LEN_MAX_ID, "%s", dev_dirent->d_name); memset(event_path, 0, LEN_MAX_PATH); snprintf(event_path, LEN_MAX_PATH, "%s/%s/%s", STORAGE_ROOT_PATH, dev_id, timestamp); memset(file_path, 0, LEN_MAX_PATH); snprintf(file_path, LEN_MAX_PATH, "%s/%s/%s/%s", STORAGE_ROOT_PATH, dev_id, timestamp, STORAGE_STREAMING_FILE); if (!access(file_path, F_OK)) { DBG_NOTICE("streaming file: %s\n", file_path); if (storage_find_file_in_list(dev_id, timestamp, &storage_server->stream_session)) { storage_delete_file(file_path); } } memset(file_path, 0, LEN_MAX_PATH); snprintf(file_path, LEN_MAX_PATH, "%s/%s/%s/%s", STORAGE_ROOT_PATH, dev_id, timestamp, STORAGE_SNAPSHOT_FILE); if (!access(file_path, F_OK)) { DBG_NOTICE("snapshot file: %s\n", file_path); if (storage_find_file_in_list(dev_id, timestamp, &storage_server->snapshot_session)) { storage_delete_file(file_path); } } storage_delete_empty_dir(event_path); } closedir(dev_dir); } closedir(dir); return; } void storage_get_iot_info(STORAGE_SESSION *session, struct list_head *iot_info_head) { IOT_INFO *iot_info; if (!session || !iot_info_head) { DBG_ERR("arg NULL\n"); return; } list_for_each_entry(iot_info, iot_info_head, list) { if (!strncmp(session->dev_id, iot_info->dev_id, LEN_MAX_ID) && (session->count_id == iot_info->count_id)) { #ifdef TAPO_CAMERA storage_set_iot_info(session, iot_info->host, iot_info->cookie, iot_info->xtoken, iot_info->alarm_id); #else storage_set_iot_info(session, iot_info->host, iot_info->cookie, iot_info->token, iot_info->event_id); #endif break; } } } bool storage_check_version() { FILE *version_fd; char version_path[LEN_MAX_PATH] = {0}, version[4] = {0}; //char cmd[128] = {0}; snprintf(version_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, STORAGE_VERSION_FILE); if (access(version_path, F_OK)) { DBG_NOTICE("storage version file not exist\n"); goto failed; } if (!(version_fd = fopen(version_path, "r"))) { DBG_NOTICE("open storage version file failed\n"); goto failed; } if (1 != fread(version, 4, 1, version_fd)) { DBG_NOTICE("read storage version file error\n"); fclose(version_fd); goto failed; } fclose(version_fd); if (strncmp(version, STORAGE_VERSION, 4)) { DBG_NOTICE("old version\n"); goto failed; } return true; failed: /* delete storage */ //snprintf(cmd, 128, "rm -rf %s/*", STORAGE_ROOT_PATH); //system(cmd); tpcom_system("rm", ("-rf"), ("%s", STORAGE_ROOT_PATH)); tpcom_system("mkdir", ("-p"), ("%s", STORAGE_ROOT_PATH)); /* create version */ snprintf(version, 4, "%s", STORAGE_VERSION); version_fd = fopen(version_path, "w"); if (version_fd) { fwrite(version, 4, 1, version_fd); fclose(version_fd); } return false; } void storage_file_list_init(STORAGE_SERVER *storage_server) { FILE *iot_info_fd, *stream_fd, *snapshot_fd; char iot_info_path[LEN_MAX_PATH] = {0}, stream_path[LEN_MAX_PATH] = {0}, 
snapshot_path[LEN_MAX_PATH] = {0}; struct list_head list; IOT_INFO *iot_info, *niot_info; if (!storage_server) { DBG_ERR("arg NULL\n"); return; } /* check version */ if (false == storage_check_version()) { return; } /* iot info */ INIT_LIST_HEAD(&list); snprintf(iot_info_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, STORAGE_IOT_INFO_LIST); iot_info_fd = fopen(iot_info_path, "r"); if (iot_info_fd) { while (1) { iot_info = malloc(sizeof(IOT_INFO)); if (fread(iot_info, sizeof(IOT_INFO), 1, iot_info_fd) <= 0) { free(iot_info); break; } list_add_tail(&iot_info->list, &list); } fclose(iot_info_fd); } /* stream session */ snprintf(stream_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, STORAGE_STREAMING_LIST); stream_fd = fopen(stream_path, "r"); if (stream_fd) { while (1) { STORAGE_SESSION *stream_session = malloc(sizeof(STORAGE_SESSION)); if (fread(stream_session, sizeof(STORAGE_SESSION), 1, stream_fd) <= 0) { free(stream_session); break; } DBG_NOTICE("dev_id: %s, count_id: %d\n", stream_session->dev_id, stream_session->count_id); INIT_LIST_HEAD(&stream_session->list); INIT_LIST_HEAD(&stream_session->headers); stream_session->sock = NULL; stream_session->file = NULL; stream_session->uploading_flag = 0; stream_session->producer = NULL; stream_session->iot_consumer = NULL; stream_session->storage_server = storage_server; storage_session_add_fixed_header(stream_session); memset(&stream_session->reupload_tmo, 0, sizeof(struct uloop_timeout)); list_add_tail(&stream_session->list, &storage_server->stream_session); storage_get_iot_info(stream_session, &list); } fclose(stream_fd); } /* snapshot session */ snprintf(snapshot_path, LEN_MAX_PATH, "%s/%s", STORAGE_ROOT_PATH, STORAGE_SNAPSHOT_LIST); snapshot_fd = fopen(snapshot_path, "r"); if (snapshot_fd) { while (1) { STORAGE_SESSION *snapshot_session = malloc(sizeof(STORAGE_SESSION)); if (fread(snapshot_session, sizeof(STORAGE_SESSION), 1, snapshot_fd) <= 0) { free(snapshot_session); break; } DBG_NOTICE("dev_id: %s, count_id: %d\n", snapshot_session->dev_id, snapshot_session->count_id); INIT_LIST_HEAD(&snapshot_session->list); INIT_LIST_HEAD(&snapshot_session->headers); snapshot_session->sock = NULL; snapshot_session->file = NULL; snapshot_session->uploading_flag = 0; snapshot_session->producer = NULL; snapshot_session->iot_consumer = NULL; snapshot_session->storage_server = storage_server; memset(&snapshot_session->reupload_tmo, 0, sizeof(struct uloop_timeout)); list_add_tail(&snapshot_session->list, &storage_server->snapshot_session); storage_get_iot_info(snapshot_session, &list); } fclose(snapshot_fd); } /* free & delete */ list_for_each_entry_safe(iot_info, niot_info, &list, list) { free(iot_info); } /* check file */ storage_check_file(storage_server); return; } void storage_server_free(STORAGE_SERVER *storage_server) { STORAGE_SESSION *storage_session, *nstorage_session; if (!storage_server) { return; } uloop_timeout_cancel(&storage_server->tmo); list_for_each_entry_safe(storage_session, nstorage_session, &storage_server->stream_session, list) { storage_session_free(storage_session); } list_for_each_entry_safe(storage_session, nstorage_session, &storage_server->snapshot_session, list) { storage_session_free(storage_session); } if (storage_server->worker_ctx) { storage_server->worker_ctx->storage_server = NULL; } list_del_init(&storage_server->list); free(storage_server); } int storage_server_init(WORKER_CTX *worker_ctx) { STORAGE_SERVER *storage_server; if (!worker_ctx) { DBG_ERR("worker ctx NULL\n"); return -1; } /* create */ storage_server = 
(STORAGE_SERVER*)malloc(sizeof(*storage_server)); if (!storage_server) { DBG_ERR("malloc failed\n"); return -1; } memset(storage_server, 0, sizeof(*storage_server)); storage_server->worker_ctx = worker_ctx; INIT_LIST_HEAD(&storage_server->list); INIT_LIST_HEAD(&storage_server->stream_session); INIT_LIST_HEAD(&storage_server->snapshot_session); //timer to re-upload storage_server->tmo.cb = storage_server_timeout_cb; uloop_timeout_set(&storage_server->tmo, 10*1000); /* set */ worker_ctx->storage_server = storage_server; /* file list init */ storage_file_list_init(storage_server); return 0; }
09-23
############################################################################### # Copyright (C) 2024 LiveTalking@lipku https://github.com/lipku/LiveTalking # email: lipku@foxmail.com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### # server.py from flask import Flask, render_template,send_from_directory,request, jsonify from flask_sockets import Sockets import base64 import json #import gevent #from gevent import pywsgi #from geventwebsocket.handler import WebSocketHandler import re import numpy as np from threading import Thread,Event #import multiprocessing import torch.multiprocessing as mp from aiohttp import web import aiohttp import aiohttp_cors from aiortc import RTCPeerConnection, RTCSessionDescription from aiortc.rtcrtpsender import RTCRtpSender from webrtc import HumanPlayer from basereal import BaseReal from llm import llm_response import argparse import random import shutil import asyncio import torch from typing import Dict from logger import logger import torch import time import os # 添加这行到文件顶部的其他import语句附近 app = Flask(__name__) #sockets = Sockets(app) nerfreals:Dict[int, BaseReal] = {} #sessionid:BaseReal opt = None model = None avatar = None #####webrtc############################### pcs = set() def randN(N)->int: '''生成长度为 N的随机数 ''' min = pow(10, N - 1) max = pow(10, N) return random.randint(min, max - 1) def build_nerfreal(sessionid:int)->BaseReal: opt.sessionid=sessionid if opt.model == 'wav2lip': from lipreal import LipReal nerfreal = LipReal(opt,model,avatar) elif opt.model == 'musetalk': from musereal import MuseReal nerfreal = MuseReal(opt,model,avatar) elif opt.model == 'ernerf': from nerfreal import NeRFReal nerfreal = NeRFReal(opt,model,avatar) elif opt.model == 'ultralight': from lightreal import LightReal nerfreal = LightReal(opt,model,avatar) return nerfreal #@app.route('/offer', methods=['POST']) async def offer(request): params = await request.json() offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"]) if len(nerfreals) >= opt.max_session: logger.info('reach max session') return web.Response( content_type="application/json", text=json.dumps({"code": -1, "msg": "Maximum sessions reached"}), status=503 # HTTP 503 Service Unavailable ) sessionid = randN(6) logger.info('sessionid=%d', sessionid) nerfreals[sessionid] = None nerfreal = await asyncio.get_event_loop().run_in_executor(None, build_nerfreal, sessionid) nerfreals[sessionid] = nerfreal pc = RTCPeerConnection() pcs.add(pc) @pc.on("connectionstatechange") async def on_connectionstatechange(): logger.info("Connection state is %s" % pc.connectionState) if pc.connectionState == "failed": await pc.close() pcs.discard(pc) del nerfreals[sessionid] if pc.connectionState == "closed": pcs.discard(pc) del nerfreals[sessionid] player = HumanPlayer(nerfreals[sessionid]) audio_sender = pc.addTrack(player.audio) video_sender = pc.addTrack(player.video) capabilities = RTCRtpSender.getCapabilities("video") preferences = list(filter(lambda x: 
x.name == "H264", capabilities.codecs)) preferences += list(filter(lambda x: x.name == "VP8", capabilities.codecs)) preferences += list(filter(lambda x: x.name == "rtx", capabilities.codecs)) transceiver = pc.getTransceivers()[1] transceiver.setCodecPreferences(preferences) await pc.setRemoteDescription(offer) answer = await pc.createAnswer() await pc.setLocalDescription(answer) return web.Response( content_type="application/json", text=json.dumps( {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type, "sessionid": sessionid} ), ) async def human(request): params = await request.json() sessionid = params.get('sessionid',0) if params.get('interrupt'): nerfreals[sessionid].flush_talk() if params['type']=='echo': nerfreals[sessionid].put_msg_txt(params['text']) elif params['type']=='chat': res=await asyncio.get_event_loop().run_in_executor(None, llm_response, params['text'],nerfreals[sessionid]) #nerfreals[sessionid].put_msg_txt(res) return web.Response( content_type="application/json", text=json.dumps( {"code": 0, "data":"ok"} ), ) async def humanaudio(request): try: form= await request.post() sessionid = int(form.get('sessionid',0)) fileobj = form["file"] filename=fileobj.filename filebytes=fileobj.file.read() nerfreals[sessionid].put_audio_file(filebytes) return web.Response( content_type="application/json", text=json.dumps( {"code": 0, "msg":"ok"} ), ) except Exception as e: return web.Response( content_type="application/json", text=json.dumps( {"code": -1, "msg":"err","data": ""+e.args[0]+""} ), ) async def set_audiotype(request): params = await request.json() sessionid = params.get('sessionid',0) nerfreals[sessionid].set_custom_state(params['audiotype'],params['reinit']) return web.Response( content_type="application/json", text=json.dumps( {"code": 0, "data":"ok"} ), ) async def record(request): params = await request.json() sessionid = params.get('sessionid',0) if params['type']=='start_record': # nerfreals[sessionid].put_msg_txt(params['text']) nerfreals[sessionid].start_recording() elif params['type']=='end_record': nerfreals[sessionid].stop_recording() return web.Response( content_type="application/json", text=json.dumps( {"code": 0, "data":"ok"} ), ) async def is_speaking(request): params = await request.json() sessionid = params.get('sessionid',0) return web.Response( content_type="application/json", text=json.dumps( {"code": 0, "data": nerfreals[sessionid].is_speaking()} ), ) async def on_shutdown(app): # close peer connections coros = [pc.close() for pc in pcs] await asyncio.gather(*coros) pcs.clear() async def post(url,data): try: async with aiohttp.ClientSession() as session: async with session.post(url,data=data) as response: return await response.text() except aiohttp.ClientError as e: logger.info(f'Error: {e}') async def run(push_url,sessionid): nerfreal = await asyncio.get_event_loop().run_in_executor(None, build_nerfreal,sessionid) nerfreals[sessionid] = nerfreal pc = RTCPeerConnection() pcs.add(pc) @pc.on("connectionstatechange") async def on_connectionstatechange(): logger.info("Connection state is %s" % pc.connectionState) if pc.connectionState == "failed": await pc.close() pcs.discard(pc) player = HumanPlayer(nerfreals[sessionid]) audio_sender = pc.addTrack(player.audio) video_sender = pc.addTrack(player.video) await pc.setLocalDescription(await pc.createOffer()) answer = await post(push_url,pc.localDescription.sdp) await pc.setRemoteDescription(RTCSessionDescription(sdp=answer,type='answer')) ########################################## # 
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1' # os.environ['MULTIPROCESSING_METHOD'] = 'forkserver' if __name__ == '__main__': torch.cuda.set_device(0) # 指定使用第一块 GPU torch.set_default_tensor_type('torch.cuda.FloatTensor') # 默认张量类型为 GPU ###device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') ###torch.set_default_tensor_type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor) mp.set_start_method('spawn') parser = argparse.ArgumentParser() parser.add_argument('--pose', type=str, default="data/data_kf.json", help="transforms.json, pose source") parser.add_argument('--au', type=str, default="data/au.csv", help="eye blink area") parser.add_argument('--torso_imgs', type=str, default="", help="torso images path") parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='data/video') parser.add_argument('--seed', type=int, default=0) ### training options parser.add_argument('--ckpt', type=str, default='data/pretrained/ngp_kf.pth') # 在参数解析部分(约第 150 行)修改默认值: parser.add_argument('--num_rays', type=int, default=4096, help="减少每批光线数量") # 原值 65536 parser.add_argument('--batch_size', type=int, default=8, help="降低批大小") # 原值 16 parser.add_argument('--max_ray_batch', type=int, default=2048, help="避免推理时 OOM") # 原值 4096 ###parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") ###parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### loss set parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps") parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss") parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss") parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss") parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--bg_img', type=str, default='white', help="background image") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### 
    ### dataset options
    parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
    parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.")
    # (the default value is for the fox dataset)
    parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.")
    parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
    parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
    parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
    parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
    parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
    parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
    parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")

    parser.add_argument('--init_lips', action='store_true', help="init lips region")
    parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
    parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...")

    parser.add_argument('--torso', action='store_true', help="fix head and train torso")
    parser.add_argument('--head_ckpt', type=str, default='', help="head model")

    ### GUI options
    parser.add_argument('--gui', action='store_true', help="start a GUI")
    parser.add_argument('--W', type=int, default=450, help="GUI width")
    parser.add_argument('--H', type=int, default=450, help="GUI height")
    parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
    parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
    parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")

    ### else
    parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
    parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)")
    parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")

    parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
    parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")

    parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")

    parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
    parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
    parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")

    parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
    parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
    parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")

    # asr
    parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
    parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
    parser.add_argument('--asr_play', action='store_true', help="play out the audio")

    #parser.add_argument('--asr_model', type=str, default='deepspeech')
    parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') #
    # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
    # parser.add_argument('--asr_model', type=str, default='facebook/hubert-large-ls960-ft')

    parser.add_argument('--asr_save_feats', action='store_true')
    # audio FPS
    parser.add_argument('--fps', type=int, default=50)
    # sliding window left-middle-right length (unit: 20ms)
    parser.add_argument('-l', type=int, default=10)
    parser.add_argument('-m', type=int, default=8)
    parser.add_argument('-r', type=int, default=10)

    parser.add_argument('--fullbody', action='store_true', help="fullbody human")
    parser.add_argument('--fullbody_img', type=str, default='data/fullbody/img')
    parser.add_argument('--fullbody_width', type=int, default=580)
    parser.add_argument('--fullbody_height', type=int, default=1080)
    parser.add_argument('--fullbody_offset_x', type=int, default=0)
    parser.add_argument('--fullbody_offset_y', type=int, default=0)

    #musetalk opt
    parser.add_argument('--avatar_id', type=str, default='avator_1')
    parser.add_argument('--bbox_shift', type=int, default=5)
    ###parser.add_argument('--batch_size', type=int, default=16)

    # parser.add_argument('--customvideo', action='store_true', help="custom video")
    # parser.add_argument('--customvideo_img', type=str, default='data/customvideo/img')
    # parser.add_argument('--customvideo_imgnum', type=int, default=1)

    parser.add_argument('--customvideo_config', type=str, default='')

    parser.add_argument('--tts', type=str, default='edgetts') #xtts gpt-sovits cosyvoice
    parser.add_argument('--REF_FILE', type=str, default=None)
    parser.add_argument('--REF_TEXT', type=str, default=None)
    parser.add_argument('--TTS_SERVER', type=str, default='http://127.0.0.1:9880') # http://localhost:9000
    # parser.add_argument('--CHARACTER', type=str, default='test')
    # parser.add_argument('--EMOTION', type=str, default='default')

    parser.add_argument('--model', type=str, default='ernerf') #musetalk wav2lip

    parser.add_argument('--transport', type=str, default='rtcpush') #rtmp webrtc rtcpush
    parser.add_argument('--push_url', type=str, default='http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream') #rtmp://localhost/live/livestream

    parser.add_argument('--max_session', type=int, default=100) #multi session count
    parser.add_argument('--listenport', type=int, default=8010)

    opt = parser.parse_args()
    #app.config.from_object(opt)
    #print(app.config)
    opt.customopt = []
    if opt.customvideo_config != '':
        with open(opt.customvideo_config, 'r') as file:
            opt.customopt = json.load(file)

    if opt.model == 'ernerf':
        from nerfreal import NeRFReal, load_model, load_avatar
        model = load_model(opt)
        avatar = load_avatar(opt)
        # we still need test_loader to provide audio features for testing.
        # for k in range(opt.max_session):
        #     opt.sessionid = k
        #     nerfreal = NeRFReal(opt, trainer, test_loader, audio_processor, audio_model)
        #     nerfreals.append(nerfreal)
    elif opt.model == 'musetalk':
        from musereal import MuseReal, load_model, load_avatar, warm_up
        logger.info(opt)
        model = load_model()
        avatar = load_avatar(opt.avatar_id)
        warm_up(opt.batch_size, model)
        # for k in range(opt.max_session):
        #     opt.sessionid = k
        #     nerfreal = MuseReal(opt, audio_processor, vae, unet, pe, timesteps)
        #     nerfreals.append(nerfreal)
    elif opt.model == 'wav2lip':
        from lipreal import LipReal, load_model, load_avatar, warm_up
        logger.info(opt)
        ###model = load_model("./models/wav2lip.pth")
        model = load_model("./models/wav2lip.pth").to('cuda')  # force the model onto the GPU
        ###model = load_model("./models/wav2lip.pth").to(device)  # adapt to GPU/CPU dynamically
        avatar = load_avatar(opt.avatar_id)
        warm_up(opt.batch_size, model, 256)
        # for k in range(opt.max_session):
        #     opt.sessionid = k
        #     nerfreal = LipReal(opt, model)
        #     nerfreals.append(nerfreal)
    elif opt.model == 'ultralight':
        from lightreal import LightReal, load_model, load_avatar, warm_up
        logger.info(opt)
        model = load_model(opt)
        avatar = load_avatar(opt.avatar_id)
        warm_up(opt.batch_size, avatar, 160)

    if opt.transport == 'rtmp':
        thread_quit = Event()
        nerfreals[0] = build_nerfreal(0)
        rendthrd = Thread(target=nerfreals[0].render, args=(thread_quit,))
        rendthrd.start()

    #############################################################################
    appasync = web.Application()

    from aiohttp import WSMsgType

    async def websocket_handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)

        sessionid = request.query.get('sessionid', 0)
        if sessionid in nerfreals:
            nerfreals[sessionid].set_websocket(ws)

        async for msg in ws:
            if msg.type == WSMsgType.TEXT:
                try:
                    data = json.loads(msg.data)
                    # handle possible WebSocket messages here
                except json.JSONDecodeError:
                    logger.error("invalid WebSocket message format")
            elif msg.type == WSMsgType.ERROR:
                logger.error(f"WebSocket error: {ws.exception()}")

        if sessionid in nerfreals:
            nerfreals[sessionid].set_websocket(None)
        return ws

    async def get_system_reply(request):
        try:
            file_path = 'systemReply.txt'
            if not os.path.exists(file_path):
                logger.info('systemReply.txt does not exist')
                return web.Response(
                    content_type="application/json",
                    text=json.dumps({"text": ""})
                )

            # only read the file, do not clear it
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read().strip()

            logger.info(f'read from systemReply.txt: {content[:100]}...')  # log only the first 100 chars to keep the log short

            return web.Response(
                content_type="application/json",
                text=json.dumps({"text": content})
            )
        except Exception as e:
            logger.error(f'error reading systemReply.txt: {str(e)}')
            return web.Response(
                content_type="application/json",
                text=json.dumps({"error": str(e)}),
                status=500
            )

    async def clear_reply(request):
        try:
            params = await request.json()
            sessionid = params.get('sessionid', 0)

            file_path = 'systemReply.txt'
            # clear the file content
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write('')

            logger.info(f'cleared systemReply.txt (session id: {sessionid})')
            return web.Response(
                content_type="application/json",
                text=json.dumps({"code": 0, "msg": "reply cleared"})
            )
        except Exception as e:
            logger.error(f'error clearing reply: {str(e)}')
            return web.Response(
                content_type="application/json",
                text=json.dumps({"code": -1, "error": str(e)}),
                status=500
            )

    async def get_system_reply_array(request):
        try:
            file_path = 'systemReplyArray.txt'
            if not os.path.exists(file_path):
                # create an empty file if it does not exist
                with open(file_path, 'w', encoding='utf-8') as f:
                    f.write('')

            # read synchronously to avoid async file-handling issues
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read().strip()

            # replace newlines with "||"
            content = content.replace('\n', '||')
            return web.Response(
                content_type="application/json",
                text=json.dumps({
                    "status": "success",
                    "text": content,
                    "timestamp": int(time.time())  # add a timestamp to defeat caching
                })
            )
        except Exception as e:
            logger.error(f'error reading systemReplyArray.txt: {str(e)}')
            return web.Response(
                content_type="application/json",
                status=500,
                text=json.dumps({
                    "status": "error",
                    "error": str(e),
                    "text": ""
                })
            )

    # register the WebSocket route
    appasync.router.add_get("/ws", websocket_handler)
    appasync.router.add_post("/clear_reply", clear_reply)
    appasync.on_shutdown.append(on_shutdown)
    appasync.router.add_post("/offer", offer)
    appasync.router.add_post("/human", human)
    appasync.router.add_post("/humanaudio", humanaudio)
    appasync.router.add_post("/set_audiotype", set_audiotype)
    appasync.router.add_post("/record", record)
    appasync.router.add_post("/is_speaking", is_speaking)
    appasync.router.add_static('/', path='web')
    # extra routes added next to the other appasync.router.add_... calls
    appasync.router.add_get("/get_system_reply", get_system_reply)
    appasync.router.add_get("/get_system_reply_array", get_system_reply_array)

    # Configure default CORS settings.
    cors = aiohttp_cors.setup(appasync, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    })
    # Configure CORS on all routes.
    for route in list(appasync.router.routes()):
        cors.add(route)

    pagename = 'webrtcapi.html'
    if opt.transport == 'rtmp':
        pagename = 'echoapi.html'
    elif opt.transport == 'rtcpush':
        pagename = 'rtcpushapi.html'
    logger.info('start http server; http://<serverip>:' + str(opt.listenport) + '/' + pagename)
    logger.info('if using webrtc, the integrated webrtc frontend is recommended: http://127.0.0.1:' + str(opt.listenport) + '/ffnerchat.html')
    logger.info(f"device used by the model: {next(model.parameters()).device}")
    logger.info(f"current GPU memory allocated: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")

    def run_server(runner):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(runner.setup())
        site = web.TCPSite(runner, '127.0.0.1', opt.listenport)
        loop.run_until_complete(site.start())

        # print the reachable URLs
        import socket
        import webbrowser
        hostname = socket.gethostname()
        local_ip = socket.gethostbyname(hostname)
        logger.info("server started, reachable at:")
        logger.info(f"local: http://127.0.0.1:{opt.listenport}/ffnerchatm.html")
        logger.info(f"LAN:   http://{local_ip}:{opt.listenport}/ffnerchatm.html")
        url = f"http://127.0.0.1:{opt.listenport}/ffnerchatm.html"

        # try to open the default browser
        try:
            webbrowser.open(url)
            logger.info("tried to open the page in the default browser")
        except Exception as e:
            logger.error(f"could not open the browser: {e}")

        if opt.transport == 'rtcpush':
            for k in range(opt.max_session):
                push_url = opt.push_url
                if k != 0:
                    push_url = opt.push_url + str(k)
                loop.run_until_complete(run(push_url, k))
        loop.run_forever()

    # Thread(target=run_server, args=(web.AppRunner(appasync),)).start()
    run_server(web.AppRunner(appasync))

    # app.on_shutdown.append(on_shutdown)
    # app.router.add_post("/offer", offer)

    # print('start websocket server')
    # server = pywsgi.WSGIServer(('0.0.0.0', 8000), app, handler_class=WebSocketHandler)
    # server.serve_forever()

Is there speech-to-text in this code?