G. Team Players

This post presents an inclusion-exclusion approach to a graph triple-counting problem and walks through a complete C++ implementation.

Go straight at it with inclusion-exclusion: take the weighted sum over all triples, subtract the triples containing each conflicting pair, add back the triples containing two pairs, and subtract the triangles. The overlap terms are then enumerated by brute force. Done.
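In symbols, writing w(i, j, k) = a·i + b·j + c·k for a triple i < j < k and E for the set of conflicting pairs:

$$\mathrm{ans}=\sum_{i<j<k} w(i,j,k)\;-\;\sum_{e\in E}\,\sum_{\{i,j,k\}\supseteq e} w(i,j,k)\;+\;\sum_{\substack{\{e,f\}\subseteq E\\ |e\cup f|=3}} w(e\cup f)\;-\;\sum_{\text{triangle}\ \{i,j,k\}} w(i,j,k)$$

A triple with exactly 1, 2, or 3 internal edges is removed a net 1, 2 − 1 = 1, or 3 − 3 + 1 = 1 times, so every triple containing a conflicting pair is excluded exactly once.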
The C++ code is as follows:

#include<bits/stdc++.h>
#define lowbit(x) (x & -x)
#define rep(i,x,y) for(int i = x ;i <= y; ++ i)
#define repd(i,x,y) for(int i = x ; i >= y; -- i)
using namespace std;
typedef unsigned long long ull;
typedef long long ll;
template<typename T>inline void read(T&x)
{
    char c;int sign = 1;x = 0;
    do { c = getchar(); if(c == '-') sign = -1; }while(!isdigit(c));
    do { x = x * 10 + c - '0'; c = getchar(); }while(isdigit(c));
    x *= sign;
}

const int N = 2e5 + 20;
ull a,b,c,n,m;
ull u[N],v[N],ans;
ull s1[N],s2[N],s3[N];
vector<int> g[N],f[N];

int main()
{
    read(n); read(m);
    read(a); read(b); read(c);
    rep(i,1,m)
    {
        read(u[i]); read(v[i]);
        if(u[i] > v[i]) swap(u[i],v[i]);
        g[u[i]].push_back(v[i]);
        f[v[i]].push_back(u[i]);
    }

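    // Step 1: sum a*i + b*j + c*k over ALL triples i < j < k.
    // For a fixed vertex i with x = n - i - 1 larger vertices, i is the
    // smallest element of C(x,2) triples, the middle of i*x, and the
    // largest of C(i,2).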
    rep(i,0,n-1)
    {
        ull x = n - i - 1;
        ans += a * i * (x * (x - 1) / 2);
        ans += b * i * i * x;
        ans += c * i * ((ull)i * (i - 1) / 2);
    }

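    // Step 2 (difference arrays): for each edge (u,v), u < v, and each third
    // vertex w, record how often every vertex value occurs as the smallest
    // (s1), middle (s2), or largest (s3) element of the triple {u, v, w}.
    // Each edge costs O(1) thanks to the range updates.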
    rep(i,1,m)
    {
        s1[  0 ] += 1;
        s1[u[i]] -= 1;

        s1[ u[i] ] += n - u[i] - 2;
        s1[u[i]+1] -= n - u[i] - 2;

        s2[u[i]+1] += 1;
        s2[ v[i] ] -= 1;

        s2[ u[i] ] += u[i];
        s2[u[i]+1] -= u[i];

        s2[ v[i] ] += n - v[i] - 1;
        s2[v[i]+1] -= n - v[i] - 1;

        s3[v[i]+1] += 1;
        s3[  n   ] -= 1;

        s3[ v[i] ] += v[i] - 1;
        s3[v[i]+1] -= v[i] - 1;
    }

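    // Prefix sums turn the difference arrays into actual counts,
    // then the weighted counts are subtracted from the answer.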
    rep(i,1,n) 
        s1[i] += s1[i - 1],
        s2[i] += s2[i - 1],
        s3[i] += s3[i - 1];

    rep(i,0,n - 1)
    {
        ans -= a * i * s1[i];
        ans -= b * i * s2[i];
        ans -= c * i * s3[i];
    }

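    // Step 3: add back triples containing two edges. Two edges inside a
    // triple always share a vertex, which is either the smallest element
    // (two out-edges of i), the middle one (a 2-path i -> g[i][j] -> q),
    // or the largest one (two in-edges of i).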
    rep(i,0,n-1) sort(g[i].begin(),g[i].end());
    rep(i,0,n-1) sort(f[i].begin(),f[i].end());
    rep(i,0,n-1)
    {
        int sz = g[i].size();
        rep(j,0,sz - 1)
        {
            int k = j + 1;
            while(k < sz)
            {
                ans += a * i;
                ans += b * g[i][j];
                ans += c * g[i][k];
                k ++ ;
            }

            int SZ = g[g[i][j]].size();
            rep(q,0,SZ - 1)
            {
                ans += a * i;
                ans += b * g[i][j];
                ans += c * g[g[i][j]][q];
            }
        }

        sz = f[i].size();
        rep(j,0,sz - 1)
        {
            int k = j + 1;
            while(k < sz)
            {
                ans += a * f[i][j];
                ans += b * f[i][k];
                ans += c * i;
                ++ k;
            }
        }
    }

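    // Step 4: subtract triangles once more. A triangle was subtracted 3
    // times in step 2 and added back 3 times in step 3, so one extra
    // subtraction removes it exactly once. Triangles are detected by
    // intersecting the sorted lists g[i] and g[g[i][j]].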
    rep(i,0,n-1)
    {
        int sz = g[i].size();
        rep(j,0,sz - 1)
        {
            int t = j + 1,k = 0;
            int SZ = g[g[i][j]].size();
            while(t < sz && k < SZ)
            {
                if(g[i][t] == g[g[i][j]][k])
                {
                    ans -= a * i;
                    ans -= b * g[i][j];
                    ans -= c * g[i][t];
                    ++ t; ++ k;
                }
                else if(g[i][t] < g[g[i][j]][k]) ++ t;
                else ++ k;
            }
        }
    }

    cout << ans << endl;

    return 0;
}
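For readers unfamiliar with the s1/s2/s3 bookkeeping above: it is the classic difference-array trick (O(1) range additions, one prefix-sum pass to recover the values). Below is a minimal standalone sketch of just that technique; the array size and the two range updates are toy values chosen purely for illustration, not from the solution.

#include <bits/stdc++.h>
using namespace std;

// Difference-array demo: each range addition is O(1); a single
// prefix-sum pass then recovers every point value -- the same pattern
// the solution uses for s1, s2, and s3.
int main()
{
    int n = 10;                          // toy array size (illustrative)
    vector<long long> d(n + 1, 0);

    d[2] += 5; d[7] -= 5;                // add +5 to indices [2, 6]
    d[0] += 3; d[5] -= 3;                // add +3 to indices [0, 4]

    long long run = 0;
    for (int i = 0; i < n; ++i)
    {
        run += d[i];                     // run == final value at index i
        printf("a[%d] = %lld\n", i, run);
    }
    return 0;
}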