CF1132F Clear the String

Being away from OI for too long really does make your RP (luck) drop.

During the contest I somehow failed to recognize this as interval DP and threw together a greedy instead; fortunately the other problems went fine.


Recall that in this problem one operation deletes a contiguous substring in which all characters are equal, and the remaining parts are concatenated. The state \(dp[i][j]\) denotes the minimum number of steps needed to delete all letters in the interval \([i,j]\).

We consider the transition from two directions.

  1. Compared with the smaller subproblem, the newly added letter cannot save a step, so we simply pay one extra step for it.

  2. It can save a step, in which case we enumerate which equal letter it is merged with.

The first transition is \(dp[i][j]=\min(dp[i+1][j],dp[i][j-1])+1\).

The second transition enumerates \(k\) in \([i,j]\) with \(c_i=c_k\), giving \(dp[i][j]=\min(dp[i][j],\,dp[i+1][k-1]+dp[k][j])\): the letter at position \(i\) can be deleted for free together with the block containing position \(k\) while \([k,j]\) is being cleared, so we only pay for \([i+1,k-1]\) and \([k,j]\).

There is one implementation detail to note: the first transition needs \(dp[i+1][j]\) and \(dp[i][j-1]\), so we enumerate \(j\) in the outer loop from small to large, and \(i\) in the inner loop from large to small; this guarantees that both subproblems are already computed when \(dp[i][j]\) is evaluated.
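
As a quick check of the recurrences, take the string \(\texttt{aba}\): \(dp[1][1]=dp[2][2]=dp[3][3]=1\) and \(dp[1][2]=dp[2][3]=2\). For \(dp[1][3]\), the first transition gives \(\min(2,2)+1=3\), while the second with \(k=3\) (since \(c_1=c_3=\texttt{a}\)) gives \(dp[2][2]+dp[3][3]=2\), so \(dp[1][3]=2\): delete the single \(\texttt{b}\), then the two \(\texttt{a}\)'s in one step.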


The AC code follows:

#include <iostream>
#include <cstdio>
#include <cstring>
#include <algorithm> // for std::min

using namespace std;

namespace StandardIO {

    template<typename T>inline void read (T &x) {
        x=0;T f=1;char c=getchar();
        for (; c<'0'||c>'9'; c=getchar()) if (c=='-') f=-1;
        for (; c>='0'&&c<='9'; c=getchar()) x=x*10+c-'0';
        x*=f;
    }

    template<typename T>inline void write (T x) {
        if (x<0) putchar('-'),x*=-1;
        if (x>=10) write(x/10);
        putchar(x%10+'0');
    }

}

using namespace StandardIO;

namespace Fate {
    
    const int N=1010;
    
    int n;
    char c[N];
    int dp[N][N]; // min steps for clearing [i,j]
    
    inline void Grand_Order () {
        read(n);
        for (register int i=1; i<=n; ++i) {
            cin>>c[i];
            dp[i][i]=1;
        }
        // Outer loop: right endpoint r from small to large;
        // inner loop: left endpoint l from large to small,
        // so dp[l+1][r] and dp[l][r-1] are ready when dp[l][r] is computed.
        for (register int r=2; r<=n; ++r) {
            for (register int l=r-1; l>=1; --l) {
                // Transition 1: the extra letter costs one separate step.
                dp[l][r]=min(dp[l+1][r],dp[l][r-1])+1;
                // Transition 2: merge c[l] with an equal letter c[k].
                for (register int k=l; k<=r; ++k) {
                    if (c[l]==c[k]) {
                        dp[l][r]=min(dp[l][r],dp[l+1][k-1]+dp[k][r]);
                    }
                }
            }
        }
        write(dp[1][n]);
    }
    
}

int main () {
//  freopen(".in","r",stdin);
//  freopen(".out","w",stdout);
    Fate::Grand_Order();
}
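
As a quick local test: feeding \(n=3\) and the string \(\texttt{aba}\) on standard input should make the program print \(2\), matching the hand trace above.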

Reposted from: https://www.cnblogs.com/ilverene/p/10822729.html
