LWC 52:686. Repeated String Match

This post presents an algorithm for a string-matching problem: find the minimum number of times string A must be repeated so that string B becomes a substring of the result. A full solution with implementation code is given below.

Link: 686. Repeated String Match

Problem:

Given two strings A and B, find the minimum number of times A has to be repeated such that B is a substring of it. If no such solution, return -1.

For example, with A = “abcd” and B = “cdabcdab”.

Return 3, because by repeating A three times (“abcdabcdabcd”), B is a substring of it; and B is not a substring of A repeated two times (“abcdabcd”).

Note:

The length of A and B will be between 1 and 10000.

Approach:
How many times must A be repeated before it contains B? The key is knowing when to stop. Once the repeated string is longer than B by at least one full copy of A, we can stop searching: any occurrence of B must start within the first copy of A, so if B is not contained at that length, appending more copies cannot help.

The code:

    public int repeatedStringMatch(String A, String B) {
        int nb = B.length();
        int na = A.length();
        // Any match of B must start within the first copy of A,
        // so nb / na + 2 repetitions always suffice.
        int times = nb / na + 2;
        StringBuilder sb = new StringBuilder(A);
        for (int i = 1; i <= times; ++i) {
            if (sb.toString().contains(B)) return i;
            sb.append(A); // not contained yet: append one more copy of A
        }
        return -1;
    }

The upper bound on times could be set larger, but nb / na + 2 is already the tightest: B can start at most na - 1 characters into the repeated string, so at most ceil(nb / na) + 1 copies are ever needed, and nb / na + 2 (with integer division) always covers that.
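The same bound can also be applied in one shot instead of growing a buffer in a loop. The sketch below is a minimal variant of the idea, not from the original post: it builds the longest candidate once with String.repeat (Java 11+), and the method name repeatedStringMatchOneShot is my own. For the worked example (A = "abcd", B = "cdabcdab"), indexOf finds the match at index 2, giving ceil((2 + 8) / 4) = 3 copies, matching the loop version.

    // One-shot variant of the same bound (Java 11+); a sketch, not the post's solution.
    public int repeatedStringMatchOneShot(String A, String B) {
        int na = A.length(), nb = B.length();
        int times = nb / na + 2;      // same upper bound as above
        String s = A.repeat(times);   // the longest candidate we would ever search
        int idx = s.indexOf(B);
        if (idx < 0) return -1;
        // Copies needed to cover a match starting at idx: ceil((idx + nb) / na).
        return (idx + nb + na - 1) / na;
    }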
