Range Modular Queries

This post presents an effective approach to range modular queries. Block decomposition (Mo's algorithm) and precomputed lookup tables make large batches of queries fast, and the algorithm switches strategy depending on the modulus x to get the best performance.

Problem

Given a sequence and q queries, each query (l, r, x, y) asks how many numbers a[i] in the range [l, r] satisfy a[i] % x == y.
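A concrete illustration (numbers chosen here, not from the original problem): for a = {4, 7, 2, 9} and the query (l = 0, r = 3, x = 3, y = 1), the remainders are 1, 1, 2, 0, so the answer is 2. A minimal brute-force baseline, assuming the same 0-indexed input format that both solutions below read, answers each query in O(n) and is useful for cross-checking:

#include<bits/stdc++.h>
using namespace std;
// Naive reference: O(n) per query.
// Input (assumed): n q, then n values, then q lines "l r x y" (0-indexed, inclusive).
int main()
{
    int n, q;
    scanf("%d%d", &n, &q);
    vector<int> a(n);
    for(int i = 0; i < n; i++) scanf("%d", &a[i]);
    while(q--)
    {
        int l, r, x, y;
        scanf("%d%d%d%d", &l, &r, &x, &y);
        int cnt = 0;
        for(int i = l; i <= r; i++)
            if(a[i] % x == y) cnt++;
        printf("%d\n", cnt);
    }
    return 0;
}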

Analysis

In fact plain Mo's algorithm (block decomposition over the queries) already passes, and runs remarkably fast.
The blocking here only serves to order the queries: sorting them by (l / BLOCK, r) means the left pointer moves at most O(BLOCK) per query and the right pointer at most O(n) per block, bounding the total pointer movement.

When x is small, brute-force precomputed tables do the job; when x is large, enumerating the residue class y, y + x, y + 2x, … directly is more efficient.
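Roughly, with value range V ≈ 4 · 10^4 and a threshold K: precomputing position lists for every modulus x < K costs O(K · n) time and memory, while a query with x ≥ K enumerates at most V / K residue values, each answered by a binary search in O(log n). Choosing K near √V ≈ 200 keeps both sides small, which matches the MAXK = 205 used in the second solution below.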

Code (Mo's algorithm)

#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
const int MAXN = 4e4 + 10;
const int BLOCK = 200;      // ~sqrt(n), block size for Mo's ordering
int n, q, L, R;             // [L, R] is the window currently reflected in mp
int a[MAXN];
int ans[MAXN];
int mp[4 * MAXN];           // mp[v] = how many times value v occurs in the window
struct block
{
    int l, r, x, y;
    int id, bid;            // id = original query index, bid = block of l
    bool operator < (const block& other) const
    {
        // Mo's order: by block of the left endpoint, then by right endpoint
        if(bid == other.bid) return r < other.r;
        return bid < other.bid;
    }
}b[MAXN];

void query(int l, int r, int id, int x, int y)
{
    if(id)      // shift the window [L, R] to [l, r]
    {
        for(int i = l; i < L; i++) mp[a[i]]++;
        for(int i = R + 1; i <= r; i++) mp[a[i]]++;
        for(int i = L; i < l; i++) mp[a[i]]--;
        for(int i = r + 1; i <= R; i++) mp[a[i]]--;
    }
    else        // first query: build the window from scratch
    {
        for(int i = l; i <= r; i++)
            mp[a[i]]++;
    }
    // walk the residue class y, y + x, y + 2x, ... and sum the counts
    for(int i = 0; i < MAXN; i += x)
        ans[b[id].id] += mp[i + y];
    L = l; R = r;
}

int main()
{
    scanf("%d%d", &n, &q);
    for(int i = 0; i < n; i++) scanf("%d", &a[i]);
    for(int i = 0; i < q; i++)
    {
        scanf("%d%d%d%d", &b[i].l, &b[i].r, &b[i].x, &b[i].y);
        b[i].id = i;
        b[i].bid = b[i].l / BLOCK;
    }
    sort(b, b + q);
    for(int i = 0; i < q; i++) query(b[i].l, b[i].r, i, b[i].x, b[i].y);
    for(int i = 0; i < q; i++) printf("%d\n", ans[i]);
    return 0;
}
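The window movement follows the usual Mo's bound of O((n + q) · √n), but the answer loop adds an extra O(MAXN / x) per query, so very small x is the expensive case. As a quick sanity check (input constructed here for illustration, using the format the code reads):

5 2
1 2 3 4 5
0 4 2 1
1 3 3 0

The program prints 3 (values 1, 3, 5 in a[0..4] are ≡ 1 mod 2) followed by 1 (only a[2] = 3 is ≡ 0 mod 3 within a[1..3]).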

Code (precomputed tables + residue enumeration)

#include<bits/stdc++.h>
#define all(x) (x).begin(), (x).end()
using namespace std;
const int MAXN = 4e4 + 10;
const int MAXK = 205;           // threshold between "small" and "large" moduli
int a[MAXN];
vector<int> pos[MAXK][MAXK];    // pos[x][y]: sorted indices i with a[i] % x == y (x < MAXK)
vector<int> poss[MAXN];         // poss[v]:   sorted indices i with a[i] == v
int main()
{
    int n, q;
    scanf("%d%d", &n, &q);
    for(int i = 0; i < n; i++) scanf("%d", &a[i]);
    // precompute position lists for every small modulus
    for(int i = 1; i < MAXK; i++)
        for(int j = 0; j < n; j++)
            pos[i][a[j] % i].push_back(j);
    // position list for every exact value
    for(int i = 0; i < n; i++)
        poss[a[i]].push_back(i);
    while(q--)
    {
        int l, r, x, y;
        scanf("%d%d%d%d", &l, &r, &x, &y);
        if(x < MAXK) // small x: binary-search the precomputed position list
        {
            int ans = upper_bound(all(pos[x][y]), r) - lower_bound(all(pos[x][y]), l);
            printf("%d\n", ans);
        }
        else // x >= MAXK: the residue class has at most MAXN / MAXK values, so this loop is fast
        {
            int ans = 0;
            for(int i = y; i < MAXN; i += x)
            {
                ans += upper_bound(all(poss[i]), r) - lower_bound(all(poss[i]), l);
            }
            printf("%d\n", ans);
        }
    }
    return 0;
}
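Both programs answer the same queries; which to prefer depends on the constraints. The Mo's version uses only O(n + q) memory but is inherently offline, and its per-query residue walk degrades when x is small. The table version answers online in O(log n) for small x and O((MAXN / MAXK) · log n) for large x, at the cost of storing roughly MAXK · n ≈ 8 · 10^6 indices (on the order of 32 MB) in the pos tables.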

Reposted from: https://www.cnblogs.com/ftae/p/6791410.html
