K Dress as women

本文探讨了一款基于二维平面上点消除的游戏策略,通过状态压缩和搜索算法确定游戏的胜负。利用状态压缩技术,将点的分布状态进行编码,再通过搜索算法判断先手玩家是否处于必胜状态。

https://ac.nowcoder.com/acm/contest/5944/K

One day, zyh and fzj are playing a game called Bejeweled, and they have promised the loser would dress as women.

The rules are as follows, in a two-dimensional plane, each time one can remove one or multiple collinear points from the plane. The one who removes the last point on the plane will win the game.

They both want the other to dress as women, so they always make the best choice. Now fyt wants to know who will lose. Note that zyh will always take the first.

题目大意:二维平面上面有一堆点,然后两个人轮流消除一些点,要么消除一个点,要么消除共线的点

数据很小,将n状压一下,然后爆搜当前状态,如果sg[(1 << n) - 1] == 1 , 那么先手就赢了,必胜态,否则就输了。

对于当前状态ans而言,

  1. 如果存在某个状态 res 是 ans 的子状态,并且 res 是必输状态;
  2. 并且 res ^ ans(两者的差集,即当前操作者这一步要消除的点集)中的点全部共线
    (或不超过两个,因为任意一、两个点都可以一次消除)。
    同时满足这两个条件时,当前操作者处于必胜状态。
#include <iostream>
#include <cstdio>
#include <algorithm>
#include <unordered_map>
#include <vector>
#include <map>
#include <list>
#include <queue>
#include <cstring>
#include <cstdlib>
#include <ctime>
#include <cmath>
#include <stack>
#include <set>
#pragma GCC optimize(3 , "Ofast" , "inline")
using namespace std ;
// Fast-I/O switch for cin/cout (unused here: all input goes through in()).
#define ios ios::sync_with_stdio(false) , cin.tie(0) , cout.tie(0)
// NOTE(review): these two macros textually rename EVERY later use of the
// identifiers x and y (including the global arrays below) to first/second.
#define x first
#define y second
typedef long long ll ;
const double esp = 1e-6 , pi = acos(-1) ;   // esp/pi unused in this solution
typedef pair<int , int> PII ;
// N bounds the sg[] table: covers all 2^n masks for n <= 20; INF/mod unused here.
const int N = 1e6 + 10 , INF = 0x3f3f3f3f , mod = 1e9 + 7;
// Fast integer scanner: skips leading non-digit characters (remembering a
// '-' seen among them as the sign), then accumulates consecutive digits.
// Behaves like scanf("%lld") on well-formed whitespace-separated input.
ll in()
{
  ll val = 0 ;
  ll sign = 1 ;
  int c = getchar() ;
  for( ; !isdigit(c) ; c = getchar())
    if(c == '-') sign = -1 ;
  for( ; isdigit(c) ; c = getchar())
    val = val * 10 + (c - 48) ;
  return sign * val ;
}
// Game state: n input points (x[i], y[i]); X/Y are 1-indexed scratch buffers
// filled by check(); sg[mask] memoises the outcome of the position whose
// remaining points are the set bits of mask (-1 unknown, 1 win, 0 loss).
// (After preprocessing, the x/y macros above rename x[]/y[] to first[]/second[].)
int n , x[20] , y[20] , sg[N] , X[20] , Y[20];
bool check(int ans)
{
  int cnt = 0 ;
  for(int i = 0 ;i < n ;i ++ ) if((ans >> i) & 1) X[++ cnt] = x[i] , Y[cnt] = y[i] ;
  if(cnt <= 2) return 1 ;
  for(int i = 3; i <= cnt ;i ++ )
   if(1ll * (X[i] - X[1]) * (Y[2] - Y[1]) != 1ll * (X[2] - X[1]) * (Y[i] - Y[1]))
     return 0 ;
  return 1;
}
// Memoised win/loss search over point-set bitmasks.
// Returns 1 iff the player to move from state `ans` can force a win.
bool dfs(int ans)
{
  if(sg[ans] != -1) return sg[ans] ;          // already solved
  // Taking every remaining point in one move wins outright when legal.
  if(check(ans)) return sg[ans] = 1 ;
  // Enumerate all proper non-empty sub-states `rest` of `ans`; the move
  // removes the set ans ^ rest. The mover wins iff some legal move
  // (removed points collinear) leaves the opponent in a losing state.
  int rest = (ans - 1) & ans ;
  while(rest)
  {
    if(check(ans ^ rest) && !dfs(rest))
      return sg[ans] = 1 ;
    rest = (rest - 1) & ans ;
  }
  return sg[ans] = 0 ;                         // every move hands over a win
}
// Reads n points, solves the full state (all bits set), and prints the
// winner: "zyh" moves first, "fzj" second.
int main()
{
  n = in() ;
  for(int i = 0 ; i < n ; i ++ )
  {
    x[i] = in() ;
    y[i] = in() ;
  }
  memset(sg , -1 , sizeof sg) ;
  sg[0] = 0 ;   // empty board: the previous mover took the last point and won
  puts(dfs((1 << n) - 1) ? "zyh" : "fzj") ;
  return 0 ;
}
/*
*/


import copy
import requests
import json
import traceback
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import threading
import warnings
import os
from lxml import html

warnings.filterwarnings("ignore")


def process(url):
    """Fetch one detail page and append its parsed rows to the shared all_data list.

    Returns True on success, False when the page could not be fetched.
    Relies on module-level `lock` and `all_data` set up in __main__.
    """
    print(f'开始抓取{url}')
    response = crawl(url)
    if not response:
        return False
    data = struct_data(response)
    with lock:
        all_data.extend(data)
    return True


def crawl(url):
    """GET `url` with browser-like headers, retrying up to 3 times.

    Returns the Response on HTTP 200, otherwise None.
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "priority": "u=0, i",
        "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"
    }
    params = {}
    retries = 3
    delay = 3
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, params=params)
            response.close()  # body already buffered; release the connection
            if response.status_code == 200:
                return response
            time.sleep(delay)
        except Exception as e:
            # FIX: the original f-string contained a raw newline (SyntaxError),
            # and print(traceback.print_exc()) printed a spurious None.
            print(f"请求失败: {e}. 尝试次数: {attempt + 1}/{retries}")
            traceback.print_exc()
            time.sleep(delay)
    return None


def struct_data(response):
    """Parse a product detail page into WooCommerce-style variation rows.

    Reads the embedded mobify-data / react-helmet JSON blobs and returns a
    list of dicts, one per size variant. Uses module-level `category`/`brand`.
    """
    product_list = []
    data_format = {
        "Type": "variation",
        "SKU": "",
        "Name": "",
        "Description": "",
        "Stock": 1000,
        "Sale price": "",
        "Regular price": "",
        "Categories": f"{category}",
        "Tags": "",
        "Images": "",
        "Parent": "",
        "is_upload": 0,
        "brand": f"{brand}"
    }
    etree = html.fromstring(response.text)
    json_data = json.loads(etree.xpath('//script[@id="mobify-data"]')[0].text)
    json_data1 = json.loads(etree.xpath('//script[@data-react-helmet="true"]')[0].text)
    parent = json_data1.get('sku')
    products = (json_data.get('__PRELOADED_STATE__')
                .get('__STATE_MANAGEMENT_LIBRARY')
                .get('reduxStoreState')
                .get('products')
                .get(parent))
    name = products.get('name')
    # Concatenate short + long description, tolerating either being missing.
    description = (products.get('shortDescription') or "") + (products.get('longDescription') or "")
    price = json_data1.get('offers').get('price')
    images_list = [item.get('link') for item in products.get('imageGroups')[0].get('images')]
    images_str = ','.join(images_list)
    color = json_data1.get('color')
    for index, item in enumerate(products.get('variants'), start=1):
        size = item.get('variationValues').get('size')
        data_format['SKU'] = parent + '_' + str(index)
        data_format['Name'] = name
        data_format['Description'] = description
        data_format['Sale price'] = price
        data_format['Regular price'] = price
        data_format['Images'] = images_str
        data_format['Parent'] = parent
        data_format['Attribute 1 name'] = 'Color'
        data_format['Attribute 1 value(s)'] = color
        data_format['Attribute 2 name'] = 'Size'
        data_format['Attribute 2 value(s)'] = size
        # deepcopy so each appended row is independent of the template dict
        product_list.append(copy.deepcopy(data_format))
    return product_list


def read_detail_url(category):
    """Return the list of detail-page URLs stored for `category` in url.json."""
    pwd = '../json_data/url.json'
    with open(pwd, 'r', encoding='utf-8') as f:
        data = json.load(f)
    return data.get(category)


def write_to_csv(data):
    """Write all scraped rows to ../o_data/{category}.csv (overwrite, with header)."""
    # FIX for "找不到路径" (FileNotFoundError): this runs before struct_parent(),
    # which was the only place that created the output directory.
    os.makedirs('../o_data', exist_ok=True)
    pwd = rf'../o_data/{category}.csv'
    with lock:
        try:
            df = pd.DataFrame(data)
            df.to_csv(pwd, mode='w', index=False, header=True, encoding='utf-8')
            print(f'Data successfully written to csv')
        except Exception as e:
            print(f'Error writing to CSV: {e}')


def _attr_pair(group, idx):
    """Return (attribute name, comma-joined unique values) for column pair
    'Attribute {idx} name' / 'Attribute {idx} value(s)', or ('', '') when the
    columns are absent or entirely NaN."""
    name_col = f'Attribute {idx} name'
    val_col = f'Attribute {idx} value(s)'
    if name_col in group.columns and not group[name_col].isna().all():
        values = group[val_col].dropna().unique()
        joined = ','.join(map(str, values)) if len(values) > 0 else ''
        return group[name_col].iloc[0], joined
    return '', ''


def struct_parent():
    """Rebuild the category CSV, inserting one 'variable' parent row before
    each group of variation rows (WooCommerce import layout)."""
    dir_path = '../o_data'
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    try:
        df = pd.read_csv(f'../o_data/{category}.csv')
        parent_rows = []
        # Simple products have no parent; carry them over unchanged.
        for index, row in df.iterrows():
            if row['Type'] == 'simple':
                parent_rows.append(row.to_dict())
        for parent_value, group in df.groupby('Parent'):
            attribute_1_values = group['Attribute 1 value(s)'].dropna().unique()
            attr2_name, attr2_value_str = _attr_pair(group, 2)
            attr3_name, attr3_value_str = _attr_pair(group, 3)
            attr4_name, attr4_value_str = _attr_pair(group, 4)
            first_image = group['Images'].iloc[0] if not group['Images'].empty else ''
            parent_row = {
                'Type': 'variable',
                'SKU': parent_value,
                'Name': group['Name'].iloc[0],
                'Description': group['Description'].iloc[0],
                'Sale price': group['Sale price'].iloc[0],
                'Regular price': group['Regular price'].iloc[0],
                'Categories': group['Categories'].iloc[0],
                'Tags': group['Tags'].iloc[0],
                'Images': first_image.split(',')[0] if first_image else '',
                'Parent': '',
                'Attribute 1 name': group['Attribute 1 name'].iloc[0],
                'Attribute 1 value(s)': ','.join(map(str, attribute_1_values)),
                'Attribute 2 name': attr2_name,
                'Attribute 2 value(s)': attr2_value_str,
                'Attribute 3 name': attr3_name,
                'Attribute 3 value(s)': attr3_value_str,
                'Attribute 4 name': attr4_name,
                'Attribute 4 value(s)': attr4_value_str,
                'is_upload': 0,
                'brand': f'{brand}'
            }
            parent_rows.append(parent_row)
            # Parent first, then its variation rows.
            for _, row in group.iterrows():
                parent_rows.append(row.to_dict())
        pd.DataFrame(parent_rows).to_csv(f'{dir_path}/{category}.csv', index=False)
    except Exception as e:
        # FIX: print(traceback.print_exc()) printed a spurious None.
        traceback.print_exc()
        print(e)


def write_fail(category, detail_url_list):
    """Record permanently failed URLs for `category` in ../json_data/fail.json."""
    pwd = '../json_data/fail.json'
    if not os.path.exists(pwd):
        data = {}
    else:
        with open(pwd, 'r', encoding='utf-8') as file:
            data = json.load(file)
    data[category] = detail_url_list
    with open(pwd, 'w', encoding='utf-8') as file:
        json.dump(data, file, indent=4, ensure_ascii=False)


if __name__ == '__main__':
    brand = 'Off-White'
    lock = threading.Lock()
    with open('../json_data/collections.json', 'r', encoding='utf-8') as f:
        json_dict = json.load(f)
    for category in json_dict.keys():
        print(f'开始抓取{category}分类')
        detail_url_list = read_detail_url(category)
        all_data = []
        # FIX: reset per category so failures never leak between categories.
        fail_list = []
        # Up to 3 passes: the first over all URLs, then retries over failures.
        for i in range(3):
            with ThreadPoolExecutor(max_workers=10) as executor:
                futures = {executor.submit(process, url): url for url in detail_url_list}
                for future in as_completed(futures):
                    url = futures[future]
                    try:
                        if not future.result():
                            fail_list.append(url)
                    except Exception as e:
                        traceback.print_exc()
                        print(f"抓取 {url} 时发生错误: {e}")
                        fail_list.append(url)
            print('失败个数:', len(fail_list))
            if not fail_list:
                detail_url_list = []
                break
            print('重试失败的请求')
            detail_url_list = fail_list.copy()
            fail_list.clear()
        write_to_csv(all_data)
        # URLs still failing after every retry are persisted to fail.json.
        if len(detail_url_list) != 0:
            write_fail(category, detail_url_list)
        # FIX: stray question text ("找不到路径怎么修改") fused onto this call removed.
        struct_parent()
07-25
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值