Failed to load properties file for project==>ANDROID_SDK_HOME

本文解决了Eclipse中新建Android项目时出现的“Failed to load properties file for project”错误,并分享了通过设置ANDROID_SDK_HOME环境变量及清理workspace来解决编译卡住的问题。
Failed to load properties file for project==>ANDROID_SDK_HOME
2011-06-22 22:23

eclipse编译apk的时候遇到问题,但没有提示,新建android工程提示:

Failed to load properties file for project

 

解决:

Properties -> Java Build Path -> Classpath Variables

添加变量ANDROID_SDK_HOME,指向sdk目录。

 

但是编译的时候卡住了……

 

后来把workspace里的隐藏文件删了,所有工程重新导入,ok了。

#!/usr/bin/env python3 # Copyright (C) 2017 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import atexit import hashlib import os import shutil import signal import subprocess import sys import tempfile import time import uuid import platform TRACE_TO_TEXT_SHAS = { 'linux': '7e3e10dfb324e31723efd63ac25037856e06eba0', 'mac': '21f0f42dd019b4f09addd404a114fbf2322ca8a4', } TRACE_TO_TEXT_PATH = tempfile.gettempdir() TRACE_TO_TEXT_BASE_URL = ('https://storage.googleapis.com/perfetto/') NULL = open(os.devnull) NOOUT = { 'stdout': NULL, 'stderr': NULL, } UUID = str(uuid.uuid4())[-6:] def check_hash(file_name, sha_value): file_hash = hashlib.sha1() with open(file_name, 'rb') as fd: while True: chunk = fd.read(4096) if not chunk: break file_hash.update(chunk) return file_hash.hexdigest() == sha_value def load_trace_to_text(os_name): sha_value = TRACE_TO_TEXT_SHAS[os_name] file_name = 'trace_to_text-' + os_name + '-' + sha_value local_file = os.path.join(TRACE_TO_TEXT_PATH, file_name) if os.path.exists(local_file): if not check_hash(local_file, sha_value): os.remove(local_file) else: return local_file url = TRACE_TO_TEXT_BASE_URL + file_name subprocess.check_call(['curl', '-L', '-#', '-o', local_file, url]) if not check_hash(local_file, sha_value): os.remove(local_file) raise ValueError("Invalid signature.") os.chmod(local_file, 0o755) 
return local_file PACKAGES_LIST_CFG = '''data_sources { config { name: "android.packages_list" } } ''' CFG_INDENT = ' ' CFG = '''buffers {{ size_kb: 63488 }} data_sources {{ config {{ name: "android.heapprofd" heapprofd_config {{ shmem_size_bytes: {shmem_size} sampling_interval_bytes: {interval} {target_cfg} }} }} }} duration_ms: {duration} write_into_file: true flush_timeout_ms: 30000 flush_period_ms: 604800000 ''' # flush_period_ms of 1 week to suppress trace_processor_shell warning. CONTINUOUS_DUMP = """ continuous_dump_config {{ dump_phase_ms: 0 dump_interval_ms: {dump_interval} }} """ PROFILE_LOCAL_PATH = os.path.join(tempfile.gettempdir(), UUID) IS_INTERRUPTED = False def sigint_handler(sig, frame): global IS_INTERRUPTED IS_INTERRUPTED = True def print_no_profile_error(): print("No profiles generated", file=sys.stderr) print( "If this is unexpected, check " "https://perfetto.dev/docs/data-sources/native-heap-profiler#troubleshooting.", file=sys.stderr) def known_issues_url(number): return ('https://perfetto.dev/docs/data-sources/native-heap-profiler' '#known-issues-android{}'.format(number)) KNOWN_ISSUES = { '10': known_issues_url(10), 'Q': known_issues_url(10), '11': known_issues_url(11), 'R': known_issues_url(11), } def maybe_known_issues(): release_or_codename = subprocess.check_output( ['adb', 'shell', 'getprop', 'ro.build.version.release_or_codename'] ).decode('utf-8').strip() return KNOWN_ISSUES.get(release_or_codename, None) SDK = { 'R': 30, } def release_or_newer(release): sdk = int(subprocess.check_output( ['adb', 'shell', 'getprop', 'ro.system.build.version.sdk'] ).decode('utf-8').strip()) if sdk >= SDK[release]: return True codename = subprocess.check_output( ['adb', 'shell', 'getprop', 'ro.build.version.codename'] ).decode('utf-8').strip() return codename == release def main(argv): parser = argparse.ArgumentParser() parser.add_argument( "-i", "--interval", help="Sampling interval. 
" "Default 4096 (4KiB)", type=int, default=4096) parser.add_argument( "-d", "--duration", help="Duration of profile (ms). 0 to run until interrupted. " "Default: until interrupted by user.", type=int, default=0) # This flag is a no-op now. We never start heapprofd explicitly using system # properties. parser.add_argument( "--no-start", help="Do not start heapprofd.", action='store_true') parser.add_argument( "-p", "--pid", help="Comma-separated list of PIDs to " "profile.", metavar="PIDS") parser.add_argument( "-n", "--name", help="Comma-separated list of process " "names to profile.", metavar="NAMES") parser.add_argument( "-f", "--functions", help="Comma-separated list of functions " "names to profile.", metavar="FUNCTIONS") parser.add_argument( "-c", "--continuous-dump", help="Dump interval in ms. 0 to disable continuous dump.", type=int, default=0) parser.add_argument( "--heaps", help="Comma-separated list of heaps to collect, e.g: malloc,art. " "Requires Android 12.", metavar="HEAPS") parser.add_argument( "--all-heaps", action="store_true", help="Collect allocations from all heaps registered by target." ) parser.add_argument( "--no-android-tree-symbolization", action="store_true", help="Do not symbolize using currently lunched target in the " "Android tree." ) parser.add_argument( "--disable-selinux", action="store_true", help="Disable SELinux enforcement for duration of " "profile.") parser.add_argument( "--no-versions", action="store_true", help="Do not get version information about APKs.") parser.add_argument( "--no-running", action="store_true", help="Do not target already running processes. Requires Android 11.") parser.add_argument( "--no-startup", action="store_true", help="Do not target processes that start during " "the profile. Requires Android 11.") parser.add_argument( "--shmem-size", help="Size of buffer between client and " "heapprofd. Default 8MiB. 
Needs to be a power of two " "multiple of 4096, at least 8192.", type=int, default=8 * 1048576) parser.add_argument( "--block-client", help="When buffer is full, block the " "client to wait for buffer space. Use with caution as " "this can significantly slow down the client. " "This is the default", action="store_true") parser.add_argument( "--block-client-timeout", help="If --block-client is given, do not block any allocation for " "longer than this timeout (us).", type=int) parser.add_argument( "--no-block-client", help="When buffer is full, stop the " "profile early.", action="store_true") parser.add_argument( "--idle-allocations", help="Keep track of how many " "bytes were unused since the last dump, per " "callstack", action="store_true") parser.add_argument( "--dump-at-max", help="Dump the maximum memory usage " "rather than at the time of the dump.", action="store_true") parser.add_argument( "--disable-fork-teardown", help="Do not tear down client in forks. This can be useful for programs " "that use vfork. Android 11+ only.", action="store_true") parser.add_argument( "--simpleperf", action="store_true", help="Get simpleperf profile of heapprofd. This is " "only for heapprofd development.") parser.add_argument( "--trace-to-text-binary", help="Path to local trace to text. For debugging.") parser.add_argument( "--print-config", action="store_true", help="Print config instead of running. 
For debugging.") parser.add_argument( "-o", "--output", help="Output directory.", metavar="DIRECTORY", default=None) args = parser.parse_args() fail = False if args.block_client and args.no_block_client: print( "FATAL: Both block-client and no-block-client given.", file=sys.stderr) fail = True if args.pid is None and args.name is None: print("FATAL: Neither PID nor NAME given.", file=sys.stderr) fail = True if args.duration is None: print("FATAL: No duration given.", file=sys.stderr) fail = True if args.interval is None: print("FATAL: No interval given.", file=sys.stderr) fail = True if args.shmem_size % 4096: print("FATAL: shmem-size is not a multiple of 4096.", file=sys.stderr) fail = True if args.shmem_size < 8192: print("FATAL: shmem-size is less than 8192.", file=sys.stderr) fail = True if args.shmem_size & (args.shmem_size - 1): print("FATAL: shmem-size is not a power of two.", file=sys.stderr) fail = True target_cfg = "" if not args.no_block_client: target_cfg += CFG_INDENT + "block_client: true\n" if args.block_client_timeout: target_cfg += ( CFG_INDENT + "block_client_timeout_us: %s\n" % args.block_client_timeout ) if args.no_startup: target_cfg += CFG_INDENT + "no_startup: true\n" if args.no_running: target_cfg += CFG_INDENT + "no_running: true\n" if args.dump_at_max: target_cfg += CFG_INDENT + "dump_at_max: true\n" if args.disable_fork_teardown: target_cfg += CFG_INDENT + "disable_fork_teardown: true\n" if args.all_heaps: target_cfg += CFG_INDENT + "all_heaps: true\n" if args.pid: for pid in args.pid.split(','): try: pid = int(pid) except ValueError: print("FATAL: invalid PID %s" % pid, file=sys.stderr) fail = True target_cfg += CFG_INDENT + 'pid: {}\n'.format(pid) if args.name: for name in args.name.split(','): target_cfg += CFG_INDENT + 'process_cmdline: "{}"\n'.format(name) if args.heaps: for heap in args.heaps.split(','): target_cfg += CFG_INDENT + 'heaps: "{}"\n'.format(heap) if args.functions: for functions in args.functions.split(','): target_cfg 
+= CFG_INDENT + 'function_names: "{}"\n'.format(functions) if fail: parser.print_help() return 1 trace_to_text_binary = args.trace_to_text_binary if args.continuous_dump: target_cfg += CONTINUOUS_DUMP.format(dump_interval=args.continuous_dump) cfg = CFG.format( interval=args.interval, duration=args.duration, target_cfg=target_cfg, shmem_size=args.shmem_size) if not args.no_versions: cfg += PACKAGES_LIST_CFG if args.print_config: print(cfg) return 0 # Do this AFTER print_config so we do not download trace_to_text only to # print out the config. has_trace_to_text = True if trace_to_text_binary is None: os_name = None if sys.platform.startswith('linux'): os_name = 'linux' elif sys.platform.startswith('darwin'): os_name = 'mac' elif sys.platform.startswith('win32'): has_trace_to_text = False else: print("Invalid platform: {}".format(sys.platform), file=sys.stderr) return 1 arch = platform.machine() if arch not in ['x86_64', 'amd64']: has_trace_to_text = False if has_trace_to_text: trace_to_text_binary = load_trace_to_text(os_name) known_issues = maybe_known_issues() if known_issues: print('If you are experiencing problems, please see the known issues for ' 'your release: {}.'.format(known_issues)) # TODO(fmayer): Maybe feature detect whether we can remove traces instead of # this. 
uuid_trace = release_or_newer('R') if uuid_trace: profile_device_path = '/data/misc/perfetto-traces/profile-' + UUID else: user = subprocess.check_output( ['adb', 'shell', 'whoami']).decode('utf-8').strip() profile_device_path = '/data/misc/perfetto-traces/profile-' + user perfetto_cmd = ('CFG=\'{cfg}\'; echo ${{CFG}} | ' 'perfetto --txt -c - -o ' + profile_device_path + ' -d') if args.disable_selinux: enforcing = subprocess.check_output(['adb', 'shell', 'getenforce']) atexit.register( subprocess.check_call, ['adb', 'shell', 'su root setenforce %s' % enforcing]) subprocess.check_call(['adb', 'shell', 'su root setenforce 0']) if args.simpleperf: subprocess.check_call([ 'adb', 'shell', 'mkdir -p /data/local/tmp/heapprofd_profile && ' 'cd /data/local/tmp/heapprofd_profile &&' '(nohup simpleperf record -g -p $(pidof heapprofd) 2>&1 &) ' '> /dev/null' ]) profile_target = PROFILE_LOCAL_PATH if args.output is not None: profile_target = args.output else: os.mkdir(profile_target) if not os.path.isdir(profile_target): print("Output directory {} not found".format(profile_target), file=sys.stderr) return 1 if os.listdir(profile_target): print("Output directory {} not empty".format(profile_target), file=sys.stderr) return 1 perfetto_pid = subprocess.check_output( ['adb', 'exec-out', perfetto_cmd.format(cfg=cfg)]).strip() try: perfetto_pid = int(perfetto_pid.strip()) except ValueError: print("Failed to invoke perfetto: {}".format(perfetto_pid), file=sys.stderr) return 1 old_handler = signal.signal(signal.SIGINT, sigint_handler) print("Profiling active. 
Press Ctrl+C to terminate.") print("You may disconnect your device.") print() exists = True device_connected = True while not device_connected or (exists and not IS_INTERRUPTED): exists = subprocess.call( ['adb', 'shell', '[ -d /proc/{} ]'.format(perfetto_pid)], **NOOUT) == 0 device_connected = subprocess.call(['adb', 'shell', 'true'], **NOOUT) == 0 time.sleep(1) print("Waiting for profiler shutdown...") signal.signal(signal.SIGINT, old_handler) if IS_INTERRUPTED: # Not check_call because it could have existed in the meantime. subprocess.call(['adb', 'shell', 'kill', '-INT', str(perfetto_pid)]) if args.simpleperf: subprocess.check_call(['adb', 'shell', 'killall', '-INT', 'simpleperf']) print("Waiting for simpleperf to exit.") while subprocess.call( ['adb', 'shell', '[ -f /proc/$(pidof simpleperf)/exe ]'], **NOOUT) == 0: time.sleep(1) subprocess.check_call( ['adb', 'pull', '/data/local/tmp/heapprofd_profile', profile_target]) print( "Pulled simpleperf profile to " + profile_target + "/heapprofd_profile") # Wait for perfetto cmd to return. 
while exists: exists = subprocess.call( ['adb', 'shell', '[ -d /proc/{} ]'.format(perfetto_pid)]) == 0 time.sleep(1) profile_host_path = os.path.join(profile_target, 'raw-trace') subprocess.check_call( ['adb', 'pull', profile_device_path, profile_host_path], stdout=NULL) if uuid_trace: subprocess.check_call( ['adb', 'shell', 'rm', profile_device_path], stdout=NULL) if not has_trace_to_text: print('Wrote profile to {}'.format(profile_host_path)) print('This file can be opened using the Perfetto UI, https://ui.perfetto.dev') return 0 binary_path = os.getenv('PERFETTO_BINARY_PATH') if not args.no_android_tree_symbolization: product_out = os.getenv('ANDROID_PRODUCT_OUT') if product_out: product_out_symbols = product_out + '/symbols' else: product_out_symbols = None if binary_path is None: binary_path = product_out_symbols elif product_out_symbols is not None: binary_path += ":" + product_out_symbols trace_file = os.path.join(profile_target, 'raw-trace') concat_files = [trace_file] if binary_path is not None: with open(os.path.join(profile_target, 'symbols'), 'w') as fd: ret = subprocess.call([ trace_to_text_binary, 'symbolize', os.path.join(profile_target, 'raw-trace')], env=dict(os.environ, PERFETTO_BINARY_PATH=binary_path), stdout=fd) if ret == 0: concat_files.append(os.path.join(profile_target, 'symbols')) else: print("Failed to symbolize. Continuing without symbols.", file=sys.stderr) proguard_map = os.getenv('PERFETTO_PROGUARD_MAP') if proguard_map is not None: with open(os.path.join(profile_target, 'deobfuscation-packets'), 'w') as fd: ret = subprocess.call([ trace_to_text_binary, 'deobfuscate', os.path.join(profile_target, 'raw-trace')], env=dict(os.environ, PERFETTO_PROGUARD_MAP=proguard_map), stdout=fd) if ret == 0: concat_files.append( os.path.join(profile_target, 'deobfuscation-packets')) else: print("Failed to deobfuscate. 
Continuing without deobfuscated.", file=sys.stderr) if len(concat_files) > 1: with open(os.path.join(profile_target, 'symbolized-trace'), 'wb') as out: for fn in concat_files: with open(fn, 'rb') as inp: while True: buf = inp.read(4096) if not buf: break out.write(buf) trace_file = os.path.join(profile_target, 'symbolized-trace') trace_to_text_output = subprocess.check_output( [trace_to_text_binary, 'profile', trace_file]) profile_path = None print('caifc trace_file ' + str(trace_file)) print('caifc trace_to_text_output ' + str(trace_to_text_output)) for word in trace_to_text_output.decode('utf-8').split(): if 'heap_profile-' in word: profile_path = word if profile_path is None: print_no_profile_error() return 1 profile_files = os.listdir(profile_path) if not profile_files: print_no_profile_error() return 1 for profile_file in profile_files: shutil.copy(os.path.join(profile_path, profile_file), profile_target) subprocess.check_call( ['gzip'] + [os.path.join(profile_target, x) for x in profile_files]) symlink_path = None if args.output is None: symlink_path = os.path.join( os.path.dirname(profile_target), "heap_profile-latest") if os.path.lexists(symlink_path): os.unlink(symlink_path) os.symlink(profile_target, symlink_path) if symlink_path is not None: print("Wrote profiles to {} (symlink {})".format( profile_target, symlink_path)) else: print("Wrote profiles to {}".format(profile_target)) print("These can be viewed using pprof. Googlers: head to pprof/ and " "upload them.") if __name__ == '__main__': sys.exit(main(sys.argv))
09-10
还是以下面这个代码为蓝本,合并问题是解决了,不要用sdk了因为不支持我的授权。就是卡在填色的地方: import os import json import requests import sys from typing import Dict, Any, List, Tuple, Set, Optional import urllib.parse import time import datetime import re from dotenv import load_dotenv from collections import defaultdict # === 辅助工具 === class Timer: def __init__(self): self._start = None def start(self): self._start = time.perf_counter() def stop(self, msg=""): if self._start: print(f" - [计时] {msg}耗时: {time.perf_counter() - self._start:.4f}s"); self._start = None # --- 加载配置 --- try: script_dir = os.path.dirname(os.path.realpath(__file__)) except: script_dir = os.path.abspath(os.getcwd()) dotenv_path = os.path.join(script_dir, '.env') load_dotenv(dotenv_path) if os.path.exists(dotenv_path) else None APP_ID = os.getenv("APP_ID") APP_SECRET = os.getenv("APP_SECRET") SPREADSHEET_TOKEN = os.getenv("SPREADSHEET_TOKEN") REDIRECT_URI = os.getenv("REDIRECT_URI", "http://localhost:8080/auth") TOKEN_FILE = "feishu_token.json" # 列配置 ATTR_COL = os.getenv("ATTRIBUTE_COL", "B") # (新) 属性列 NAME_COL = os.getenv("MATERIAL_NAME_COL", "A") # 物料名称 S1_CODE_COL = os.getenv("SUPPLIER_1_CODE_COL", "C") S1_SUP_COL = os.getenv("SUPPLIER_1_SUPPLIER_COL", "D") S2_CODE_COL = os.getenv("SUPPLIER_2_CODE_COL", "E") S2_SUP_COL = os.getenv("SUPPLIER_2_SUPPLIER_COL", "F") START_ROW = int(os.getenv("START_ROW", "2")) END_ROW = int(os.getenv("END_ROW", "500")) SUMMARY_SHEET_NAME = os.getenv("SUMMARY_SHEET_NAME", "零部件比对汇总(新)") # === 1. 
认证模块 (保持不变) === def save_token(data): try: with open(os.path.join(script_dir, TOKEN_FILE), 'w') as f: json.dump(data, f, indent=4) except Exception as e: print(f"Error saving token: {e}") def load_token(): path = os.path.join(script_dir, TOKEN_FILE) if not os.path.exists(path): return {} try: with open(path, 'r') as f: d = json.load(f) if 'expires_at' in d: d['expires_at'] = datetime.datetime.fromisoformat(d['expires_at']) return d except: return {} def get_access_token(): data = load_token() if data.get("access_token") and data.get("expires_at", datetime.datetime.min) > datetime.datetime.now(): return data["access_token"] if data.get("refresh_token"): try: url = "https://open.feishu.cn/open-apis/authen/v2/oauth/token" resp = requests.post(url, json={"grant_type": "refresh_token", "client_id": APP_ID, "client_secret": APP_SECRET, "refresh_token": data["refresh_token"]}) resp.raise_for_status() res = resp.json() if res.get("code") == 0: new_data = {"access_token": res["access_token"], "refresh_token": res["refresh_token"], "expires_at": (datetime.datetime.now() + datetime.timedelta(seconds=res["expires_in"]-300)).isoformat()} save_token({**new_data, "expires_at": new_data["expires_at"]}) return res["access_token"] except: pass scope = "sheets:spreadsheet sheets:spreadsheet:read sheets:spreadsheet:write_only sheets:spreadsheet.meta:read sheets:spreadsheet.meta:write" url = f"https://accounts.feishu.cn/open-apis/authen/v1/authorize?client_id={APP_ID}&response_type=code&redirect_uri={urllib.parse.quote(REDIRECT_URI)}&scope={scope}" print(f"请打开链接授权:\n{url}") code = input("请输入Code: ").strip() resp = requests.post("https://open.feishu.cn/open-apis/authen/v2/oauth/token", json={ "grant_type": "authorization_code", "client_id": APP_ID, "client_secret": APP_SECRET, "code": code, "redirect_uri": REDIRECT_URI}) res = resp.json() if res.get("code") != 0: raise Exception(f"Auth failed: {res}") token_data = {"access_token": res["access_token"], "refresh_token": 
res.get("refresh_token"), "expires_at": (datetime.datetime.now() + datetime.timedelta(seconds=res["expires_in"]-300)).isoformat()} save_token(token_data) return res["access_token"] # === 2. 飞书 API 基础操作 === def call_api(method, url, json_data=None, params=None): token = get_access_token() headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json; charset=utf-8"} resp = requests.request(method, url, headers=headers, json=json_data, params=params) try: resp.raise_for_status() except requests.exceptions.HTTPError as e: # 打印更详细的错误 print(f"API 请求失败: {e}") if e.response is not None: try: print(f"错误详情: {e.response.json()}") except json.JSONDecodeError: print(f"错误详情 (text): {e.response.text}") raise e # 重新抛出异常 res = resp.json() if res.get("code", 0) != 0: raise Exception(f"API Error: {res}") return res def get_sheet_id_by_name(sp_token, name): res = call_api("GET", f"https://open.feishu.cn/open-apis/sheets/v3/spreadsheets/{sp_token}/sheets/query") for sheet in res["data"]["sheets"]: if sheet["title"] == name: return sheet["sheet_id"] return None def create_sheet(sp_token, name): exist = get_sheet_id_by_name(sp_token, name) if exist: return exist # create_sheet 仍然使用 .../sheets_batch_update (这没错, V2的批量更新) res = call_api("POST", f"https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/{sp_token}/sheets_batch_update", json_data={"requests": [{"addSheet": {"properties": {"title": name}}}]}) return res["data"]["replies"][0]["addSheet"]["properties"]["sheetId"] def read_range(sp_token, sheet_id, col, s_row, e_row): rng = f"{sheet_id}!{col}{s_row}:{col}{e_row}" # read_range 使用 .../values_batch_get (这也没错, V2的读取) res = call_api("GET", f"https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/{sp_token}/values_batch_get", params={"ranges": rng}) vals = res["data"]["valueRanges"][0]["values"] result = [] for i, row in enumerate(vals): val = "" if row: cell = row[0] if isinstance(cell, dict): val = cell.get("text", "") else: val = str(cell) result.append({"row": 
s_row + i, "val": val.strip()}) return result # === 3. 工具函数:(保持不变) === def _col_str_to_idx(col_str: str) -> int: idx = 0 for char in col_str: idx = idx * 26 + (ord(char) - ord('A')) + 1 return idx - 1 def parse_range(range_str: str) -> Dict[str, Any]: try: sheet_id, coords = range_str.split('!') match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", coords) if match: start_col_str, start_row_str, end_col_str, end_row_str = match.groups() start_row_idx = int(start_row_str) - 1 end_row_idx = int(end_row_str) start_col_idx = _col_str_to_idx(start_col_str) end_col_idx = _col_str_to_idx(end_col_str) + 1 else: match_single = re.match(r"([A-Z]+)(\d+)", coords) if not match_single: raise ValueError(f"Invalid range coordinate format: {coords}") col_str, row_str = match_single.groups() start_row_idx = int(row_str) - 1 end_row_idx = start_row_idx + 1 start_col_idx = _col_str_to_idx(col_str) end_col_idx = start_col_idx + 1 return { "sheetId": sheet_id, "startRowIndex": start_row_idx, "endRowIndex": end_row_idx, "startColumnIndex": start_col_idx, "endColumnIndex": end_col_idx } except Exception as e: print(f"ERROR: 解析范围字符串 '{range_str}' 失败: {e}") return {} def num_to_col_char(n): string = "" while n > 0: n, remainder = divmod(n - 1, 26) string = chr(65 + remainder) + string return string # === 4. 
# === 4. Core logic: data fetching and alignment ===
# Per material name: {"attr": str, "projects": {title: {"s1": (supplier, code),
#                                                       "s2": (supplier, code)}}}
ProjectData = Dict[str, Dict[str, Tuple[str, str]]]


def fetch_all_data(sp_token, sheets):
    """Read all configured columns from each sheet and align rows by name.

    Returns a dict keyed by material name; the first non-empty attribute
    seen for a name wins.
    """
    aligned_data = defaultdict(lambda: {"attr": "", "projects": {}})
    for sheet in sheets:
        title, sid = sheet["title"], sheet["sheet_id"]
        print(f" - 读取项目: {title} ...")
        names = read_range(sp_token, sid, NAME_COL, START_ROW, END_ROW)
        attrs = read_range(sp_token, sid, ATTR_COL, START_ROW, END_ROW)
        s1_codes = read_range(sp_token, sid, S1_CODE_COL, START_ROW, END_ROW)
        s1_sups = read_range(sp_token, sid, S1_SUP_COL, START_ROW, END_ROW)
        s2_codes = read_range(sp_token, sid, S2_CODE_COL, START_ROW, END_ROW)
        s2_sups = read_range(sp_token, sid, S2_SUP_COL, START_ROW, END_ROW)

        for i, name_cell in enumerate(names):
            name = name_cell["val"]
            if not name:
                continue
            # Sibling columns may be shorter than the name column; guard each.
            attr = attrs[i]["val"] if i < len(attrs) else ""
            s1_c = s1_codes[i]["val"] if i < len(s1_codes) else ""
            s1_s = s1_sups[i]["val"] if i < len(s1_sups) else ""
            s2_c = s2_codes[i]["val"] if i < len(s2_codes) else ""
            s2_s = s2_sups[i]["val"] if i < len(s2_sups) else ""
            if not aligned_data[name]["attr"] and attr:
                aligned_data[name]["attr"] = attr
            aligned_data[name]["projects"][title] = {
                "s1": (s1_s, s1_c),  # (supplier, part code)
                "s2": (s2_s, s2_c),
            }
    return aligned_data


def main():
    """Interactively compare part/supplier data across projects.

    Reads the selected project sheets, builds a summary matrix, writes it to
    the summary sheet, merges the header cells, and colors supplier/code
    pairs green (shared across projects) or red (unique to one project).
    """
    print("=== 开始执行:多项目零部件比对与样式生成 ===")

    # 1. List all sheets and let the user pick which projects to compare.
    all_sheets_meta = call_api(
        "GET",
        f"https://open.feishu.cn/open-apis/sheets/v3/spreadsheets/"
        f"{SPREADSHEET_TOKEN}/sheets/query")["data"]["sheets"]
    valid_sheets = [s for s in all_sheets_meta
                    if s["title"] != SUMMARY_SHEET_NAME]

    print("发现以下项目:")
    for i, s in enumerate(valid_sheets):
        print(f" [{i+1}] {s['title']}")

    sel_idx = input("请输入要比对的项目序号 (如 1,2,3 或 all): ").strip()
    if sel_idx.lower() == "all":
        selected = valid_sheets
    else:
        selected = [valid_sheets[int(x) - 1]
                    for x in sel_idx.split(",") if x.strip().isdigit()]

    if len(selected) < 2:
        print("至少选择两个项目!")
        return
    proj_names = [s["title"] for s in selected]

    # 2. Read and align the data.
    timer = Timer()
    timer.start()
    data_map = fetch_all_data(SPREADSHEET_TOKEN, selected)
    timer.stop("数据读取与对齐")

    # 3. Build the output matrix (3 header rows + one row per material).
    sorted_names = sorted(data_map.keys())
    matrix = []

    # --- Header rows ---
    row1 = ["项目", "属性"]
    for p in proj_names:
        row1.extend([p, "", "", ""])
    matrix.append(row1)

    row2 = ["", ""]
    for _ in proj_names:
        row2.extend(["一供", "", "二供", ""])
    matrix.append(row2)

    row3 = ["物料名称", ""]
    for _ in proj_names:
        row3.extend(["供应商", "物料料号", "供应商", "物料料号"])
    matrix.append(row3)

    # --- Data rows and cell-color bookkeeping ---
    COLOR_GREEN = "#b7e1cd"
    COLOR_RED = "#f4c7c3"
    green_cells = []
    red_cells = []
    current_row_idx = 4  # first data row (1-based, after 3 header rows)

    for name in sorted_names:
        item = data_map[name]
        attr = item["attr"]
        row_data = [name, attr]

        # Count how often each (supplier, code) pair appears across projects:
        # pairs seen more than once are "shared" and will be colored green.
        pair_counts = defaultdict(int)
        for p_name in proj_names:
            p_data = item["projects"].get(
                p_name, {"s1": ("", ""), "s2": ("", "")})
            s1 = p_data["s1"]
            s2 = p_data["s2"]
            if s1[0] and s1[1]:
                pair_counts[s1] += 1
            if s2[0] and s2[1]:
                pair_counts[s2] += 1

        col_idx = 3  # column C: first supplier column of the first project
        for p_name in proj_names:
            p_data = item["projects"].get(
                p_name, {"s1": ("", ""), "s2": ("", "")})
            s1 = p_data["s1"]
            s2 = p_data["s2"]

            row_data.extend([s1[0], s1[1]])
            if s1[0] and s1[1]:
                cell_sup = f"{num_to_col_char(col_idx)}{current_row_idx}"
                cell_code = f"{num_to_col_char(col_idx + 1)}{current_row_idx}"
                if pair_counts[s1] > 1:
                    green_cells.extend([cell_sup, cell_code])
                else:
                    red_cells.extend([cell_sup, cell_code])
            col_idx += 2

            row_data.extend([s2[0], s2[1]])
            if s2[0] and s2[1]:
                cell_sup = f"{num_to_col_char(col_idx)}{current_row_idx}"
                cell_code = f"{num_to_col_char(col_idx + 1)}{current_row_idx}"
                if pair_counts[s2] > 1:
                    green_cells.extend([cell_sup, cell_code])
                else:
                    red_cells.extend([cell_sup, cell_code])
            col_idx += 2

        matrix.append(row_data)
        current_row_idx += 1

    # 4. Write the matrix to the summary sheet.
    timer.start()
    sum_sid = create_sheet(SPREADSHEET_TOKEN, SUMMARY_SHEET_NAME)
    print(f"目标Sheet: {SUMMARY_SHEET_NAME} (ID: {sum_sid})")

    if matrix:
        num_rows = len(matrix)
        num_cols = len(matrix[0])
        end_col_letter = num_to_col_char(num_cols)
        full_range = f"{sum_sid}!A1:{end_col_letter}{num_rows}"
        print(f" - 正在写入范围: {full_range} ...")
        call_api(
            "PUT",
            f"https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/"
            f"{SPREADSHEET_TOKEN}/values",
            json_data={"valueRange": {"range": full_range, "values": matrix}})
    else:
        print(" - 警告: 没有数据需要写入。")
    print(" - 数据写入完成")

    # 5. Merge the header cells.
    print(" - 正在准备单元格合并 (表头)...")
    merge_list = []
    merge_list.append(f"{sum_sid}!A1:A2")  # "项目" spans rows 1-2
    merge_list.append(f"{sum_sid}!B1:B3")  # "属性" spans rows 1-3
    current_col = 3  # column C
    for _ in proj_names:
        start = num_to_col_char(current_col)
        end = num_to_col_char(current_col + 3)
        merge_list.append(f"{sum_sid}!{start}1:{end}1")  # project name
        s1_start = num_to_col_char(current_col)
        s1_end = num_to_col_char(current_col + 1)
        merge_list.append(f"{sum_sid}!{s1_start}2:{s1_end}2")  # 一供
        s2_start = num_to_col_char(current_col + 2)
        s2_end = num_to_col_char(current_col + 3)
        merge_list.append(f"{sum_sid}!{s2_start}2:{s2_end}2")  # 二供
        current_col += 4

    # The dedicated V2 merge_cells API merges one range per call, so loop.
    API_V2_MERGE_URL = (
        f"https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/"
        f"{SPREADSHEET_TOKEN}/merge_cells")
    if merge_list:
        print(f" - 正在批量执行 {len(merge_list)} 个合并请求...")
        for rng_str in merge_list:
            payload = {
                "range": rng_str,  # A1-style string, prefixed with sheetId
                "mergeType": "MERGE_ALL",
            }
            try:
                call_api("POST", API_V2_MERGE_URL, json_data=payload)
            except Exception as e:
                print(f"WARN: 合并 {rng_str} 失败: {e}")
    print(" - 表头合并完成")

    # 6. Apply background colors via the V3 styles_batch_update API.
    # (Use integer division: each colored pair contributes two cells.)
    print(f" - 正在准备应用样式 (红: {len(red_cells) // 2}组, "
          f"绿: {len(green_cells) // 2}组)...")
    API_V3_STYLE_URL = (
        f"https://open.feishu.cn/open-apis/sheets/v3/spreadsheets/"
        f"{SPREADSHEET_TOKEN}/styles_batch_update")

    style_data = []  # V3 expects a payload of the form {"data": [...]}
    if red_cells:
        # V3 ranges must carry the sheetId! prefix.
        absolute_red_cells = [f"{sum_sid}!{cell}" for cell in red_cells]
        style_data.append({
            "ranges": absolute_red_cells,
            # Feishu API key is camelCase: backColor.
            "style": {"backColor": COLOR_RED},
        })
    if green_cells:
        absolute_green_cells = [f"{sum_sid}!{cell}" for cell in green_cells]
        style_data.append({
            "ranges": absolute_green_cells,
            "style": {"backColor": COLOR_GREEN},
        })

    if style_data:
        print(f" - 正在提交 {len(style_data)} 组样式更新请求 (红/绿各一组)...")
        try:
            call_api("POST", API_V3_STYLE_URL, json_data={"data": style_data})
            print(" - 样式更新成功")
        except Exception as e:
            # call_api already printed the detailed server error.
            print(f" - 样式更新失败: {e}")
    else:
        print(" - 没有样式需要更新")

    timer.stop("写入、合并与上色")
    print("任务完成!请查看飞书文档。")


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exc()
        print(f"Error: {e}")
最新发布
11-16
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值