After setting the RESULT_CACHE_MAX_SIZE parameter and restarting the database, the Query Result Cache remains disabled.

This article explains why the Query Result Cache feature is disabled on Oracle Database Standard Edition. The feature is available only in Enterprise Edition: after a restart, any RESULT_CACHE_MAX_SIZE value set on Standard Edition is reset to 0, which disables the cache.


Source:
Query Result Cache is disabled After Setting RESULT_CACHE_MAX_SIZE And Restarting The Database (Doc ID 460581.1)

Applies to:
Oracle Server - Standard Edition - Version 11.1.0.6
This problem can occur on any platform.

Symptoms:
The RESULT_CACHE_MAX_SIZE parameter is zero after the restart, although it was set to a non-zero value in the spfile. The Query Result Cache feature is disabled when the parameter is 0.
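The symptom can be reproduced from SQL*Plus along the following lines (a minimal sketch; the 2M value is illustrative, not taken from the note):

ALTER SYSTEM SET result_cache_max_size = 2M SCOPE = SPFILE;
SHUTDOWN IMMEDIATE
STARTUP
-- On Standard Edition the parameter silently comes back as 0:
SHOW PARAMETER result_cache_max_size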

Cause:
This happens on Standard Edition installations. The Query Result Cache is an Enterprise Edition feature; it is not available in Oracle Database Standard Edition.
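A quick way to confirm which edition an instance is running is a standard data-dictionary query (the exact banner text varies by release):

SELECT banner FROM v$version;
-- Enterprise Edition banners contain the words "Enterprise Edition";
-- Standard Edition banners do not.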


Solution:
Install Oracle Enterprise Edition if this feature is needed.
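Once the instance is running Enterprise Edition, enabling and exercising the cache might look like the sketch below (the size, table name, and expected output are assumptions for illustration; if the cache was disabled at startup it cannot be re-enabled dynamically, hence the restart):

ALTER SYSTEM SET result_cache_max_size = 2M SCOPE = SPFILE;
-- Restart the instance, then:
SELECT DBMS_RESULT_CACHE.STATUS() FROM dual;      -- expect ENABLED
SELECT /*+ RESULT_CACHE */ COUNT(*) FROM sales;   -- hypothetical table; repeated runs are served from the cache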
