Python data.load Method Code Examples

This article walks through Python's data.load method with 10 real-world code examples covering data reading, image processing, data analysis, and other scenarios, so that readers can get a thorough feel for how data.load is used.

This article collects and organizes typical usage examples of Python's data.load method. If you have been struggling with questions such as how data.load works, what it is for, or what calling it looks like in practice, the curated code examples below should help. You can also explore the other usage examples of the data module it belongs to.

The following shows 10 code examples of data.load, sorted by popularity by default.
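Most of the Splunk-related examples below use the data module from splunklib, whose load function parses an Atom/XML payload into nested, dict-like records. As a quick orientation, here is a minimal sketch; the sample XML string is invented purely for illustration:

from splunklib import data

# data.load parses an XML string into nested, dict-like records.
# The XML below is a made-up miniature of a Splunk Atom feed.
xml = "<feed><entry><title>restart_required</title></entry></feed>"
record = data.load(xml)
print(record['feed']['entry']['title'])  # -> restart_required

Note that other examples below (kaggle-ndsb, punctuator2, and the reinforcement-learning project) ship their own, unrelated data modules, so the load they call has a different signature and purpose.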

Example 1: restart_required

# Required import: import data [as alias]
# Or: from data import load [as alias]

def restart_required(self):
    """Indicates whether splunkd is in a state that requires a restart.

    :return: A ``boolean`` that indicates whether a restart is required.
    """
    response = self.get("messages").body.read()
    messages = data.load(response)['feed']
    if 'entry' not in messages:
        result = False
    else:
        if isinstance(messages['entry'], dict):
            titles = [messages['entry']['title']]
        else:
            titles = [x['title'] for x in messages['entry']]
        result = 'restart_required' in titles
    return result

Developer: DanielSchwartz1, Project: SplunkForPCAP, Lines of code: 19
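For context, restart_required is normally reached through a splunklib Service object rather than called directly; in current splunklib versions it is exposed as a property. A hedged usage sketch, with placeholder connection parameters:

import splunklib.client as client

# Placeholder host and credentials; point these at a real Splunk instance.
service = client.connect(host='localhost', port=8089,
                         username='admin', password='changeme')
if service.restart_required:
    print("splunkd needs a restart")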

Example 2: load_test

# Required import: import data [as alias]
# Or: from data import load [as alias]

def load_test(self):
    self.y_test = np.load(self.test_pred_file).astype(np.float32)
    self.images_test = data.load('test')
    features = np.load("data/features_test.pkl").item()
    if "aaronmoments" in self.features:
        print("aaronmoments")

        def normalize(x):
            return x
            # return (x - x.mean(axis=0, keepdims=True)) / x.std(axis=0, keepdims=True)

        image_shapes = np.asarray([img.shape for img in self.images_test]).astype(np.float32)
        moments = np.load("data/image_moment_stats_v1_test.pkl")
        centroid_distance = np.abs(moments["centroids"][:, [1, 0]] - image_shapes / 2)
        angles = moments["angles"][:, None]
        minor_axes = moments["minor_axes"][:, None]
        major_axes = moments["major_axes"][:, None]
        centroid_distance = normalize(centroid_distance)
        angles = normalize(angles)
        minor_axes = normalize(minor_axes)
        major_axes = normalize(major_axes)
        features["aaronmoments"] = np.concatenate([centroid_distance, angles, minor_axes, major_axes], 1).astype(np.float32)
    self.info_test = np.concatenate([features[feat] for feat in self.features], 1).astype(np.float32)

Developer: benanne, Project: kaggle-ndsb, Lines of code: 25
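The features = np.load(...).item() idiom above works because NumPy wraps a pickled Python dict in a 0-d object array, and .item() unwraps it. A minimal round-trip sketch (file name invented; note that NumPy 1.16.3 and later additionally require allow_pickle=True, which the original, older code did not need):

import numpy as np

# Save a dict: np.save stores it as a 0-d object array.
np.save("/tmp/features_demo.npy", {"a": np.arange(3)})

# Load it back and unwrap with .item().
features = np.load("/tmp/features_demo.npy", allow_pickle=True).item()
print(features["a"])  # -> [0 1 2]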

Example 3: load_train

# Required import: import data [as alias]
# Or: from data import load [as alias]

def load_train(self):
    labels = utils.one_hot(data.labels_train, m=121).astype(np.float32)
    split = np.load(DEFAULT_VALIDATION_SPLIT_PATH)
    indices_train = split['indices_train']
    indices_valid = split['indices_valid']
    image_shapes = np.asarray([img.shape for img in data.load('train')]).astype(np.float32)
    moments = np.load("data/image_moment_stats_v1_train.pkl")
    centroid_distance = np.abs(moments["centroids"][:, [1, 0]] - image_shapes / 2)
    info = np.concatenate((centroid_distance, image_shapes, moments["angles"][:, None], moments["minor_axes"][:, None], moments["major_axes"][:, None]), 1).astype(np.float32)
    self.info_train = info[indices_train]
    self.info_valid = info[indices_valid]
    self.y_train = np.load(self.train_pred_file).astype(np.float32)
    self.y_valid = np.load(self.valid_pred_file).astype(np.float32)
    self.labels_train = labels[indices_train]
    self.labels_valid = labels[indices_valid]

Developer: benanne, Project: kaggle-ndsb, Lines of code: 23
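Two patterns in load_train are worth isolating: one-hot encoding of integer labels and carving shared arrays into train/validation subsets with saved index arrays. A standalone sketch with invented values (np.eye stands in for the project's utils.one_hot):

import numpy as np

# One-hot encode integer labels (equivalent in spirit to utils.one_hot(labels, m=3)).
labels = np.array([0, 2, 1, 2, 0])
one_hot = np.eye(3, dtype=np.float32)[labels]

# Slice shared arrays with saved train/validation indices.
indices_train = np.array([0, 1, 3])
indices_valid = np.array([2, 4])
labels_train = one_hot[indices_train]
labels_valid = one_hot[indices_valid]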

Example 4: _load_atom

# Required import: import data [as alias]
# Or: from data import load [as alias]

def _load_atom(response, match=None):
    return data.load(response.body.read(), match)

# Load an array of atom entries from the body of the given response

Developer: DanielSchwartz1, Project: SplunkForPCAP, Lines of code: 7
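The optional match argument restricts data.load to a subtree before conversion: it is a tag name or path handed to ElementTree-style matching, and when several elements match, a list of records comes back. A hedged sketch with invented XML (the exact return shape may vary by splunklib version):

from splunklib import data

body = ("<feed>"
        "<entry><title>a</title></entry>"
        "<entry><title>b</title></entry>"
        "</feed>")
# 'entry' selects both <entry> children of the root <feed> element,
# yielding one record per matched element.
entries = data.load(body, match='entry')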

Example 5: refresh

# Required import: import data [as alias]
# Or: from data import load [as alias]

def refresh(self, state=None):
    """Refreshes the state of this entity.

    If *state* is provided, load it as the new state for this
    entity. Otherwise, make a roundtrip to the server (by calling
    the :meth:`read` method of ``self``) to fetch an updated state,
    plus at most two additional round trips if
    the ``autologin`` field of :func:`connect` is set to ``True``.

    :param state: Entity-specific arguments (optional).
    :type state: ``dict``

    :raises EntityDeletedException: Raised if the entity no longer exists on
        the server.

    **Example**::

        import splunklib.client as client
        s = client.connect(...)
        search = s.apps['search']
        search.refresh()
    """
    if state is not None:
        self._state = state
    else:
        self._state = self.read(self.get())
    return self

Developer: DanielSchwartz1, Project: SplunkForPCAP, Lines of code: 28

Example 6: _load_atom

# Required import: import data [as alias]
# Or: from data import load [as alias]

def _load_atom(response, match=None):
    return data.load(response.body.read(), match)

# Load an array of atom entries from the body of the given response

Developer: splunk, Project: splunk-ref-pas-code, Lines of code: 6

Example 7: iter

# Required import: import data [as alias]
# Or: from data import load [as alias]

def iter(self, offset=0, count=None, pagesize=None, **kwargs):
    """Iterates over the collection.

    This method is equivalent to the :meth:`list` method, but
    it returns an iterator and can load a certain number of entities at a
    time from the server.

    :param offset: The index of the first entity to return (optional).
    :type offset: ``integer``
    :param count: The maximum number of entities to return (optional).
    :type count: ``integer``
    :param pagesize: The number of entities to load (optional).
    :type pagesize: ``integer``
    :param kwargs: Additional arguments (optional):

        - "search" (``string``): The search query to filter responses.
        - "sort_dir" (``string``): The direction to sort returned items:
          "asc" or "desc".
        - "sort_key" (``string``): The field to use for sorting (optional).
        - "sort_mode" (``string``): The collating sequence for sorting
          returned items: "auto", "alpha", "alpha_case", or "num".

    :type kwargs: ``dict``

    **Example**::

        import splunklib.client as client
        s = client.connect(...)
        for saved_search in s.saved_searches.iter(pagesize=10):
            # Loads 10 saved searches at a time from the
            # server.
            ...
    """
    assert pagesize is None or pagesize > 0
    if count is None:
        count = self.null_count
    fetched = 0
    while count == self.null_count or fetched < count:
        response = self.get(count=pagesize or count, offset=offset, **kwargs)
        items = self._load_list(response)
        N = len(items)
        fetched += N
        for item in items:
            yield item
        if pagesize is None or N < pagesize:
            break
        offset += N
        logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)

# kwargs: count, offset, search, sort_dir, sort_key, sort_mode

Developer: DanielSchwartz1, Project: SplunkForPCAP, Lines of code: 55
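The pagination logic in iter is independent of Splunk and easy to test in isolation. Below is a self-contained sketch of the same offset/pagesize loop against a fake in-memory collection; every name here is invented:

def fetch_page(offset, count):
    """Stand-in for self.get(): return a slice of a pretend server-side list."""
    collection = list(range(23))
    return collection[offset:offset + count]

def iter_items(pagesize=10):
    offset = 0
    while True:
        items = fetch_page(offset, pagesize)
        for item in items:
            yield item
        if len(items) < pagesize:  # a short page means the server is drained
            break
        offset += len(items)

print(list(iter_items(pagesize=10)))  # -> [0, 1, ..., 22]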

Example 8: update_pickle_file

# Required import: import data [as alias]
# Or: from data import load [as alias]

def update_pickle_file(file_name, eps=0, k=0, v=0):
    d_old = data_old.Data(file_name)
    d_old.load()
    print(file_name, 'loaded')
    # d_old.print_fields()
    d_new = data.Data()
    d_new.set_agent('Wolp',
                    int(d_old.get_data('max_actions')[0]),
                    k,
                    v)
    d_new.set_experiment(d_old.get_data('experiment')[0],
                         [-3],
                         [3],
                         eps)
    space = action_space.Space([-3], [3], int(d_old.get_data('max_actions')[0]))
    # print(space.get_space())
    # d_new.print_data()
    done = d_old.get_data('done')
    actors_result = d_old.get_data('actors_result')
    actions = d_old.get_data('actions')
    state_0 = d_old.get_data('state_0').tolist()
    state_1 = d_old.get_data('state_1').tolist()
    state_2 = d_old.get_data('state_2').tolist()
    state_3 = d_old.get_data('state_3').tolist()
    rewards = d_old.get_data('rewards').tolist()
    ep = 0
    temp = 0
    l = len(done)
    for i in range(l):
        d_new.set_action(space.import_point(actions[i]).tolist())
        d_new.set_actors_action(space.import_point(actors_result[i]).tolist())
        d_new.set_ndn_action(space.import_point(
            space.search_point(actors_result[i], 1)[0]).tolist())
        state = [state_0[i], state_1[i], state_2[i], state_3[i]]
        d_new.set_state(state)
        d_new.set_reward(1)
        if done[i] > 0:
            # print(ep, i - temp, 'progress', i / l)
            temp = i
            ep += 1
            # if ep % 200 == 199:
            #     d_new.finish_and_store_episode()
            # else:
            d_new.end_of_episode()
    d_new.save()

Developer: jimkon, Project: Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces, Lines of code: 52
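Stripped of the project-specific Data API, the heart of update_pickle_file is splitting a flat transition log into episodes wherever the done flag fires. A standalone sketch of that loop with invented data:

# Split flat per-step logs into episodes at done flags.
done = [0, 0, 1, 0, 1]
rewards = [1.0, 0.5, 2.0, 1.0, 3.0]

episodes, current = [], []
for i in range(len(done)):
    current.append(rewards[i])
    if done[i] > 0:          # episode boundary reached
        episodes.append(current)
        current = []
print(episodes)  # -> [[1.0, 0.5, 2.0], [1.0, 3.0]]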

Example 9: get_minibatch

# Required import: import data [as alias]
# Or: from data import load [as alias]

def get_minibatch(file_name, batch_size, shuffle, with_pauses=False):
    dataset = data.load(file_name)
    if shuffle:
        np.random.shuffle(dataset)
    X_batch = []
    Y_batch = []
    if with_pauses:
        P_batch = []
    if len(dataset) < batch_size:
        print("WARNING: Not enough samples in '%s'. Reduce mini-batch size to %d or use a dataset with at least %d words." % (
            file_name,
            len(dataset),
            MINIBATCH_SIZE * data.MAX_SEQUENCE_LEN))
    for subsequence in dataset:
        X_batch.append(subsequence[0])
        Y_batch.append(subsequence[1])
        if with_pauses:
            P_batch.append(subsequence[2])
        if len(X_batch) == batch_size:
            # Transpose, because the model assumes the first axis is time
            X = np.array(X_batch, dtype=np.int32).T
            Y = np.array(Y_batch, dtype=np.int32).T
            if with_pauses:
                P = np.array(P_batch, dtype=theano.config.floatX).T
            if with_pauses:
                yield X, Y, P
            else:
                yield X, Y
            X_batch = []
            Y_batch = []
            if with_pauses:
                P_batch = []

Developer: ottokart, Project: punctuator2, Lines of code: 44
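A hedged sketch of how such a generator is typically consumed in a training loop; the file name and batch size are placeholders, not values from punctuator2:

# Hypothetical consumption of get_minibatch; "data/train" and 128
# are placeholders. Each yielded X, Y is time-major (first axis = time).
for X, Y in get_minibatch("data/train", batch_size=128, shuffle=True):
    pass  # e.g. run one training step on (X, Y)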

Example 10: __classification_accuracy

# Required import: import data [as alias]
# Or: from data import load [as alias]

def __classification_accuracy(self, sess, iter_init, idx, y_ph=None):
    """
    :param sess: TensorFlow session
    :param iter_init: TensorFlow data iterator initializer associated
    :param idx: insertion index (i.e. epoch - 1)
    :param y_ph: TensorFlow placeholder for unseen labels
    :return: None
    """
    if self.perf is None or y_ph is None:
        return
    # initialize results
    y = np.zeros([0, 1])
    y_hats = [np.zeros([0, 1])] * self.num_B_sub_heads
    # initialize unsupervised data iterator
    sess.run(iter_init)
    # loop over the batches within the unsupervised data iterator
    print('Evaluating classification accuracy... ')
    while True:
        try:
            # grab the results
            results = sess.run([self.y_hats, y_ph], feed_dict={self.is_training: False})
            # load metrics
            for i in range(self.num_B_sub_heads):
                y_hats[i] = np.concatenate((y_hats[i], np.expand_dims(results[0][i], axis=1)))
            if y_ph is not None:
                y = np.concatenate((y, np.expand_dims(results[1], axis=1)))
            # _, ax = plt.subplots(2, 10)
            # i_rand = np.random.choice(results[3].shape[0], 10)
            # for i in range(10):
            #     ax[0, i].imshow(results[3][i_rand[i]][:, :, 0], origin='upper', vmin=0, vmax=1)
            #     ax[0, i].set_xticks([])
            #     ax[0, i].set_yticks([])
            #     ax[1, i].imshow(results[4][i_rand[i]][:, :, 0], origin='upper', vmin=0, vmax=1)
            #     ax[1, i].set_xticks([])
            #     ax[1, i].set_yticks([])
            # plt.show()
        # the iterator will throw this error when it is out of data
        except tf.errors.OutOfRangeError:
            break
    # compute classification accuracy
    if y_ph is not None:
        class_errors = [unsupervised_labels(y, y_hats[i], self.k_B, self.k_B)
                        for i in range(self.num_B_sub_heads)]
        self.perf['class_err_min'][idx] = np.min(class_errors)
        self.perf['class_err_avg'][idx] = np.mean(class_errors)
        self.perf['class_err_max'][idx] = np.max(class_errors)
    # metrics are done
    print('Done')

Developer: astirn, Project: IIC, Lines of code: 58
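The while True / except tf.errors.OutOfRangeError idiom above is the standard way to drain a TF1 initializable dataset iterator. A minimal self-contained sketch, assuming the tensorflow.compat.v1 API:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

dataset = tf.data.Dataset.from_tensor_slices(np.arange(5)).batch(2)
iterator = tf.data.make_initializable_iterator(dataset)
next_batch = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer)      # analogous to sess.run(iter_init)
    while True:
        try:
            print(sess.run(next_batch))
        except tf.errors.OutOfRangeError:
            break                       # iterator exhausted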

Note: The data.load examples in this article were compiled from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's license before redistributing or reusing the code. Please do not repost without permission.
