Velocity基础

本文深入探讨了Velocity模板引擎的概念及其在实际应用中的实现方式,包括VelocityContext的使用、模板与vm模板的设计、遍历循环规则、条件判断、以及演示代码的解析。

概述:

velocity是一个典型的model和view分离设计思想的体现,因此其概念定义的几个主要元素和mvc中的概念也有一定的映射关系。

比较重要的几个概念:

1.VelocityContext:velocity上下文,用来保存数据用的,主要是工程师书写代码时存放数据用的,提供给模板使用

2.Template:模板,设计师设计的页面的加载器,具体的数据则由VelocityContext中获取,template.merge是将数据和vm模板合并输出具体的结果

3.vm模板:设计师设计的页面,包含样式以及替换的数据标识等内容,具体内容如下:

1、变量的定义、赋值与使用规则:赋值的左边必须是一个变量($name)或属性引用($member.Sex),右值为以下六种类型之一;如果右值为null,则不进行赋值操作,即左值还是保留以前的值;
变量引用:#set($member.Sex=$sex)
字面字符串:#set($username="hzd")
属性引用:#set($color=$ball.Color)
方法引用:#set($member.Age=$ager.sum($age/123/...))
字面数字:#set($money=123456)
数组列表:#set($arr=["not",$m,"xxxx"])
velocity模板中未被定义的变量将被认为是一个字符串:
#set($name="hzd")
$newname=$name,则输出为$newname=hzd
velocity模板中不会将reference解释为对象的实例变量:
如:$foo.Name将被解释为foo对象的getName()方法,而不是foo对象的Name实例变量,
$data.Request.ServerName等同于$data.getRequest().getServerName()
2、遍历循环规则:语法#foreach ...#end,$velocityCount为当前循环的索引(可以通过在velocity配置文件中更改该名称).
循环数组: #foreach($item in ["one","two","three"]) $item #end ,将输出one two three
循环List:#foreach($item in $memberList) $item.name #end,item代表列表中的对象,如果item为值类型(int等),则可以直接使用$item输出值,如果为引用类型(String,User,...),则可以使用$item.name引用实例变量或$item.xxx()调用对象的方法
循环Map:#foreach($key in $memberMap.keySet() ) key:$key;value:$memberMap.get($key) #end
3、条件:使用#if #elseif #else #end 指令可以帮助决定程序的执行流程,如果$foo是boolean,则$foo要为true,否则,$foo不为null

判断对象是否为null的方法:
(1)、#if($member),不为空;#if(!$member),为空;
(2)、#ifnull()、#ifnotnull():如 #ifnull($foo)。要使用这个特性,必须在velocity.properties文件中加入相应的userdirective自定义指令配置(该指令由扩展工具提供,具体类名需查阅所用Velocity版本的文档):


演示代码:

HelloWorld.java

[java:nogutter] view plain copy
  1. packagecom.wm.mad.tmp;
  2. importjava.io.StringWriter;
  3. importorg.apache.velocity.Template;
  4. importorg.apache.velocity.VelocityContext;
  5. importorg.apache.velocity.app.VelocityEngine;
  6. importorg.apache.log4j.Logger;
  7. importorg.apache.log4j.PropertyConfigurator;
  8. publicclassHelloWorld{
  9. staticLoggerlogger=Logger.getLogger(HelloWorld.class);
  10. publicstaticvoidmain(String[]args)throwsException{
  11. PropertyConfigurator.configure("log4j.properties");
  12. VelocityEngineve=newVelocityEngine();
  13. ve.init("velocity.properties");
  14. Templatetemplate=ve.getTemplate("helloWorld.vm");
  15. VelocityContextcontext=newVelocityContext();
  16. context.put("name","madding");
  17. context.put("password","madding");
  18. StringWriterwriter=newStringWriter();
  19. template.merge(context,writer);
  20. System.out.println(writer.toString());
  21. if(logger.isInfoEnabled()){
  22. logger.info("operatorissuccess");
  23. }
  24. }
  25. }

helloWorld.vm:

[xhtml:nogutter] view plain copy
  1. 你的
  2. 名字是:$name
  3. 密码是:$password

velocity.properties:

[xhtml:nogutter] view plain copy
  1. input.encoding=GBK
  2. output.encoding=GBK
  3. velocimacro.permissions.allow.inline=true
  4. runtime.log=d:/log/velocity.log

转自:http://blog.youkuaiyun.com/madding/article/details/4604414

http://gourmand.iteye.com/blog/1555322

http://blog.youkuaiyun.com/benbenming/article/details/886668

import numpy as np from matplotlib import pyplot as plt def xavier_initializer(layer_dims_, seed=16): np.random.seed(seed) parameters_ = {} num_L = len(layer_dims_) for l in range(num_L - 1): temp_w = np.random.randn(layer_dims_[l + 1], layer_dims_[l]) * np.sqrt(1 / layer_dims_[l]) temp_b = np.zeros((1, layer_dims_[l + 1])) parameters_['W' + str(l + 1)] = temp_w parameters_['b' + str(l + 1)] = temp_b return parameters_ def he_initializer(layer_dims_, seed=16): np.random.seed(seed) parameters_ = {} num_L = len(layer_dims_) for l in range(num_L - 1): temp_w = np.random.randn(layer_dims_[l + 1], layer_dims_[l]) * np.sqrt(2 / layer_dims_[l]) temp_b = np.zeros((1, layer_dims_[l + 1])) parameters_['W' + str(l + 1)] = temp_w parameters_['b' + str(l + 1)] = temp_b return parameters_ def cross_entry_sigmoid(y_hat_, y_): ''' 计算在二分类时的交叉熵 :param y_hat_: 模型输出值 :param y_: 样本真实标签值 :return: ''' m = y_.shape[0] loss = -(np.dot(y_.T, np.log(y_hat_)) + np.dot(1 - y_.T, np.log(1 - y_hat_))) / m return np.squeeze(loss) def cross_entry_softmax(y_hat_, y_): ''' 计算多分类时的交叉熵 :param y_hat_: :param y_: :return: ''' m = y_.shape[0] loss = -np.sum(y_ * np.log(y_hat_)) / m return loss def sigmoid(z): a = 1 / (1 + np.exp(-z)) return a def relu(z): a = np.maximum(0, z) return a def softmax(z): z -= np.max(z) # 防止过大,超出限制,导致计算结果为 nan z_exp = np.exp(z) softmax_z = z_exp / np.sum(z_exp, axis=1, keepdims=True) return softmax_z def sigmoid_backward(da_, cache_z): a = 1 / (1 + np.exp(-cache_z)) dz_ = da_ * a * (1 - a) assert dz_.shape == cache_z.shape return dz_ def softmax_backward(y_, cache_z): # a = softmax(cache_z) dz_ = a - y_ assert dz_.shape == cache_z.shape return dz_ def relu_backward(da_, cache_z): dz = np.array(da_, copy=True) dz[cache_z <= 0] = 0 assert (dz.shape == cache_z.shape) return dz def update_parameters_with_gd(parameters_, grads, learning_rate): L_ = int(len(parameters_) / 2) for l in range(1, L_ + 1): parameters_['W' + str(l)] -= learning_rate * grads['dW' + str(l)] parameters_['b' 
+ str(l)] -= learning_rate * grads['db' + str(l)] return parameters_ def update_parameters_with_sgd(parameters_, grads, learning_rate): L_ = int(len(parameters_) / 2) for l in range(1, L_ + 1): parameters_['W' + str(l)] -= learning_rate * grads['dW' + str(l)] parameters_['b' + str(l)] -= learning_rate * grads['db' + str(l)] return parameters_ def initialize_velcoity(paramters): v = {} L_ = int(len(paramters) / 2) for l in range(1, L_ + 1): v['dW' + str(l)] = np.zeros(paramters['W' + str(l)].shape) v['db' + str(l)] = np.zeros(paramters['b' + str(l)].shape) return v def update_parameters_with_sgd_momentum(parameters, grads, velcoity, beta, learning_rate): L_ = int(len(parameters) / 2) for l in range(1, L_ + 1): velcoity['dW' + str(l)] = beta * velcoity['dW' + str(l)] + (1 - beta) * grads['dW' + str(l)] velcoity['db' + str(l)] = beta * velcoity['db' + str(l)] + (1 - beta) * grads['db' + str(l)] parameters['W' + str(l)] -= learning_rate * velcoity['dW' + str(l)] parameters['b' + str(l)] -= learning_rate * velcoity['db' + str(l)] return parameters, velcoity def initialize_adam(paramters_): l = int(len(paramters_) / 2) square_grad = {} velcoity = {} for i in range(l): for i in range(l): square_grad['dW' + str(i + 1)] = np.zeros(paramters_['W' + str(i + 1)].shape) square_grad['db' + str(i + 1)] = np.zeros(paramters_['b' + str(i + 1)].shape) velcoity['dW' + str(i + 1)] = np.zeros(paramters_['W' + str(i + 1)].shape) velcoity['db' + str(i + 1)] = np.zeros(paramters_['b' + str(i + 1)].shape) return velcoity, square_grad def update_parameters_with_sgd_adam(parameters_, grads_, velcoity, square_grad, epoch, learning_rate=0.1, beta1=0.9, beta2=0.999, epsilon=1e-8): l = int(len(parameters_) / 2) for i in range(l): velcoity['dW' + str(i + 1)] = beta1 * velcoity['dW' + str(i + 1)] + (1 - beta1) * grads_['dW' + str(i + 1)] velcoity['db' + str(i + 1)] = beta1 * velcoity['db' + str(i + 1)] + (1 - beta1) * grads_['db' + str(i + 1)] vw_correct = velcoity['dW' + str(i + 1)] / (1 - 
np.power(beta1, epoch)) # 这里是对迭代初期的梯度进行修正 vb_correct = velcoity['db' + str(i + 1)] / (1 - np.power(beta1, epoch)) square_grad['dW' + str(i + 1)] = beta2 * square_grad['dW' + str(i + 1)] + (1 - beta2) * ( grads_['dW' + str(i + 1)] ** 2) square_grad['db' + str(i + 1)] = beta2 * square_grad['db' + str(i + 1)] + (1 - beta2) * ( grads_['db' + str(i + 1)] ** 2) sw_correct = square_grad['dW' + str(i + 1)] / (1 - np.power(beta2, epoch)) sb_correct = square_grad['db' + str(i + 1)] / (1 - np.power(beta2, epoch)) parameters_['W' + str(i + 1)] -= learning_rate * vw_correct / np.sqrt(sw_correct + epsilon) parameters_['b' + str(i + 1)] -= learning_rate * vb_correct / np.sqrt(sb_correct + epsilon) return parameters_, velcoity, square_grad def set_ax_gray(ax): ax.patch.set_facecolor("gray") ax.patch.set_alpha(0.1) ax.spines['right'].set_color('none') # 设置隐藏坐标轴 ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('none') ax.spines['left'].set_color('none') ax.grid(axis='y', linestyle='-.') def plot_costs(costs, labels, colors=None): if colors is None: colors = ['b', 'lightcoral'] ax = plt.subplot() assert len(costs) == len(labels) for i in range(len(costs)): ax.plot(costs[i], color=colors[i], label=labels[i]) set_ax_gray(ax) ax.legend(loc='upper right') ax.set_xlabel('num epochs') ax.set_ylabel('cost') plt.show() 以上是bpnnUtil.py
最新发布
06-13
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值