Hazel Engine learning 21 -- Moving to Sandbox

Cherno's video: https://www.youtube.com/watch?v=4zj-0FQ7Xbg&list=PLlrATfBNZ98dC-V-N3m0Go4deliWHPFwT&index=36

Full code: https://github.com/DXT00/Hazel_study/tree/cd781b6ae9883f0884ccbf06bc0b5fa789fcdd1b/Hazel

Move the rendering code out of Hazel::Application and into the Sandbox (an ExampleLayer), and drive the camera with the keyboard.
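
After the move, Hazel::Application no longer issues any draw calls of its own; its run loop only ticks the layer stack and the window, and everything rendering-related lives in the Sandbox layer. A minimal sketch of what the loop is left with, assuming the Application members from the earlier chapters (m_Running, m_LayerStack, m_ImGuiLayer, m_Window); the real file may differ:

// Sketch only -- assumes the members introduced in earlier chapters.
void Application::Run()
{
	while (m_Running)
	{
		// All drawing now happens inside the layers (e.g. ExampleLayer::OnUpdate below).
		for (Layer* layer : m_LayerStack)
			layer->OnUpdate();

		m_ImGuiLayer->Begin();
		for (Layer* layer : m_LayerStack)
			layer->OnImGuiRender();
		m_ImGuiLayer->End();

		m_Window->OnUpdate();
	}
}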

Method 1: move the camera from event handlers.

Drawback: the camera only moves when a KeyPressedEvent arrives (including OS key-repeat events), so the motion stutters instead of being continuous.

#include <Hazel.h>

#include "imgui/imgui.h"


class ExampleLayer : public Hazel::Layer
{
public:
	ExampleLayer()
		: Layer("Example"), m_Camera(-1.6f, 1.6f, -0.9f, 0.9f) // matches the 1280 : 720 window aspect ratio
	{

		float vertices[3 * 7] = {
			//Position          //Color
			-0.3f,-0.3f,0.0f,	1.0f,0.0f,0.0f,1.0f,
			 0.3f,-0.3f,0.0f,	0.0f,1.0f,0.0f,1.0f,
			 0.0f, 0.3f,0.0f,	0.0f,0.0f,1.0f,1.0f,

		};
		float squareVertices[3 * 4] = {
			//Position
			-0.5f, -0.5f, 0.0f,
			 0.5f, -0.5f, 0.0f,
			 0.5f,  0.5f, 0.0f,
			-0.5f,  0.5f, 0.0f,

		};
		unsigned int indices[3] = { 0,1,2 };
		unsigned int squareIndices[6] = { 0,1,2,0,2,3 };

		Hazel::BufferLayout layout = {
			{ Hazel::ShaderDataType::Float3, "a_Position", false },
			{ Hazel::ShaderDataType::Float4, "a_Color",    false },
		};
		Hazel::BufferLayout squareLayout = {
			{ Hazel::ShaderDataType::Float3, "a_Position", false },
		};

		std::shared_ptr<Hazel::VertexBuffer> vertexBuffer;
		std::shared_ptr<Hazel::IndexBuffer> indexBuffer;
		m_VertexArray.reset(Hazel::VertexArray::Create());
		vertexBuffer.reset(Hazel::VertexBuffer::Create(vertices, sizeof(vertices)));
		indexBuffer.reset(Hazel::IndexBuffer::Create(indices, 3));

		vertexBuffer->SetLayout(layout);
		m_VertexArray->AddVertexBuffer(vertexBuffer);
		m_VertexArray->SetIndexBuffer(indexBuffer);


		std::shared_ptr<Hazel::VertexBuffer> squareVB;
		std::shared_ptr<Hazel::IndexBuffer> squareIB;
		m_SquareVA.reset(Hazel::VertexArray::Create());
		squareVB.reset(Hazel::VertexBuffer::Create(squareVertices, sizeof(squareVertices)));
		squareIB.reset(Hazel::IndexBuffer::Create(squareIndices, 6));

		squareVB->SetLayout(squareLayout);
		m_SquareVA->AddVertexBuffer(squareVB);
		m_SquareVA->SetIndexBuffer(squareIB);


		const std::string vertexSrc = R"(
				#version 330 core
				
				layout(location = 0) in vec3 a_Position;
				layout(location = 1) in vec4 a_Color;

				out vec3 v_Position;
				out vec4 v_Color;

				uniform mat4 u_ViewProjection;
				

				void main()
				{
					v_Position = a_Position;
					v_Color = a_Color;
					gl_Position = u_ViewProjection * vec4(a_Position,1.0f);

				}


				)";
		const std::string fragmentSrc = R"(
				#version 330 core
				
				layout(location = 0) out vec4 color;
				layout(location = 1) out vec4 color1;

				in vec3 v_Position;
				in vec4 v_Color;
				
				void main()
				{

					color = v_Color;//vec4(v_Position*0.5+0.5, 1.0);

				}
				)";

		const std::string squareVertexSrc = R"(
				#version 330 core
				
				layout(location = 0) in vec3 a_Position;

				uniform mat4 u_ViewProjection;

				out vec3 v_Position;
			

				void main()
				{
					v_Position = a_Position;
				
					gl_Position = u_ViewProjection * vec4(a_Position,1.0);

				}


				)";
		const std::string squareFragmentSrc = R"(
				#version 330 core
				
				layout(location = 0) out vec4 color;

				in vec3 v_Position;

				
				void main()
				{

					color = vec4(v_Position * 0.5 + 0.5, 1.0);

				}
				)";
		//m_Shader = std::make_unique<Shader>(vertexSrc,fragmentSrc);

		m_Shader.reset(new  Hazel::Shader(vertexSrc, fragmentSrc));
		m_SquareShader.reset(new  Hazel::Shader(squareVertexSrc, squareFragmentSrc));
	}
	
	void OnUpdate() override
	{
		Hazel::RenderCommand::SetClearColor({ 0.1f, 0.1f, 0.1f, 1 });
		Hazel::RenderCommand::Clear();
	/*	m_Camera.SetPosition({ 0.5f,0.5f,0.0f });
		m_Camera.SetRotation(45);*/
		
		Hazel::Renderer::BeginScene(m_Camera);
		Hazel::Renderer::Submit(m_SquareVA, m_SquareShader);

		Hazel::Renderer::Submit(m_VertexArray, m_Shader);
		Hazel::Renderer::EndScene();

	}

	virtual void OnImGuiRender() override
	{
			
	}

	void OnEvent(Hazel::Event& event) override
	{


		Hazel::EventDispatcher dispatcher(event);
		dispatcher.Dispatch<Hazel::KeyPressedEvent>(HZ_BIND_EVENT_FN(ExampleLayer::KeyPressedHandle));

	}
	bool KeyPressedHandle(Hazel::KeyPressedEvent& event) {
		if (event.GetKeyCode() == HZ_KEY_LEFT)
		{
			m_Camera.SetPosition({ m_Camera.GetPosition().x - m_MoveSpeed, m_Camera.GetPosition().y, m_Camera.GetPosition().z });
		}
		if (event.GetKeyCode() == HZ_KEY_RIGHT)
		{
			m_Camera.SetPosition({ m_Camera.GetPosition().x + m_MoveSpeed, m_Camera.GetPosition().y, m_Camera.GetPosition().z });
		}
		if (event.GetKeyCode() == HZ_KEY_UP)
		{
			m_Camera.SetRotation(m_Camera.GetRotation()+m_RotateSpeed);
		}
		if (event.GetKeyCode() == HZ_KEY_DOWN)
		{
			m_Camera.SetRotation(m_Camera.GetRotation()- m_RotateSpeed);
		}
		return false;
	}
public:
		inline Hazel::OrthographicCamera& GetCamera() { return m_Camera; }
public:

	std::shared_ptr<Hazel::VertexArray> m_VertexArray;
	std::shared_ptr<Hazel::Shader> m_Shader;

	std::shared_ptr<Hazel::VertexArray> m_SquareVA;
	std::shared_ptr<Hazel::Shader> m_SquareShader;
private:
	Hazel::OrthographicCamera m_Camera;
	glm::vec3 m_CameraPosition = { 0.0f,0.0f,0.0f };
	float m_MoveSpeed = 1;
	float m_CameraRotation = 0;
	float m_RotateSpeed = 10;


};

class Sandbox : public Hazel::Application
{
public:
	Sandbox()
	{
		PushLayer(new ExampleLayer());
	}

	~Sandbox()
	{

	}

};

Hazel::Application* Hazel::CreateApplication()
{
	return new Sandbox();
}

Method 2: poll the Input class inside OnUpdate() to move and rotate the camera. Because the keys are polled every frame, the camera keeps moving smoothly for as long as a key is held down (the distance covered per second still depends on the frame rate; see the short note after the listing).

#include <Hazel.h>

#include "imgui/imgui.h"


class ExampleLayer : public Hazel::Layer
{
public:
	ExampleLayer()
		: Layer("Example"), m_Camera(-1.6f, 1.6f, -0.9f, 0.9f) // matches the 1280 : 720 window aspect ratio
	{

		float vertices[3 * 7] = {
			//Position          //Color
			-0.3f,-0.3f,0.0f,	1.0f,0.0f,0.0f,1.0f,
			 0.3f,-0.3f,0.0f,	0.0f,1.0f,0.0f,1.0f,
			 0.0f, 0.3f,0.0f,	0.0f,0.0f,1.0f,1.0f,

		};
		float squareVertices[3 * 4] = {
			//Position
			-0.5f, -0.5f, 0.0f,
			 0.5f, -0.5f, 0.0f,
			 0.5f,  0.5f, 0.0f,
			-0.5f,  0.5f, 0.0f,

		};
		unsigned int indices[3] = { 0,1,2 };
		unsigned int squareIndices[6] = { 0,1,2,0,2,3 };

		Hazel::BufferLayout layout = {
			{ Hazel::ShaderDataType::Float3, "a_Position", false },
			{ Hazel::ShaderDataType::Float4, "a_Color",    false },
		};
		Hazel::BufferLayout squareLayout = {
			{ Hazel::ShaderDataType::Float3, "a_Position", false },
		};

		std::shared_ptr<Hazel::VertexBuffer> vertexBuffer;
		std::shared_ptr<Hazel::IndexBuffer> indexBuffer;
		m_VertexArray.reset(Hazel::VertexArray::Create());
		vertexBuffer.reset(Hazel::VertexBuffer::Create(vertices, sizeof(vertices)));
		indexBuffer.reset(Hazel::IndexBuffer::Create(indices, 3));

		vertexBuffer->SetLayout(layout);
		m_VertexArray->AddVertexBuffer(vertexBuffer);
		m_VertexArray->SetIndexBuffer(indexBuffer);


		std::shared_ptr<Hazel::VertexBuffer> squareVB;
		std::shared_ptr<Hazel::IndexBuffer> squareIB;
		m_SquareVA.reset(Hazel::VertexArray::Create());
		squareVB.reset(Hazel::VertexBuffer::Create(squareVertices, sizeof(squareVertices)));
		squareIB.reset(Hazel::IndexBuffer::Create(squareIndices, 6));

		squareVB->SetLayout(squareLayout);
		m_SquareVA->AddVertexBuffer(squareVB);
		m_SquareVA->SetIndexBuffer(squareIB);


		const std::string vertexSrc = R"(
				#version 330 core
				
				layout(location = 0) in vec3 a_Position;
				layout(location = 1) in vec4 a_Color;

				out vec3 v_Position;
				out vec4 v_Color;

				uniform mat4 u_ViewProjection;
				

				void main()
				{
					v_Position = a_Position;
					v_Color = a_Color;
					gl_Position = u_ViewProjection * vec4(a_Position,1.0f);

				}


				)";
		const std::string fragmentSrc = R"(
				#version 330 core
				
				layout(location = 0) out vec4 color;
				layout(location = 1) out vec4 color1;

				in vec3 v_Position;
				in vec4 v_Color;
				
				void main()
				{

					color = v_Color;//vec4(v_Position*0.5+0.5, 1.0);

				}
				)";

		const std::string squareVertexSrc = R"(
				#version 330 core
				
				layout(location = 0) in vec3 a_Position;

				uniform mat4 u_ViewProjection;

				out vec3 v_Position;
			

				void main()
				{
					v_Position = a_Position;
				
					gl_Position = u_ViewProjection * vec4(a_Position,1.0);

				}


				)";
		const std::string squareFragmentSrc = R"(
				#version 330 core
				
				layout(location = 0) out vec4 color;

				in vec3 v_Position;

				
				void main()
				{

					color = vec4(v_Position * 0.5 + 0.5, 1.0);

				}
				)";
		//m_Shader = std::make_unique<Shader>(vertexSrc,fragmentSrc);

		m_Shader.reset(new  Hazel::Shader(vertexSrc, fragmentSrc));
		m_SquareShader.reset(new  Hazel::Shader(squareVertexSrc, squareFragmentSrc));
	}
	
	void OnUpdate() override
	{

		if (Hazel::Input::IsKeyPressed(HZ_KEY_LEFT))
		{
			m_CameraPosition.x -= m_MoveSpeed;
		}
		else if (Hazel::Input::IsKeyPressed(HZ_KEY_RIGHT))
		{
			m_CameraPosition.x += m_MoveSpeed;
		}
		if (Hazel::Input::IsKeyPressed(HZ_KEY_UP))
		{
			m_CameraPosition.y += m_MoveSpeed;
		}
		else if (Hazel::Input::IsKeyPressed(HZ_KEY_DOWN))
		{
			m_CameraPosition.y -= m_MoveSpeed;

		}
		if (Hazel::Input::IsKeyPressed(HZ_KEY_A))
		{
			m_CameraRotation -= m_RotateSpeed;
		}
		else if (Hazel::Input::IsKeyPressed(HZ_KEY_D))
		{
			m_CameraRotation += m_RotateSpeed;
		}
		
		Hazel::RenderCommand::SetClearColor({ 0.1f, 0.1f, 0.1f, 1 });
		Hazel::RenderCommand::Clear();
	/*	m_Camera.SetPosition({ 0.5f,0.5f,0.0f });
		m_Camera.SetRotation(45);*/
		m_Camera.SetPosition(m_CameraPosition);
		m_Camera.SetRotation(m_CameraRotation);
		Hazel::Renderer::BeginScene(m_Camera);
		Hazel::Renderer::Submit(m_SquareVA, m_SquareShader);

		Hazel::Renderer::Submit(m_VertexArray, m_Shader);
		Hazel::Renderer::EndScene();

	}

	virtual void OnImGuiRender() override
	{
			
	}

	void OnEvent(Hazel::Event& event) override
	{


		/*Hazel::EventDispatcher dispatcher(event);
		dispatcher.Dispatch<Hazel::KeyPressedEvent>(HZ_BIND_EVENT_FN(ExampleLayer::KeyPressedHandle));
*/
	}
	//bool KeyPressedHandle(Hazel::KeyPressedEvent& event) {
	//	if (event.GetKeyCode() == HZ_KEY_LEFT)
	//	{
	//		m_Camera.SetPosition({ m_Camera.GetPosition().x - m_MoveSpeed, m_Camera.GetPosition().y, m_Camera.GetPosition().z });
	//	}
	//	if (event.GetKeyCode() == HZ_KEY_RIGHT)
	//	{
	//		m_Camera.SetPosition({ m_Camera.GetPosition().x + m_MoveSpeed, m_Camera.GetPosition().y, m_Camera.GetPosition().z });
	//	}
	//	if (event.GetKeyCode() == HZ_KEY_UP)
	//	{
	//		m_Camera.SetRotation(m_Camera.GetRotation()+m_RotateSpeed);
	//	}
	//	if (event.GetKeyCode() == HZ_KEY_DOWN)
	//	{
	//		m_Camera.SetRotation(m_Camera.GetRotation()- m_RotateSpeed);
	//	}
	//	return false;
	//}
public:
		inline Hazel::OrthographicCamera& GetCamera() { return m_Camera; }
public:

	std::shared_ptr<Hazel::VertexArray> m_VertexArray;
	std::shared_ptr<Hazel::Shader> m_Shader;

	std::shared_ptr<Hazel::VertexArray> m_SquareVA;
	std::shared_ptr<Hazel::Shader> m_SquareShader;
private:
	Hazel::OrthographicCamera m_Camera;
	glm::vec3 m_CameraPosition = { 0.0f,0.0f,0.0f };
	float m_MoveSpeed = 0.1f;
	float m_CameraRotation = 0;
	float m_RotateSpeed = 10;


};

class Sandbox : public Hazel::Application
{
public:
	Sandbox()
	{
		PushLayer(new ExampleLayer());
	}

	~Sandbox()
	{

	}

};

Hazel::Application* Hazel::CreateApplication()
{
	return new Sandbox();
}
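
A caveat about Method 2: m_MoveSpeed and m_RotateSpeed are applied once per frame, so the camera covers more distance per second at higher frame rates. The usual fix is to treat the speeds as per-second values and scale them by the frame time. A hedged sketch (deltaTime is a hypothetical placeholder here; OnUpdate() does not receive a timestep yet at this point in the series):

	// Hypothetical sketch -- 'deltaTime' stands for the seconds elapsed since the previous
	// frame, however it is obtained; it is not part of this chapter's code.
	float deltaTime = 1.0f / 60.0f; // placeholder value for illustration

	if (Hazel::Input::IsKeyPressed(HZ_KEY_LEFT))
		m_CameraPosition.x -= m_MoveSpeed * deltaTime; // m_MoveSpeed now means units per second
	else if (Hazel::Input::IsKeyPressed(HZ_KEY_RIGHT))
		m_CameraPosition.x += m_MoveSpeed * deltaTime;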

 
