Day10.16

This post demonstrates thread synchronization in C using POSIX semaphores and a mutex. The first program uses three semaphores to make three threads print A, B, and C in strict rotation; the second uses a mutex so two threads can copy the two halves of a file through a shared pair of file descriptors without data races or conflicting offsets.
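Both programs include a course-style umbrella header, myhead.h, whose contents are not shown in this post. As an assumption (not the real header), a minimal version that would let the code below compile might look like this; either way, link with -pthread.

/* myhead.h -- assumed minimal contents, since the real course header is not shown */
#ifndef __MYHEAD_H__
#define __MYHEAD_H__

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <sys/stat.h>

#endif

The first program chains three semaphores so the threads print A, B, C in a fixed order: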
#include <myhead.h>

sem_t sem1;   //posted by A, waited on by B
sem_t sem2;   //posted by B, waited on by C
sem_t sem3;   //posted by C, waited on by A; initialized to 1 so A runs first

void* funca(void* fd)
{
	while(1)
	{

		sem_wait(&sem3);
		
		printf("A\n");

		sem_post(&sem1);


	}

	return NULL;
}


void* funcb(void* fd)
{
	while(1)
	{
		sem_wait(&sem1);

		printf("B\n");

		sem_post(&sem2);


	}

	return NULL;
}

void* funcc(void* fd)
{
	while(1)
	{
		sem_wait(&sem2);

		printf("C\n");

		sem_post(&sem3);


	}

	return NULL;
}


int main(int argc, const char *argv[])
{

	sem_init(&sem1,0,0);   //B must wait until A posts
	sem_init(&sem2,0,0);   //C must wait until B posts
	sem_init(&sem3,0,1);   //starts at 1 so A can run immediately




	pthread_t tid1;
	if(pthread_create(&tid1,NULL,funca,NULL)!=0)
	{
		printf("pthread create error!\n");
		return -1;
	}
	pthread_t tid2;
	if(pthread_create(&tid2,NULL,funcb,NULL)!=0)
	{
		printf("pthread create error!\n");
		return -1;
	}

	pthread_t tid3;
	if(pthread_create(&tid3,NULL,funcc,NULL)!=0)
	{
		printf("pthread create error!\n");
		return -1;
	}


	pthread_join(tid1,NULL);
	pthread_join(tid2,NULL);
	pthread_join(tid3,NULL);

	sem_destroy(&sem1);
	sem_destroy(&sem2);
	sem_destroy(&sem3);

	return 0;
}
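
The three thread functions above differ only in which semaphore they wait on, which one they post, and what they print, so the rotation can also be written as one parameterized worker. The sketch below is not part of the original program; the stage_t struct and worker function are introduced here purely for illustration, and the loop is bounded so the demo actually terminates and the joins return.

#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>

typedef struct
{
	sem_t *wait_on;    //semaphore this thread blocks on
	sem_t *post_to;    //semaphore released for the next thread
	const char *msg;   //text printed when it is this thread's turn
}stage_t;

void* worker(void* arg)
{
	stage_t *s=(stage_t*)arg;
	for(int i=0;i<5;i++)        //bounded so the demo terminates
	{
		sem_wait(s->wait_on);
		printf("%s\n",s->msg);
		sem_post(s->post_to);
	}
	return NULL;
}

int main(int argc, const char *argv[])
{
	sem_t sa,sb,sc;
	sem_init(&sa,0,1);          //A runs first
	sem_init(&sb,0,0);
	sem_init(&sc,0,0);

	stage_t st[3]={
		{&sa,&sb,"A"},          //A hands off to B
		{&sb,&sc,"B"},          //B hands off to C
		{&sc,&sa,"C"},          //C hands back to A
	};

	pthread_t tid[3];
	for(int i=0;i<3;i++)
		pthread_create(&tid[i],NULL,worker,&st[i]);
	for(int i=0;i<3;i++)
		pthread_join(tid[i],NULL);

	sem_destroy(&sa);
	sem_destroy(&sb);
	sem_destroy(&sc);
	return 0;
}

The second program switches from semaphores to a mutex: two threads copy different halves of a file through the same pair of file descriptors, so every read/write, together with the lseek that positions it, has to happen under the lock.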

#include <myhead.h>

pthread_mutex_t mutex;

typedef struct fdnode
{
	int fd1;   //source file descriptor, shared by both threads
	int fd2;   //destination file descriptor, shared by both threads
}fdall;


//thread worker: copies the second half of the source file, one byte at a time
void* func(void* fd)
{
	fdall* fk=(fdall*)fd;

	//get the size of the source file
	struct stat buf;
	if(stat("/home/ubuntu/process/test1.c",&buf)<0)
	{
		perror("stat");
		return NULL;
	}
	off_t size=buf.st_size;


	int count=0;
	char temp=0;
	while(1)
	{
		pthread_mutex_lock(&mutex);
		//reposition both descriptors each pass: the other thread may have
		//moved the shared offsets while this thread was not holding the lock
		lseek(fk->fd1,size/2+count,SEEK_SET);
		lseek(fk->fd2,size/2+count,SEEK_SET);
		if(read(fk->fd1,&temp,1)==0)
		{
			//unlock before leaving, otherwise the other thread blocks forever
			pthread_mutex_unlock(&mutex);
			break;
		}
		write(fk->fd2,&temp,1);
		count++;
		pthread_mutex_unlock(&mutex);
	}


	return NULL;

}


//thread worker: copies the first half of the source file, one byte at a time
void* func1(void* fd)
{
	fdall* fk=(fdall*)fd;

	//get the size of the source file
	struct stat buf;
	if(stat("/home/ubuntu/process/test1.c",&buf)<0)
	{
		perror("stat");
		return NULL;
	}
	off_t size=buf.st_size;

	int count=0;
	char temp;
	while(count<size/2)
	{
		pthread_mutex_lock(&mutex);
		//reposition both descriptors each pass: the other thread may have
		//moved the shared offsets while this thread was not holding the lock
		lseek(fk->fd1,count,SEEK_SET);
		lseek(fk->fd2,count,SEEK_SET);
		read(fk->fd1,&temp,1);
		write(fk->fd2,&temp,1);
		count++;
		pthread_mutex_unlock(&mutex);
	}

	return NULL;
}


int main(int argc, const char *argv[])
{

	pthread_mutex_init(&mutex,NULL);

	fdall fd;
	int fd1=open("/home/ubuntu/process/test1.c",O_RDONLY);
	int fd2=open("/home/ubuntu/process/newtest1.c",O_RDWR|O_CREAT|O_TRUNC,0664);
	if(fd1<0||fd2<0)
	{
		perror("open");
		return -1;
	}
	fd.fd1=fd1;
	fd.fd2=fd2;

	pthread_t tid;
	if(pthread_create(&tid,NULL,func,&fd)!=0)
	{
		printf("pthread create error!\n");
		return -1;
	}

	pthread_t tid1;
	if(pthread_create(&tid1,NULL,func1,&fd)!=0)
	{
		printf("pthread create error!\n");
		return -1;
	}


	pthread_join(tid,NULL);
	pthread_join(tid1,NULL);

	close(fd1);
	close(fd2);
	pthread_mutex_destroy(&mutex);

	return 0;
}
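
Because both threads share the same two file descriptors, each loop iteration has to re-seek under the mutex: the other thread may have moved the shared offsets in the meantime. As a side note (a sketch, not the original program), pread and pwrite take an explicit offset and never touch the shared file offset, so the same half-and-half copy needs no mutex and no re-seeking. The paths below are the ones assumed throughout this post.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/stat.h>

typedef struct
{
	int fd_in;      //source descriptor
	int fd_out;     //destination descriptor
	off_t start;    //first byte of this thread's region
	off_t len;      //number of bytes to copy
}range_t;

void* copy_range(void* arg)
{
	range_t *r=(range_t*)arg;
	char temp;
	for(off_t i=0;i<r->len;i++)
	{
		//pread/pwrite address the file by absolute offset and do not
		//move the descriptor's offset, so no locking is needed
		if(pread(r->fd_in,&temp,1,r->start+i)!=1)
			break;              //EOF or read error
		pwrite(r->fd_out,&temp,1,r->start+i);
	}
	return NULL;
}

int main(int argc, const char *argv[])
{
	int fd1=open("/home/ubuntu/process/test1.c",O_RDONLY);
	int fd2=open("/home/ubuntu/process/newtest1.c",O_RDWR|O_CREAT|O_TRUNC,0664);
	if(fd1<0||fd2<0)
	{
		perror("open");
		return -1;
	}

	struct stat buf;
	fstat(fd1,&buf);
	off_t size=buf.st_size;

	range_t first ={fd1,fd2,0,size/2};           //first half
	range_t second={fd1,fd2,size/2,size-size/2}; //second half

	pthread_t t1,t2;
	pthread_create(&t1,NULL,copy_range,&first);
	pthread_create(&t2,NULL,copy_range,&second);
	pthread_join(t1,NULL);
	pthread_join(t2,NULL);

	close(fd1);
	close(fd2);
	return 0;
}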
