Mental health issues are drawing increasing attention today, and recognizing and understanding emotions effectively is a key part of maintaining individual mental health. This project builds a CNN model on the PaddlePaddle framework that analyzes facial expression features and then generates coping guidance for the negative emotions it detects.

Mental health has become an indispensable part of personal growth and well-being. In recent years, the rapid progress of artificial intelligence, and in particular the breakthroughs of neural networks in affective computing, image recognition, and behavior analysis, has opened up transformative opportunities for the mental health field: with their strong data-processing and pattern-recognition capabilities, neural networks make it possible to capture and analyze complex psychological states precisely. Against this background, an intelligent system that can recognize a patient's emotional state accurately and in real time is especially valuable.

This project applies these techniques to build a facial expression analysis system. The system automatically captures and analyzes a patient's facial expression features, recognizes the emotional state in real time (e.g. happy, calm, or depressed), probes the psychological state and latent needs behind those emotions, and provides a coping guide for the negative ones.
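At a high level the pipeline chains two stages: the CNN predicts an emotion label from a face image, and that label is fed into a prompt for a large language model that writes the coping guide. The outline below is illustrative only; both function names are placeholders for the code developed in the rest of this article.

    # Illustrative outline; the real implementations follow below.
    def analyze(image_path):
        emotion = classify_expression(image_path)   # RepVGG classifier, trained below
        guide = generate_coping_guide(emotion)      # ERNIE Bot prompt, defined below
        return emotion, guide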
%%capture
!unzip /home/aistudio/data/data293196/expression.zip -d /home/aistudio/
import paddle
from paddle.vision.datasets import DatasetFolder
from paddle.vision import transforms

# Augmentation and preprocessing for the training set
train_transform = transforms.Compose([
    transforms.ColorJitter(brightness=0.2),
    transforms.Resize(128),
    transforms.RandomHorizontalFlip(prob=0.5),
    transforms.RandomRotation(degrees=7),
    transforms.ToTensor(data_format='CHW'),
    transforms.Normalize(mean=0.0, std=1.0, data_format='CHW')])
train_dataset = DatasetFolder(root="/home/aistudio/expression_7/expression_7/train", transform=train_transform)

# Preprocessing for the test set (no augmentation)
test_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.ToTensor(data_format='CHW'),
    transforms.Normalize(mean=0.0, std=1.0, data_format='CHW')])
test_dataset = DatasetFolder(root="/home/aistudio/expression_7/expression_7/test", transform=test_transform)

print(f"Training set: {len(train_dataset)} images")
print(f"Test set: {len(test_dataset)} images")
print(f"Total: {len(train_dataset) + len(test_dataset)} images")

Training set: 28709 images
Test set: 7178 images
Total: 35887 images
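As a quick sanity check (a minimal sketch, assuming the archive has been unpacked to the paths above), you can inspect one transformed sample and the class-to-index mapping that DatasetFolder infers from the folder names:

    # Hypothetical sanity check; assumes the cells above have already run.
    sample, target = train_dataset[0]
    print(sample.shape)             # should be [3, 128, 128] for the square source images
    print(train_dataset.classes)    # the seven expression folders
    print(target)                   # index into that class list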
import numpy as np
import paddle
import paddle.nn as nn
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle.optimizer import AdamW
import warnings
warnings.filterwarnings("ignore")

def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
    # Conv2D followed by BatchNorm, named so the branches can be fused later
    # via branch.conv / branch.bn
    result = paddle.nn.Sequential(
        ('conv', nn.Conv2D(in_channels=in_channels, out_channels=out_channels,
                           kernel_size=kernel_size, stride=stride, padding=padding,
                           groups=groups, bias_attr=False)),
        ('bn', nn.BatchNorm2D(num_features=out_channels)))
    return result

# RepVGGBlock module
class RepVGGBlock(nn.Layer):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', deploy=False):
        super(RepVGGBlock, self).__init__()
        self.deploy = deploy    # deploy = inference-time (re-parameterized) mode
        self.groups = groups    # number of groups for grouped convolution; 1 by default
        self.in_channels = in_channels
        assert kernel_size == 3
        assert padding == 1     # with padding=1, a 3x3 conv leaves the spatial size unchanged
        padding_11 = padding - kernel_size // 2
        self.nonlinearity = nn.ReLU()
        if deploy:
            # In deploy mode the basic block is just a single Conv2D
            self.rbr_reparam = nn.Conv2D(in_channels=in_channels, out_channels=out_channels,
                                         kernel_size=kernel_size, stride=stride,
                                         padding=padding, dilation=dilation, groups=groups,
                                         bias_attr=True, padding_mode=padding_mode)
        else:
            # Identity branch, similar to a ResNet shortcut; it only exists when the
            # input and output channels match and the stride is 1, otherwise the
            # block has just the 3x3 and 1x1 branches
            self.rbr_identity = nn.BatchNorm2D(num_features=in_channels) if out_channels == in_channels and stride == 1 else None
            self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding, groups=groups)   # 3x3 conv + BN
            self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels,
                                   kernel_size=1, stride=stride,
                                   padding=padding_11, groups=groups)  # 1x1 conv + BN

    def forward(self, inputs):
        if hasattr(self, 'rbr_reparam'):
            # Inference: a single Conv2D followed by ReLU
            return self.nonlinearity(self.rbr_reparam(inputs))
        if self.rbr_identity is None:
            id_out = 0
        else:
            id_out = self.rbr_identity(inputs)
        # Training: add the 3x3, 1x1 and identity branches, then ReLU
        return self.nonlinearity(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)

    def get_equivalent_kernel_bias(self):
        # Pull the equivalent W and b out of every branch (see the fusion
        # formulas in the RepVGG paper for why this is possible)
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        if kernel1x1 is None:
            return 0
        return nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        if branch is None:
            # The branch does not exist: W=0, b=0
            return 0, 0
        if isinstance(branch, nn.Sequential):
            # 3x3 or 1x1 branch: conv weight plus the BN statistics
            kernel = branch.conv.weight
            running_mean = branch.bn._mean
            running_var = branch.bn._variance
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn._epsilon    # keeps the denominator from being zero
        else:
            # Identity branch (BN only): build an equivalent 3x3 kernel whose center
            # weight is 1 on the matching channel (depthwise-style when grouped)
            assert isinstance(branch, nn.BatchNorm2D)
            if not hasattr(self, 'id_tensor'):
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.in_channels):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = paddle.to_tensor(kernel_value)
            kernel = self.id_tensor
            running_mean = branch._mean
            running_var = branch._variance
            gamma = branch.weight
            beta = branch.bias
            eps = branch._epsilon
        # Fold BN into W and b; this applies to the 3x3, 1x1 and identity branches alike
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape((-1, 1, 1, 1))
        return kernel * t, beta - running_mean * gamma / std

    def repvgg_convert(self):
        kernel, bias = self.get_equivalent_kernel_bias()
        return kernel.numpy(), bias.numpy()

class RepVGG(nn.Layer):
    def __init__(self, num_blocks, num_classes=1000, width_multiplier=None, override_groups_map=None, deploy=False):
        super(RepVGG, self).__init__()
        assert len(width_multiplier) == 4   # per-stage width multipliers that scale the output channels up or down
        self.deploy = deploy
        self.override_groups_map = override_groups_map or dict()   # per-layer grouped-convolution settings
        assert 0 not in self.override_groups_map
        self.in_planes = min(64, int(64 * width_multiplier[0]))
        self.stage0 = RepVGGBlock(in_channels=3, out_channels=self.in_planes, kernel_size=3, stride=2, padding=1, deploy=self.deploy)
        self.cur_layer_idx = 1
        self.stage1 = self._make_stage(int(64 * width_multiplier[0]), num_blocks[0], stride=2)
        self.stage2 = self._make_stage(int(128 * width_multiplier[1]), num_blocks[1], stride=2)
        self.stage3 = self._make_stage(int(256 * width_multiplier[2]), num_blocks[2], stride=2)
        self.stage4 = self._make_stage(int(512 * width_multiplier[3]), num_blocks[3], stride=2)
        self.gap = nn.AdaptiveAvgPool2D(output_size=1)   # global average pooling to Cx1x1, similar to flatten
        self.linear = nn.Linear(int(512 * width_multiplier[3]), num_classes)

    def _make_stage(self, planes, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)   # only the first block of a stage downsamples
        blocks = []
        for stride in strides:
            cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)   # grouped convolution
            blocks.append(RepVGGBlock(in_channels=self.in_planes, out_channels=planes, kernel_size=3,
                                      stride=stride, padding=1, groups=cur_groups, deploy=self.deploy))
            self.in_planes = planes
            self.cur_layer_idx += 1
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.stage0(x)
        out = self.stage1(out)
        out = self.stage2(out)
        out = self.stage3(out)
        out = self.stage4(out)
        out = self.gap(out)
        out = paddle.flatten(out, start_axis=1)
        out = self.linear(out)
        return out

def create_RepVGG_B2(deploy=False, num_classes=7):
    return RepVGG(num_blocks=[4, 6, 16, 1], num_classes=num_classes,
                  width_multiplier=[2.5, 2.5, 2.5, 5], override_groups_map=None, deploy=deploy)

model = create_RepVGG_B2(num_classes=7)

W0927 13:05:40.066419 242 gpu_resources.cc:119] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 12.0, Runtime API Version: 11.8
W0927 13:05:40.068603 242 gpu_resources.cc:164] device: 0, cuDNN Version: 8.9.
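The point of this architecture is that, after training, the three parallel branches of every block can be folded into a single 3x3 convolution. The following is a minimal sketch (not part of the original project) that checks the fusion on one standalone block; in eval mode, where BN uses its running statistics, the fused conv must reproduce the block's output up to floating-point error:

    # Hedged equivalence check for structural re-parameterization.
    block = RepVGGBlock(in_channels=8, out_channels=8, kernel_size=3, stride=1, padding=1)
    block.eval()   # BN must use running statistics for the fusion to be exact
    x = paddle.randn([1, 8, 32, 32])
    y_train = block(x)   # three-branch forward

    kernel, bias = block.get_equivalent_kernel_bias()
    fused = nn.Conv2D(8, 8, kernel_size=3, stride=1, padding=1)
    fused.weight.set_value(kernel)
    fused.bias.set_value(bias)
    y_deploy = nn.ReLU()(fused(x))   # single-branch forward

    print(float(paddle.abs(y_train - y_deploy).max()))   # ~0, up to float error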
import warnings
warnings.filterwarnings("ignore")
paddle.jit.save(model, '/home/aistudio/work/repvgg', [paddle.static.InputSpec([-1, 3, 128, 128])])

I0927 13:05:56.572229 242 program_interpreter.cc:243] New Executor is Running.
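If you want to run the exported static-graph model later (a short sketch, assuming the save above succeeded), it can be reloaded with paddle.jit.load:

    # Hedged usage sketch for the saved static model.
    loaded = paddle.jit.load('/home/aistudio/work/repvgg')
    loaded.eval()
    dummy = paddle.randn([1, 3, 128, 128])
    print(loaded(dummy).shape)   # expected: [1, 7]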
model = paddle.Model(model)

# Cosine-annealing learning-rate schedule
scheduler = CosineAnnealingDecay(
    learning_rate=0.01,
    T_max=80,
)

# AdamW optimizer
opti = AdamW(
    learning_rate=scheduler,
    parameters=model.parameters(),
    weight_decay=1e-5)

model.prepare(optimizer=opti, loss=paddle.nn.CrossEntropyLoss(), metrics=paddle.metric.Accuracy())
callback = paddle.callbacks.VisualDL(log_dir='work/log_dir')   # log metrics for VisualDL

model.fit(train_data=train_dataset,   # training set
          eval_data=test_dataset,     # validation set
          batch_size=256,             # samples read per batch
          epochs=80,                  # total training epochs
          eval_freq=1,                # evaluate once per epoch
          verbose=1,                  # logging verbosity
          save_dir="work/output/",    # where weights and other files are saved
          num_workers=6,              # worker processes to speed up data loading
          callbacks=[callback])       # attach the VisualDL callback
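Once training has finished, the same high-level Model wrapper can re-score the held-out set (a sketch, assuming the objects defined above are still in scope):

    # Hedged sketch: final evaluation with the high-level Model API.
    eval_result = model.evaluate(test_dataset, batch_size=256, verbose=1)
    print(eval_result)   # a dict with the loss and the accuracy metric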
model = create_RepVGG_B2(num_classes=7)   # rebuild a bare network for inference

import cv2
import paddle
from paddle.vision import transforms
import matplotlib.pyplot as plt

label = {0: 'anger', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'sad', 5: 'surprised', 6: 'normal'}
label2 = {0: '愤怒', 1: '厌恶', 2: '恐惧', 3: '快乐', 4: '难过', 5: '惊讶', 6: '平静'}

# Load the trained weights; the network can now recognize expressions
best_model_path = "/home/aistudio/work/output/final.pdparams"
para_state_dict = paddle.load(best_model_path)
model.set_state_dict(para_state_dict)
model.eval()   # switch to eval mode for testing

# Load an image and preprocess it
img = cv2.imread("/home/aistudio/person.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # cv2 loads images as BGR by default
img = cv2.resize(img, (128, 128))
input_tensor = transforms.to_tensor(img, data_format='CHW')   # convert to a paddle tensor
# The model expects (b, c, h, w); add a batch dimension so (c, h, w) becomes (1, c, h, w)
input_tensor = paddle.unsqueeze(input_tensor, axis=0)

# Run inference: the output holds a score for each of the 7 classes,
# and the class with the largest score is the prediction
test_result = model(input_tensor)
max_prob_index = paddle.argmax(test_result[0])
predict_result = label2[int(max_prob_index)]   # the predicted label

# Show the image together with the prediction
plt.figure()
plt.title('predict: {}'.format(label[int(max_prob_index)]))
plt.imshow(img)
plt.show()
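The raw outputs are unnormalized scores; to report per-class probabilities you can apply a softmax (a small sketch, reusing test_result from the cell above):

    # Hedged sketch: turn the logits into per-class probabilities.
    import paddle.nn.functional as F
    probs = F.softmax(test_result[0])
    for idx, p in enumerate(probs.numpy()):
        print(f"{label[idx]}: {p:.3f}")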
!pip install erniebot
import erniebot

# Configure the backend, access token and model
erniebot.api_type = 'aistudio'
erniebot.access_token = '<你的访问令牌>'   # your aistudio access token
model = 'ernie-4.0'

predict_result = label2[int(max_prob_index)]

# Chinese system prompt: act as a mental-health expert and produce a science-based
# coping guide for the predicted negative emotion {predict_result}; it is kept in
# Chinese because both the interpolated label and the target model are Chinese
prompt = f'''你是一个心理健康专家,擅长针对病人的负面情绪提供科学的应对指南,根据用户的负面情绪类型和程度,你需详细提出应对{predict_result}这种负面情绪的办法,
结合心理学原理和实践经验,为用户提供个性化的应对指南。
指南内容包括但不限于情绪调节技巧、积极心理建设、寻求专业帮助等建议。
对于负面情绪识别,输出情绪类型和可能的原因分析。
对于应对指南,输出具体的建议措施和实施步骤。
对于跟踪反馈,输出用户的情绪变化情况和后续建议。
注意,所有建议必须基于心理学原理和实践经验,不得提供无科学依据的方案。
尊重用户的隐私和感受,避免使用可能造成二次伤害的语言。
在用户情绪严重或需要专业治疗时,必须引导用户寻求专业心理医生的帮助。
开场白:你好,我是你的心理健康专家。无论你现在正面临什么样的负面情绪,我都会在这里为你提供专业的支持和应对指南。让我们一起努力,找回内心的平静与快乐。
'''
response = erniebot.ChatCompletion.create(
    model=model,
    messages=[{"role": "user", "content": prompt}],
)
print(response.result)
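The prompt also asks for follow-up tracking; one way to use that (a hedged sketch, assuming erniebot's multi-turn message format, with an illustrative follow-up message) is to continue the same conversation:

    # Hypothetical follow-up turn; the user message below is illustrative only.
    follow_up = erniebot.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": response.result},
            {"role": "user", "content": "我按照建议做了一周,情绪有所好转,接下来该怎么做?"},
        ],
    )
    print(follow_up.result)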