Building textCNN, BERT, and transformer text classifiers with PyTorch

First, the format of the data after preprocessing: the first column holds the text and the second column the annotated label. The data are stored in xlsx spreadsheets, split into a training set and a validation set.
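The scripts below read a text column named comment and an integer label column named label from these files. A minimal sketch of the expected layout (the example rows and the demo.xlsx path are made up):

import pandas as pd

# Hypothetical two-row file in the expected format: a text column named
# 'comment' and an integer label column named 'label' (xlsx I/O needs openpyxl)
demo = pd.DataFrame({
    'comment': ['这家店的菜很好吃', '服务太差了,不会再来'],
    'label': [1, 0],
})
demo.to_excel('demo.xlsx', index=False)
print(pd.read_excel('demo.xlsx'))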

textCNN

The complete project code:

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import BertTokenizer, BertModel
import random
from sklearn.metrics import classification_report

# Set random seeds for reproducibility
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_seed(42)

# Load the pretrained BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
bert_model = BertModel.from_pretrained('bert-base-chinese')

# Tokenize a single text into input IDs and an attention mask
def preprocess(text):
    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
    )
    return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze()

# Load the training and validation data
train_df = pd.read_excel('../train.xlsx')
val_df = pd.read_excel('../val.xlsx')

# Preprocess the training data
train_texts = train_df['comment'].apply(preprocess)
train_labels = torch.tensor(train_df['label'].values)
train_input_ids = torch.stack([x[0] for x in train_texts])
train_attention_masks = torch.stack([x[1] for x in train_texts])

# Preprocess the validation data
val_texts = val_df['comment'].apply(preprocess)
val_labels = torch.tensor(val_df['label'].values)
val_input_ids = torch.stack([x[0] for x in val_texts])
val_attention_masks = torch.stack([x[1] for x in val_texts])

# Build the Dataset and DataLoaders
class TextDataset(Dataset):
    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'labels': self.labels[idx]
        }

train_dataset = TextDataset(train_input_ids, train_attention_masks, train_labels)
val_dataset = TextDataset(val_input_ids, val_attention_masks, val_labels)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Use a GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Define the model
class TextCNN(nn.Module):
    def __init__(self, bert_model, num_classes):
        super(TextCNN, self).__init__()
        self.bert_model = bert_model
        self.conv1 = nn.Conv2d(1, 100, (3, 768))
        self.conv2 = nn.Conv2d(1, 100, (4, 768))
        self.conv3 = nn.Conv2d(1, 100, (5, 768))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(300, num_classes)

    def forward(self, input_ids, attention_mask):
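        # BERT is used as a frozen feature extractor here; gradients do not flow into it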
        with torch.no_grad():
            embedded = self.bert_model(input_ids, attention_mask).last_hidden_state
        embedded = embedded.unsqueeze(1)
        conv1 = F.relu(self.conv1(embedded)).squeeze(3)
        conv2 = F.relu(self.conv2(embedded)).squeeze(3)
        conv3 = F.relu(self.conv3(embedded)).squeeze(3)
        pooled1 = F.max_pool1d(conv1, conv1.size(2)).squeeze(2)
        pooled2 = F.max_pool1d(conv2, conv2.size(2)).squeeze(2)
        pooled3 = F.max_pool1d(conv3, conv3.size(2)).squeeze(2)
        out = torch.cat((pooled1, pooled2, pooled3), 1)
        out = self.dropout(out)
        return self.fc(out)

# Initialize the model, loss function, and optimizer
model = TextCNN(bert_model, num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train and validate the model, saving both the best and the last-epoch weights
def train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device, epochs=10):
    best_val_accuracy = 0.0
    best_model_path = "best_model.pth"
    last_model_path = "last_model.pth"

    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for batch in train_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(train_dataloader)
        print(f"Epoch {epoch + 1}, Training Loss: {train_loss}")

        model.eval()
        val_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in val_dataloader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)

                outputs = model(input_ids, attention_mask)
                loss = criterion(outputs, labels)
                val_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss /= len(val_dataloader)
        accuracy = 100 * correct / total
        print(f"Validation Loss: {val_loss}, Validation Accuracy: {accuracy}%")

        # Print a per-class classification report
        print("Classification Report:")
        print(classification_report(all_labels, all_preds, digits=3))

        # Save the model that performs best on the validation set
        if accuracy > best_val_accuracy:
            best_val_accuracy = accuracy
            torch.save(model.state_dict(), best_model_path)
            print(f"Best model saved with accuracy: {best_val_accuracy}%")

    # Save the final-epoch model
    torch.save(model.state_dict(), last_model_path)
    print(f"Last model saved.")

train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device)

You can adjust the max_length value inside preprocess() to match the texts you have collected: anything longer than the maximum is truncated, and anything shorter is padded. Picking a value that covers the length of most of your texts tends to give the best results.
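One way to pick that value is to inspect the tokenized length distribution of your own data. A minimal sketch, assuming train_df and tokenizer from the script above are already in scope:

# Inspect tokenized lengths to choose a sensible max_length
lengths = train_df['comment'].apply(
    lambda t: len(tokenizer.encode(t, add_special_tokens=True)))
print(lengths.describe())
print('95th percentile:', int(lengths.quantile(0.95)))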

Running the script may raise the following error:

OSError: Can't load tokenizer for 'bert-base-chinese'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'bert-base-chinese' is the correct path to a directory containing all relevant files for a BertTokenizer tokenizer.

This means the automatic download failed; the files can be downloaded manually instead.
Visit https://huggingface.co/models, search for bert-base-chinese, and select the first result.
On the model page, download three files: config.json, pytorch_model.bin, and vocab.txt. Create a folder named "bert-base-chinese" in the same path as the project and place the three files in it.
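Alternatively, the same files can be fetched programmatically with the huggingface_hub package. A sketch, assuming huggingface_hub is installed and the machine can reach the Hub:

from huggingface_hub import snapshot_download

# Download only the three files the scripts need into a local folder
snapshot_download(repo_id='bert-base-chinese',
                  local_dir='./bert-base-chinese',
                  allow_patterns=['config.json', 'pytorch_model.bin', 'vocab.txt'])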
Then change the two lines that load the pretrained BERT model and tokenizer so that they point to the local copy:

tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese')
bert_model = BertModel.from_pretrained('./bert-base-chinese')

Results: (screenshot omitted)
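Once training finishes, the saved weights can be loaded back for prediction. A minimal sketch, reusing TextCNN, bert_model, preprocess, and device from the script above (the example sentence is made up):

# Load the best checkpoint and classify a single sentence
model = TextCNN(bert_model, num_classes=2).to(device)
model.load_state_dict(torch.load('best_model.pth', map_location=device))
model.eval()

input_ids, attention_mask = preprocess('这家店的菜很好吃')
with torch.no_grad():
    logits = model(input_ids.unsqueeze(0).to(device),
                   attention_mask.unsqueeze(0).to(device))
print('Predicted label:', logits.argmax(dim=1).item())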

BERT

The BERT classifier uses the same model and tokenizer as the textCNN version:

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from transformers import BertTokenizer, BertModel
import random
from sklearn.metrics import classification_report

# Set random seeds for reproducibility
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_seed(42)

# Load the pretrained BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese')
bert_model = BertModel.from_pretrained('./bert-base-chinese')

# Tokenize a single text into input IDs and an attention mask
def preprocess(text):
    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
    )
    return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze()

# Load the training and validation data
train_df = pd.read_excel('../train.xlsx')
val_df = pd.read_excel('../val.xlsx')

# Preprocess the training data
train_texts = train_df['comment'].apply(preprocess)
train_labels = torch.tensor(train_df['label'].values)
train_input_ids = torch.stack([x[0] for x in train_texts])
train_attention_masks = torch.stack([x[1] for x in train_texts])

# Preprocess the validation data
val_texts = val_df['comment'].apply(preprocess)
val_labels = torch.tensor(val_df['label'].values)
val_input_ids = torch.stack([x[0] for x in val_texts])
val_attention_masks = torch.stack([x[1] for x in val_texts])

# Build the Dataset and DataLoaders
class TextDataset(Dataset):
    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'labels': self.labels[idx]
        }

train_dataset = TextDataset(train_input_ids, train_attention_masks, train_labels)
val_dataset = TextDataset(val_input_ids, val_attention_masks, val_labels)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Use a GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Define the model
class BertForTextClassification(nn.Module):
    def __init__(self, bert_model, num_classes):
        super(BertForTextClassification, self).__init__()
        self.bert = bert_model
        self.dropout = nn.Dropout(0.5)
        self.classifier = nn.Linear(bert_model.config.hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = outputs.pooler_output
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits

# Initialize the model, loss function, and optimizer
model = BertForTextClassification(bert_model, num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
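# Note the much smaller learning rate than in the textCNN script: all BERT weights are fine-tuned here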
optimizer = optim.Adam(model.parameters(), lr=0.00001)

# Train and validate the model, saving both the best and the last-epoch weights
def train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device, epochs=10):
    best_val_accuracy = 0.0
    best_model_path = "best_model.pth"
    last_model_path = "last_model.pth"

    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for batch in train_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(train_dataloader)
        print(f"Epoch {epoch + 1}, Training Loss: {train_loss}")

        model.eval()
        val_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in val_dataloader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)

                outputs = model(input_ids, attention_mask)
                loss = criterion(outputs, labels)
                val_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss /= len(val_dataloader)
        accuracy = 100 * correct / total
        print(f"Validation Loss: {val_loss}, Validation Accuracy: {accuracy}%")

        # Print a per-class classification report
        print("Classification Report:")
        print(classification_report(all_labels, all_preds, digits=3))

        # Save the model that performs best on the validation set
        if accuracy > best_val_accuracy:
            best_val_accuracy = accuracy
            torch.save(model.state_dict(), best_model_path)
            print(f"Best model saved with accuracy: {best_val_accuracy}%")

    # Save the final-epoch model
    torch.save(model.state_dict(), last_model_path)
    print(f"Last model saved.")

train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device)
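A variant worth experimenting with (not part of the original script) is to give the pretrained BERT body a smaller learning rate than the freshly initialized classifier head, using optimizer parameter groups:

# Hypothetical variant: separate learning rates for the BERT body and the head
optimizer = optim.Adam([
    {'params': model.bert.parameters(), 'lr': 1e-5},
    {'params': model.classifier.parameters(), 'lr': 1e-3},
])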

Results: (screenshot omitted)

transformer

The transformer version uses a different pretrained model and tokenizer: hfl/chinese-roberta-wwm-ext. If you need to download it manually in advance, visit the Hugging Face website, fetch the same three files (config.json, pytorch_model.bin, and vocab.txt), and save them in an hfl/chinese-roberta-wwm-ext directory.
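To confirm the local copy is complete before training, it can be loaded with downloads disabled. A sketch: passing local_files_only=True makes transformers fail immediately if any file is missing.

from transformers import AutoTokenizer, AutoModel

# Fail fast if the local copy is incomplete
tokenizer = AutoTokenizer.from_pretrained('./hfl/chinese-roberta-wwm-ext', local_files_only=True)
model = AutoModel.from_pretrained('./hfl/chinese-roberta-wwm-ext', local_files_only=True)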

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModel
import random
from sklearn.metrics import classification_report

# Set random seeds for reproducibility
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_seed(42)

# Load the pretrained Transformer model and tokenizer
model_name = './hfl/chinese-roberta-wwm-ext'
tokenizer = AutoTokenizer.from_pretrained(model_name)
transformer_model = AutoModel.from_pretrained(model_name)

# Tokenize a single text into input IDs and an attention mask
def preprocess(text):
    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
    )
    return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze()

# Load the training and validation data
train_df = pd.read_excel('../train.xlsx')
val_df = pd.read_excel('../val.xlsx')

# Preprocess the training data
train_texts = train_df['comment'].apply(preprocess)
train_labels = torch.tensor(train_df['label'].values)
train_input_ids = torch.stack([x[0] for x in train_texts])
train_attention_masks = torch.stack([x[1] for x in train_texts])

# Preprocess the validation data
val_texts = val_df['comment'].apply(preprocess)
val_labels = torch.tensor(val_df['label'].values)
val_input_ids = torch.stack([x[0] for x in val_texts])
val_attention_masks = torch.stack([x[1] for x in val_texts])

# Build the Dataset and DataLoaders
class TextDataset(Dataset):
    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'labels': self.labels[idx]
        }

train_dataset = TextDataset(train_input_ids, train_attention_masks, train_labels)
val_dataset = TextDataset(val_input_ids, val_attention_masks, val_labels)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Use a GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Define the model
class TransformerForTextClassification(nn.Module):
    def __init__(self, transformer_model, num_classes):
        super(TransformerForTextClassification, self).__init__()
        self.transformer = transformer_model
        self.dropout = nn.Dropout(0.5)
        self.classifier = nn.Linear(transformer_model.config.hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
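        # Some encoders do not expose pooler_output; fall back to the [CLS] token's hidden state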
        pooled_output = outputs.pooler_output if 'pooler_output' in outputs else outputs.last_hidden_state[:, 0]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits

# Initialize the model, loss function, and optimizer
model = TransformerForTextClassification(transformer_model, num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.00001)

# Train and validate the model, saving both the best and the last-epoch weights
def train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device, epochs=10):
    best_val_accuracy = 0.0
    best_model_path = "best_model.pth"
    last_model_path = "last_model.pth"

    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for batch in train_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(train_dataloader)
        print(f"Epoch {epoch + 1}, Training Loss: {train_loss}")

        model.eval()
        val_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in val_dataloader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)

                outputs = model(input_ids, attention_mask)
                loss = criterion(outputs, labels)
                val_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss /= len(val_dataloader)
        accuracy = 100 * correct / total
        print(f"Validation Loss: {val_loss}, Validation Accuracy: {accuracy}%")

        # Print a per-class classification report
        print("Classification Report:")
        print(classification_report(all_labels, all_preds, digits=3))

        # Save the model that performs best on the validation set
        if accuracy > best_val_accuracy:
            best_val_accuracy = accuracy
            torch.save(model.state_dict(), best_model_path)
            print(f"Best model saved with accuracy: {best_val_accuracy}%")

    # Save the final-epoch model
    torch.save(model.state_dict(), last_model_path)
    print(f"Last model saved.")

train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device)

Results: (screenshot omitted)
