Data Preprocessing for Universal Information Extraction

train_data='./datasets/duuie'
output_folder='./datasets/duuie_pre'
ignore_datasets=["DUEE", "DUEE_FIN_LITE"]
schema_folder='./datasets/seen_schema'

# Preprocess the CCKS 2022 DuUIE competition data
import shutil

# Copy the raw data into the working folder (run once if the folder does not exist yet)
# shutil.copytree(train_data, output_folder)

import os

life_folder = os.path.join(output_folder, "DUIE_LIFE_SPO")
org_folder = os.path.join(output_folder, "DUIE_ORG_SPO")

print(life_folder,org_folder)

import json

def load_jsonlines_file(filename):
    """Load a JSON-Lines file into a list of dicts."""
    with open(filename, encoding="utf8") as reader:
        return [json.loads(line) for line in reader]

life_train_instances = load_jsonlines_file(f"{life_folder}/train.json")
org_train_instances = load_jsonlines_file(f"{org_folder}/train.json")

for i in range(27695,27698):
    print(life_train_instances[i],'|',org_train_instances[i])

class RecordSchema:
    def __init__(self, type_list, role_list, type_role_dict):
        self.type_list = type_list
        self.role_list = role_list
        self.type_role_dict = type_role_dict
    def __repr__(self) -> str:
        repr_list = [f"Type: {self.type_list}\n", f"Role: {self.role_list}\n", f"Map: {self.type_role_dict}"]
        return "\n".join(repr_list)
    @staticmethod
    def get_empty_schema():
        return RecordSchema(type_list=list(), role_list=list(), type_role_dict=dict())
    @staticmethod
    def read_from_file(filename):
        lines = open(filename, encoding="utf8").readlines()
        type_list = json.loads(lines[0])  # types
        role_list = json.loads(lines[1])  # roles
        type_role_dict = json.loads(lines[2])  # type -> role mapping
        return RecordSchema(type_list, role_list, type_role_dict)
    def write_to_file(self, filename):
        with open(filename, "w", encoding="utf8") as output:
            # json.dumps serializes a Python object to a JSON string; ensure_ascii=False
            # keeps non-ASCII characters (e.g. Chinese) unescaped instead of \u-escaping them
            output.write(json.dumps(self.type_list, ensure_ascii=False) + "\n")
            output.write(json.dumps(self.role_list, ensure_ascii=False) + "\n")
            output.write(json.dumps(self.type_role_dict, ensure_ascii=False) + "\n")
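
# Sanity check of the schema format: record.schema is just three JSON lines (types, roles,
# type -> role map). The labels and the /tmp path below are made up for illustration only.
toy_schema = RecordSchema(
    type_list=["人物"],
    role_list=["母亲", "毕业院校"],
    type_role_dict={"人物": ["母亲", "毕业院校"]},
)
toy_schema.write_to_file("/tmp/toy_record.schema")
print(RecordSchema.read_from_file("/tmp/toy_record.schema"))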

RecordSchema.read_from_file(f"{life_folder}/record.schema")

life_relation = RecordSchema.read_from_file(f"{life_folder}/record.schema").role_list

org_relation = RecordSchema.read_from_file(f"{org_folder}/record.schema").role_list

from collections import defaultdict

instance_dict = defaultdict(list)

for instance in life_train_instances + org_train_instances:
    instance_dict[instance["text"]] += [instance]

a=[i for i in life_train_instances for j in org_train_instances if i['text']==j['text']]

b=[i for i in org_train_instances for j in a if i['text']==j['text']]

for i in range(3):
    print(a[i]['relation'],'|',b[i]['relation'])

dict_1={1:2,3:4}
for i in dict_1:  # iterating a dict is equivalent to iterating dict_1.keys()
    print(i)

from typing import Tuple, List, Dict

def merge_instance(instance_list):
    def all_equal(_x):  # check whether all elements are identical
        for __x in _x:
            if __x != _x[0]:
                return False
        return True
    def entity_key(_x):
        return (tuple(_x["offset"]), _x["type"])
    def relation_key(_x):
        return (
            tuple(_x["type"]),
            tuple(_x["args"][0]["offset"]),
            _x["args"][0]["type"],
            tuple(_x["args"][1]["offset"]),
            _x["args"][1]["type"],
        )

    def event_key(_x):
        return (tuple(_x["offset"]), _x["type"])
    assert all_equal([x["text"] for x in instance_list])
    element_dict = {
        "entity": dict(),
        "relation": dict(),
        "event": dict(),
    }
    instance_id_list = list()
    for x in instance_list:
        instance_id_list += [x["id"]]
        for entity in x.get("entity", list()):
            element_dict["entity"][entity_key(entity)] = entity
        for relation in x.get("relation", list()):
            element_dict["relation"][relation_key(relation)] = relation
        for event in x.get("event", list()):
            element_dict["event"][event_key(event)] = event

    return {
        "id": "-".join(instance_id_list),
        "text": instance_list[0]["text"],
        "tokens": instance_list[0]["tokens"],
        "entity": list(element_dict["entity"].values()),
        "relation": list(element_dict["relation"].values()),
        "event": list(element_dict["event"].values()),
    }
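
# A small made-up example of merge_instance: two instances that share the same text and
# carry the same relation annotation are merged, and the duplicate relation is kept only once.
_toy_rel = {
    "type": "毕业院校",
    "args": [
        {"offset": [0, 1], "type": "人物", "text": "张三"},
        {"offset": [5, 6, 7, 8], "type": "学校", "text": "某某大学"},
    ],
}
_toy_a = {"id": "a1", "text": "张三毕业于某某大学", "tokens": list("张三毕业于某某大学"), "relation": [_toy_rel]}
_toy_b = {"id": "b1", "text": "张三毕业于某某大学", "tokens": list("张三毕业于某某大学"), "relation": [_toy_rel]}
print(merge_instance([_toy_a, _toy_b])["relation"])  # the shared relation appears only once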

for text in instance_dict:  # merge all instances that share the same text
    instance_dict[text] = merge_instance(instance_dict[text])

for i in range(800,802):
    print(list(instance_dict.values())[i]['relation'])

import copy

with open(f"{life_folder}/train.json", "w") as output:
    for instance in instance_dict.values():
        new_instance = copy.deepcopy(instance)
        new_instance["relation"] = list(filter(lambda x: x["type"] in life_relation, instance["relation"]))
        output.write(json.dumps(new_instance) + "\n")

 with open(f"{org_folder}/train.json", "w") as output:
    for instance in instance_dict.values():
        new_instance = copy.deepcopy(instance)
        new_instance["relation"] = list(filter(lambda x: x["type"] in org_relation, instance["relation"]))
        output.write(json.dumps(new_instance) + "\n")

a_instances = load_jsonlines_file(f"{life_folder}/train.json")
b_instances = load_jsonlines_file(f"{org_folder}/train.json")

print(len(a_instances),len(b_instances))

import yaml

def load_definition_schema_file(filename):
    return yaml.load(open(filename, encoding="utf8"), Loader=yaml.FullLoader)

aa = load_definition_schema_file(os.path.join(schema_folder,'体育竞赛.yaml'))

mm=list()
for i in aa['事件'].values():
    mm+=i["参数"]   
mm=list(set(mm))

[x for x in aa['事件']]

aa['事件']['退役']["参数"].keys()

aaa={1:2,3:4}
for k,v in aaa.items():
    print(k,v)

def dump_schema(output_folder, schema_dict):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for schema_name, schema in schema_dict.items():
        schema_file = f"{output_folder}/{schema_name}.schema"
        with open(schema_file, "w", encoding="utf8") as output:
            for element in schema:
                output.write(json.dumps(element, ensure_ascii=False) + "\n")

def dump_event_schema(event_map, output_folder):
    role_list = list()
    for roles in event_map.values():
        role_list += roles["参数"]
    role_list = list(set(role_list))  # deduplicate roles
    type_list = list(event_map.keys())
    type_role_map = {event_type: list(event_map[event_type]["参数"].keys()) for event_type in event_map}
    dump_schema(
        output_folder=output_folder,
        schema_dict={
            "entity": [[], [], {}],
            "relation": [[], [], {}],
            "event": [type_list, rols_list, type_role_map],
            "record": [type_list, rols_list, type_role_map],
        },
    )
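
# A quick look at what dump_event_schema writes, using a single made-up event type
# (only the keys of "参数" matter here; the /tmp path is just for illustration).
_toy_events = {"夺冠": {"参数": {"时间": "", "冠军": "", "夺冠赛事": ""}}}
dump_event_schema(_toy_events, "/tmp/toy_event_schema")
print(open("/tmp/toy_event_schema/event.schema", encoding="utf8").read())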

def filter_event_in_instance(instances,required_event_types):
    """Filter events in the instance, keep event mentions with `required_event_types`
    过滤实例中的事件,只保留需要的事件类别的事件标注
    """
    new_instances = list()
    for instance in instances:
        new_instance = copy.deepcopy(instance)
        new_instance["event"] = list(filter(lambda x: x["type"] in required_event_types, new_instance["event"]))
        new_instances += [new_instance]
    return new_instances

def dump_instances(instances, output_filename):
    with open(output_filename, "w", encoding="utf8") as output:
        for instance in instances:
            output.write(json.dumps(instance, ensure_ascii=False) + "\n")

def filter_event(data_folder, event_types, output_folder):
    """Keep event with `event_types` in `data_folder` save to `output_folder`
    过滤 `data_folder` 中的事件,只保留 `event_types` 类型事件保存到 `output_folder`"""
    dump_event_schema(event_types, output_folder)
    for split in ["train", "val"]:
        filename = os.path.join(data_folder, f"{split}.json")
        instances = [json.loads(line.strip()) for line in open(filename, encoding="utf8")]
        new_instances = filter_event_in_instance(instances, required_event_types=event_types)
        dump_instances(new_instances, os.path.join(output_folder, f"{split}.json"))

# Preprocess the event data: keep only event annotations of the `灾害意外` (disaster/accident) and `体育竞赛` (sports) schemas and drop all other event types
for schema in ["灾害意外", "体育竞赛"]:
    print(f"Building {schema} dataset ...")
    duee_folder = os.path.join(output_folder, "DUEE")
    schema_file = os.path.join(schema_folder, f"{schema}.yaml")
    output_folder2 = os.path.join(output_folder, schema)
    schema = load_definition_schema_file(schema_file)
    filter_event(
        duee_folder,
        schema["事件"],
        output_folder2,
    )

ty_instances = load_jsonlines_file(f"{output_folder}/体育竞赛/train.json")
zh_instances = load_jsonlines_file(f"{output_folder}/灾害意外/train.json")

print(len(ty_instances),len(zh_instances))

for i in range(11508,11608):
    print(ty_instances[i],'|',zh_instances[i])

bb=load_definition_schema_file(os.path.join(schema_folder, "金融信息.yaml"))

for i in bb['事件'].keys():
    print(i)

mm=list()
mm+=bb['事件']['中标']["参数"]   
mm=list(set(mm))

bb["事件"]['中标']["参数"] .keys()

for schema in ["金融信息"]:
    print(f"Building {schema} dataset ...")
    duee_fin_folder = os.path.join(output_folder, "DUEE_FIN_LITE")
    schema_file = os.path.join(schema_folder, f"{schema}.yaml")
    output_folder2 = os.path.join(output_folder, schema)
    schema = load_definition_schema_file(schema_file)
    # Split multi-event-type extraction into several single-event-type extraction tasks,
    # building one sub-dataset per event type
    for event_type in schema["事件"]:
        filter_event(
            duee_fin_folder,
            {event_type: schema["事件"][event_type]},
            output_folder2 + "_" + event_type,
        )

vv=load_jsonlines_file(f"{output_folder}/DUEE_FIN_LITE/train.json")

zb_instances = load_jsonlines_file(f"{output_folder}/金融信息_中标/train.json")
zy_instances = load_jsonlines_file(f"{output_folder}/金融信息_质押/train.json")

print(len(zb_instances),len(zy_instances))

for i in range(6985,7015):
    print(zb_instances[i],'|',zy_instances[i])

def annonote_graph(
    entities: List[Dict] = [],
    relations: List[Dict] = [],
    events: List[Dict] = []):
    spot_dict = dict()
    asoc_dict = defaultdict(list)
    # Convert entities, relations and events into a spot-association (spot/asoc) graph
    def add_spot(spot):
        spot_key = (tuple(spot["offset"]), spot["type"])
        spot_dict[spot_key] = spot
    def add_asoc(spot, asoc, tail):
        spot_key = (tuple(spot["offset"]), spot["type"])
        asoc_dict[spot_key] += [(tuple(tail["offset"]), tail["text"], asoc)]
    for entity in entities:
        add_spot(spot=entity)
    for relation in relations:
        add_spot(spot=relation["args"][0])
        add_asoc(spot=relation["args"][0], asoc=relation["type"], tail=relation["args"][1])
    for event in events:
        add_spot(spot=event)
        for argument in event["args"]:
            add_asoc(spot=event, asoc=argument["type"], tail=argument)
    spot_asoc_instance = list()
    for spot_key in sorted(spot_dict.keys()):
        offset, label = spot_key
        if len(spot_dict[spot_key]["offset"]) == 0:
            continue
        spot_instance = {
            "span": spot_dict[spot_key]["text"],
            "label": label,
            "asoc": list(),
        }
        for tail_offset, tail_text, asoc in sorted(asoc_dict.get(spot_key, [])):
            if len(tail_offset) == 0:
                continue
            spot_instance["asoc"] += [(asoc, tail_text)]
        spot_asoc_instance += [spot_instance]
    spot_labels = set([label for _, label in spot_dict.keys()])
    asoc_labels = set()
    for _, asoc_list in asoc_dict.items():
        for _, _, asoc in asoc_list:
            asoc_labels.add(asoc)
    return spot_labels, asoc_labels, spot_asoc_instance
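
# A made-up relation run through annonote_graph, to show the spot/asoc structure that is
# attached to each training instance below.
_toy_relation = {
    "type": "所属机构",
    "args": [
        {"offset": [0, 1, 2], "type": "人物", "text": "李某某"},
        {"offset": [4, 5, 6, 7], "type": "机构", "text": "某某公司"},
    ],
}
_spots, _asocs, _spot_asoc = annonote_graph(relations=[_toy_relation])
print(_spots)      # {'人物'}
print(_asocs)      # {'所属机构'}
print(_spot_asoc)  # [{'span': '李某某', 'label': '人物', 'asoc': [('所属机构', '某某公司')]}]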

def add_spot_asoc_to_single_file(filename):
    instances = [json.loads(line) for line in open(filename, encoding="utf8")]
    print(f"Add spot asoc to {filename} ...")
    with open(filename, "w", encoding="utf8") as output:
        for instance in instances:
            spots, asocs, spot_asoc_instance = annonote_graph(
                entities=instance["entity"],      # entities
                relations=instance["relation"],   # relations
                events=instance["event"],         # events
            )
            # attach the spot/asoc annotation to the instance
            instance["spot_asoc"] = spot_asoc_instance
            # attach the spot (type) labels
            instance["spot"] = list(spots)
            # attach the asoc (role/relation) labels
            instance["asoc"] = list(asocs)
            output.write(json.dumps(instance, ensure_ascii=False) + "\n")

ff = os.path.join(output_folder,'金融信息_企业破产',"train.json")

ff_instances = [json.loads(line) for line in open(ff, encoding="utf8")]

for i in range(1046,1050):
    print(ff_instances[i])

a, b, yyj = annonote_graph(
    entities=ff_instances[11000]["entity"],
    relations=ff_instances[11000]["relation"],
    events=ff_instances[11000]["event"],
)

data_folder=output_folder

def merge_schema(schema_list: List[RecordSchema]):
    type_set = set()
    role_set = set()
    type_role_dict = defaultdict(list)
    for schema in schema_list:
        for type_name in schema.type_list:
            type_set.add(type_name)
        for role_name in schema.role_list:
            role_set.add(role_name)
        for type_name in schema.type_role_dict:
            type_role_dict[type_name] += schema.type_role_dict[type_name]
    for type_name in type_role_dict:
        type_role_dict[type_name] = list(set(type_role_dict[type_name]))
    return RecordSchema(
        type_list=list(type_set),
        role_list=list(role_set),
        type_role_dict=type_role_dict,
    )
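
# A made-up check of merge_schema: two small schemas with an overlapping type are merged
# into a single schema with duplicate roles removed.
_schema_a = RecordSchema(["人物"], ["母亲"], {"人物": ["母亲"]})
_schema_b = RecordSchema(["人物", "机构"], ["母亲", "所属机构"], {"人物": ["母亲"], "机构": ["所属机构"]})
print(merge_schema([_schema_a, _schema_b]))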

def convert_duuie_to_spotasoc(data_folder, ignore_datasets):
    schema_list = list()
    for task_folder in os.listdir(data_folder):  # skip ignored datasets and non-directories below
        if task_folder in ignore_datasets:
            continue
        if not os.path.isdir(os.path.join(data_folder, task_folder)):  # skip entries that are not folders
            continue
        print(f"Add spot asoc to {task_folder} ...")
        # Read the schema of this single task
        task_schema_file = os.path.join(data_folder, task_folder, "record.schema")
        # Add spot/asoc annotations to the single-task data
        add_spot_asoc_to_single_file(os.path.join(data_folder, task_folder, "train.json"))
        add_spot_asoc_to_single_file(os.path.join(data_folder, task_folder, "val.json"))
        record_schema = RecordSchema.read_from_file(task_schema_file)
        schema_list += [record_schema]
    # Merge the schemas of the different tasks
    multi_schema = merge_schema(schema_list)
    multi_schema.write_to_file(os.path.join(data_folder, "record.schema"))

convert_duuie_to_spotasoc(output_folder,ignore_datasets)
