Paper companion notes: a walkthrough of the LLM-Mob code

Paper notes: Where Would I Go Next? Large Language Models as Human Mobility Predictors (CSDN blog)

1 Main function

1.1 Imports

import os
import pickle
import time
import ast
import logging
from datetime import datetime
import pandas as pd
from openai import OpenAI

client = OpenAI(
        api_key=...
    )
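The API key is elided in the original. A common initialization pattern (my assumption; the post does not show it) is to read the key from an environment variable:

import os
from openai import OpenAI

# Assumption: the key is supplied via the OPENAI_API_KEY environment variable;
# the 1.x OpenAI client also reads this variable by default if api_key is omitted.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])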

1.2 Parameter settings

dataname = "geolife"  
# 数据集名称
    
num_historical_stay = 40  
# 长期mobility 的跨度
    
num_context_stay = 5  
# 短期mobility的跨度

top_k = 10  
# 输出location的数量

with_time = False  
# 是否将目标stay的时间信息融入进来

sleep_single_query = 0.1
'''
the sleep time between queries;
after the recent updates the reliability of the API has greatly improved,
so the sleep time can be reduced
'''

sleep_if_crash = 1
'''
the sleep time if the server crashes
'''

output_dir = f"output/{dataname}/top10_wot"
'''
the output path ("wot" presumably abbreviates "without time", matching with_time = False)
'''

log_dir = f"logs/{dataname}/top10_wot"
'''
the log directory
'''

1.3 Loading the dataset

tv_data, test_file = get_dataset(dataname)
# Number of total test sample:  3459

'''
Note: this count is smaller than the number of rows in f"data/{dataname}/{dataname}_test.csv".
'''

1.4 Creating the logger

logger = get_logger('my_logger', log_dir=log_dir)

1.5 Extracting user ids

uid_list = get_unqueried_user(dataname, output_dir)
print(f"uid_list: {uid_list}")

'''
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]
Number of the remaining id: 45
uid_list: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]
'''

1.6 Querying all users

query_all_user(
    client, 
    dataname, 
    uid_list, 
    logger, 
    tv_data, 
    num_historical_stay, 
    num_context_stay,
    test_file, 
    output_dir=output_dir, 
    top_k=top_k, 
    is_wt=with_time,
    sleep_query=sleep_single_query, 
    sleep_crash=sleep_if_crash)

2 The get_dataset function

def get_dataset(dataname):
    
    # Get training and validation set and merge them
    train_data = pd.read_csv(f"data/{dataname}/{dataname}_train.csv")
    valid_data = pd.read_csv(f"data/{dataname}/{dataname}_valid.csv")
    # read the training and validation sets

    # Get test data
    with open(f"data/{dataname}/{dataname}_testset.pk", "rb") as f:
        test_file = pickle.load(f)  # test_file is a list of dict
    # the test set: a list of dicts

    # merge train and valid data
    tv_data = pd.concat([train_data, valid_data], ignore_index=True)
    tv_data.sort_values(['user_id', 'start_day', 'start_min'], inplace=True)
    if dataname == 'geolife':
        tv_data['duration'] = tv_data['duration'].astype(int)
    # merge train and valid sets and sort them chronologically per user

    print("Number of total test sample: ", len(test_file))
    return tv_data, test_file
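The post never spells out the CSV schema, but the columns the code relies on can be inferred from usage. A minimal dummy frame, under that assumption:

import pandas as pd

# Columns used downstream by get_dataset/organise_data
# (schema inferred from this code, not documented in the post):
dummy = pd.DataFrame({
    'user_id': [1, 1],
    'start_day': [0, 0],       # day index, used for chronological sorting
    'start_min': [1268, 298],  # minutes since midnight (1268 -> '09:08 PM')
    'weekday': [2, 3],         # 0 = Monday, ..., 6 = Sunday
    'duration': [466, 187],    # stay duration; cast to int for geolife
    'location_id': [10, 17],
})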

3 get_logger

def get_logger(logger_name, log_dir='logs/'):
    # Create log dir
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Create a logger instance
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    # create a logger instance named logger_name and set its level to DEBUG

    # Create a console handler and set its log level
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    # create a console handler that prints log records to the console

    # Create a file handler and set its log level
    current_datetime = datetime.now()
    formatted_datetime = current_datetime.strftime("%Y%m%d_%H%M%S")
    # get the current date and time formatted as "YYYYMMDD_HHMMSS"; used for the log file name
    log_file = 'log_file' + formatted_datetime + '.log'
    # append the timestamp to "log_file" to form the file name, e.g. log_file20230424_153000.log
    log_file_path = os.path.join(log_dir, log_file)
    # build the full log file path with os.path.join(log_dir, log_file)

    
    file_handler = logging.FileHandler(log_file_path)
    file_handler.setLevel(logging.DEBUG)
    # create a file handler that writes log records to the specified file

    

    # Create a formatter and add it to the handlers
    formatter = logging.Formatter('%(message)s')
    # create a formatter that emits only the message body, '%(message)s'

    console_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)
    # attach the formatter to both handlers

    # Add the handlers to the logger
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)

    return logger
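Usage matches section 1.4. One caveat: logging.getLogger(name) returns the same object for the same name, so calling get_logger twice with the same logger_name attaches a second pair of handlers and every message is then emitted twice. A quick check:

logger = get_logger('my_logger', log_dir='logs/geolife/top10_wot')
logger.info("hello")  # printed to the console and appended to logs/geolife/top10_wot/log_fileYYYYMMDD_HHMMSS.log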

4 get_unqueried_user

Returns the user ids of the dataset that have not been queried yet (i.e. have no output CSV).

def get_unqueried_user(dataname, output_dir='output/'):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)


    if dataname == "geolife":
        all_user_id = [i+1 for i in range(45)]
    elif dataname == "fsq":
        all_user_id = [i+1 for i in range(535)]


    processed_id = [int(file.split('.')[0]) for file in os.listdir(output_dir) if file.endswith('.csv')]
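    # e.g. an existing file "01.csv" means user 1 already has saved results:
    # int(file.split('.')[0]) recovers 1, so that id is skipped below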
    remain_id = [i for i in all_user_id if i not in processed_id]
    print(remain_id)
    print(f"Number of the remaining id: {len(remain_id)}")
    return remain_id

5 query_all_user

def query_all_user(client, dataname, uid_list, logger, train_data, num_historical_stay,
                   num_context_stay, test_file, top_k, is_wt, output_dir, sleep_query, sleep_crash):
    for uid in uid_list:
        logger.info(f"=================Processing user {uid}==================")
        user_train = get_user_data(train_data, uid, num_historical_stay, logger)
        # the current user's long-term historical mobility (the most recent M stays)




        historical_data, predict_X, predict_y = organise_data(dataname, user_train, test_file, uid, logger, num_context_stay)
        '''
        Returns, for this user id:
        - long-term mobility (shared across all of this user's test samples)
        - short-term mobility (the nearest num_context_stay stays)
        - ground truth

        Each record looks like ('09:08 PM', 'Wednesday', 466, 10).
        '''
        
        single_user_query(client, dataname, uid, historical_data, predict_X, predict_y, logger, top_k=top_k, 
                          is_wt=is_wt, output_dir=output_dir, sleep_query=sleep_query, sleep_crash=sleep_crash)

5.1 get_user_data

Extracts the current user's long-term historical mobility (the most recent M = num_historical_stay stays).

def get_user_data(train_data, uid, num_historical_stay, logger):
    user_train = train_data[train_data['user_id']==uid]
    # all records belonging to the current user id

    logger.info(f"Length of user {uid} train data: {len(user_train)}")
    # total number of records for this user

    user_train = user_train.tail(num_historical_stay)
    # keep only the most recent num_historical_stay stays as the long-term history

    logger.info(f"Number of user historical stays: {len(user_train)}")
    return user_train

5.2 organise_data

Returns, for this user id:
- long-term mobility (shared across all of this user's test samples)
- short-term mobility (the nearest five stays)
- ground truth

Each record looks like ('09:08 PM', 'Wednesday', 466, 10).

def organise_data(dataname, user_train, test_file, uid, logger, num_context_stay=5):
    # Use another way of organising data
    # user_train already holds only the most recent M records
    historical_data = []

    if dataname == 'geolife':
        for _, row in user_train.iterrows():
            historical_data.append(
                (convert_to_12_hour_clock(int(row['start_min'])), 
                int2dow(row['weekday']),
                int(row['duration']),
                row['location_id'])
                )
    elif dataname == 'fsq':
        for _, row in user_train.iterrows():
            historical_data.append(
                (convert_to_12_hour_clock(int(row['start_min'])),
                int2dow(row['weekday']),
                row['location_id'])
                )
    '''
    Each appended tuple contains:

    time-of-day: the start minute converted to an HH:MM AM/PM string
    day-of-week: the integer day converted to a weekday name
    duration   : the stay duration cast to int (omitted for fsq)
    location id: the id of the visited location

    e.g.
    [('09:08 PM', 'Wednesday', 466, 10),
     ('04:58 AM', 'Thursday', 187, 17),
     ('08:07 AM', 'Thursday', 146, 1),
     ('10:35 AM', 'Thursday', 193, 17),
     ('01:54 PM', 'Thursday', 556, 10)]
    '''

    logger.info(f"historical_data: {historical_data}")
    logger.info(f"Number of historical_data: {len(historical_data)}")

    # Get user ith test data
    list_user_dict = []
    for i_dict in test_file:
        if dataname == 'geolife':
            i_uid = i_dict['user_X'][0]
        elif dataname == 'fsq':
            i_uid = i_dict['user_X']
        if i_uid == uid:
            list_user_dict.append(i_dict)
    # collect the test records whose user id matches the current uid,
    # i.e. the trajectories that need to be predicted for this user

    predict_X = []
    predict_y = []
    for i_dict in list_user_dict:
        construct_dict = {}
        if dataname == 'geolife':
            context = list(zip([convert_to_12_hour_clock(int(item)) for item in i_dict['start_min_X'][-num_context_stay:]], 
                            [int2dow(i) for i in i_dict['weekday_X'][-num_context_stay:]], 
                            [int(i) for i in i_dict['dur_X'][-num_context_stay:]], 
                            i_dict['X'][-num_context_stay:]))
        elif dataname == 'fsq':
            context = list(zip([convert_to_12_hour_clock(int(item)) for item in i_dict['start_min_X'][-num_context_stay:]], 
                            [int2dow(i) for i in i_dict['weekday_X'][-num_context_stay:]], 
                            i_dict['X'][-num_context_stay:]))
        '''
        For geolife, context is a list of five elements, each with the same
        format as the tuples appended to historical_data above.
        '''


        target = (convert_to_12_hour_clock(int(i_dict['start_min_Y'])), int2dow(i_dict['weekday_Y']), None, "<next_place_id>")
        # e.g. ('12:36 AM', 'Friday', None, '<next_place_id>')

        construct_dict['context_stay'] = context
        construct_dict['target_stay'] = target
        # build the model input: the nearest N stays plus the target's time-of-day and weekday

        predict_y.append(i_dict['Y'])
        # the ground-truth location id
        predict_X.append(construct_dict)
        # the constructed input
    
    

    logger.info(f"Number of predict_data: {len(predict_X)}")
    #这个user_id在test 数据中有多少条记录

    logger.info(f"predict_y: {predict_y}")
    
    logger.info(f"Number of predict_y: {len(predict_y)}")
    #虽然这个数量应该和predict_X的一样

    return historical_data, predict_X, predict_y
    # Returns: the long-term mobility (historical_data), the short-term
    # mobility (predict_X), and the ground truth (predict_y).

5.2.1 convert_to_12_hour_clock

Converts minutes-since-midnight into an HH:MM AM/PM string.

def convert_to_12_hour_clock(minutes):
    # in the raw data, minutes counts from 00:00 of the current day
    if minutes < 0 or minutes >= 1440:
        return "Invalid input. Minutes should be between 0 and 1439."

    hours = minutes // 60
    minutes %= 60

    period = "AM"
    if hours >= 12:
        period = "PM"

    if hours == 0:
        hours = 12
    elif hours > 12:
        hours -= 12

    return f"{hours:02d}:{minutes:02d} {period}"
    # formatted as HH:MM AM/PM
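A few spot checks of the conversion:

# convert_to_12_hour_clock(1268) -> '09:08 PM'  (21:08)
# convert_to_12_hour_clock(0)    -> '12:00 AM'  (midnight)
# convert_to_12_hour_clock(720)  -> '12:00 PM'  (noon)
# convert_to_12_hour_clock(1500) -> 'Invalid input. Minutes should be between 0 and 1439.'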

5.2.2 int2dow

Converts an integer day index into a weekday name.

def int2dow(int_day):
    tmp = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday',
           3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}
    return tmp[int_day]
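One quick example of the mapping:

int2dow(2)  # -> 'Wednesday'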

5.3 single_user_query

Queries the model for every test sample of one user and saves the predicted locations.

def single_user_query(client, dataname, uid, historical_data, predict_X, predict_y,logger, top_k, is_wt, output_dir, sleep_query, sleep_crash):
    # Initialize variables
    total_queries = len(predict_X)
    logger.info(f"Total_queries: {total_queries}")
    # total number of queries for this user

    processed_queries = 0
    current_results = pd.DataFrame({
        'user_id': None,
        'ground_truth': None,
        'prediction': None,
        'reason': None
    }, index=[])

    out_filename = f"{uid:02d}" + ".csv"
    out_filepath = os.path.join(output_dir, out_filename)



    try:
        # Attempt to load previous results if available
        current_results = load_results(out_filepath)
        processed_queries = len(current_results)
        logger.info(f"Loaded {processed_queries} previous results.")
    except FileNotFoundError:
        logger.info("No previous results found. Starting from scratch.")
    '''
    Resume support: load the predictions already saved for this user.
    '''



    # Process remaining queries
    for i in range(processed_queries, total_queries):
        # predict this user's remaining queries

        logger.info(f'The {i+1}th sample: ')
        if dataname == 'geolife':
            if is_wt is True:
                if top_k == 1:
                    completions = single_query_top1(client, historical_data, predict_X[i])
                elif top_k == 10:
                    completions = single_query_top10(client, historical_data, predict_X[i])
                else:
                    raise ValueError(f"The top_k must be one of 1, 10. However, {top_k} was provided")
            else:
                if top_k == 1:
                    completions = single_query_top1_wot(client, historical_data, predict_X[i])
                elif top_k == 10:
                    completions = single_query_top10_wot(client, historical_data, predict_X[i])
                else:
                    raise ValueError(f"The top_k must be one of 1, 10. However, {top_k} was provided")
        elif dataname == 'fsq':
            if is_wt is True:
                if top_k == 1:
                    completions = single_query_top1_fsq(client, historical_data, predict_X[i])
                elif top_k == 10:
                    completions = single_query_top10_fsq(client, historical_data, predict_X[i])
                else:
                    raise ValueError(f"The top_k must be one of 1, 10. However, {top_k} was provided")
            else:
                if top_k == 1:
                    completions = single_query_top1_wot_fsq(client, historical_data, predict_X[i])
                elif top_k == 10:
                    completions = single_query_top10_wot_fsq(client, historical_data, predict_X[i])
                else:
                    raise ValueError(f"The top_k must be one of 1, 10. However, {top_k} was provided")
        '''
        The full GPT response; the query variant is chosen by (dataset, is_wt, top_k).
        '''




        response = completions.choices[0].message.content
        # the text content of the GPT response

        # Log the prediction results and usage.
        logger.info(f"Pred results: {response}")
        logger.info(f"Ground truth: {predict_y[i]}")
        logger.info(dict(completions).get('usage'))
        # token usage



        try:
            res_dict = ast.literal_eval(response)  
            # parse the GPT output into a dict

            if top_k != 1:
                res_dict['prediction'] = str(res_dict['prediction'])

            res_dict['user_id'] = uid
            res_dict['ground_truth'] = predict_y[i]
        except Exception as e:
            res_dict = {'user_id': uid, 'ground_truth': predict_y[i], 'prediction': -100, 'reason': None}
            logger.info(e)
            logger.info(f"API request failed for the {i+1}th query")
            # time.sleep(sleep_crash)
            # if any step above fails, the prediction is marked as failed (prediction = -100)
        finally:
            new_row = pd.DataFrame(res_dict, index=[0])  
            # A dataframe with only one record
            # a one-row dataframe holding the current prediction

            current_results = pd.concat([current_results, new_row], ignore_index=True)  
            # Add new row to the current df
            # the accumulated predictions for this user

    # Save the current results
    current_results.to_csv(out_filepath, index=False)
    #save_results(current_results, out_filename)
    logger.info(f"Saved {len(current_results)} results to {out_filepath}")
    # save this user's prediction results

    # Continue processing remaining queries
    if len(current_results) < total_queries:
        #remaining_predict_X = predict_X[len(current_results):]
        #remaining_predict_y = predict_y[len(current_results):]
        #remaining_queries = queries[len(current_results):]
        logger.info("Restarting queries from the last successful point.")
        single_user_query(client, dataname, uid, historical_data, predict_X, predict_y,
                          logger, top_k, is_wt, output_dir, sleep_query, sleep_crash)
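For ast.literal_eval to succeed, the model must answer with a Python-literal dict. A well-formed top-10 reply would look like the following (an illustrative format example, not actual model output):

# {'prediction': [10, 17, 1, 193, 556, 38, 2, 27, 5, 3],
#  'reason': 'The user most often stays at place 10 on weekday evenings.'}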

5.3.1 load_results

Simply reads the prediction records previously saved for this user.

def load_results(filename):
    # Load previously saved results from a CSV file    
    results = pd.read_csv(filename)
    return results

5.3.2 single_query_top1_wot

5.3.3 single_query_top1

5.3.4 single_query_top10

The post does not reproduce the bodies of these query functions. It only notes that the top-10 variant is almost identical to the top-1 one, differing by a single extra sentence in the prompt; unfortunately that sentence is not quoted in the post.
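Based on how these functions are called above (client, historical_data, predict_X[i]) and how single_user_query parses the reply, a minimal sketch could look like this. The prompt wording below is my assumption for illustration, not the authors' actual prompt (which lives in the LLM-Mob repository):

def single_query_top10(client, historical_data, X):
    # Hypothetical prompt text; only the overall shape is implied by the code.
    prompt = f"""
    Your task is to predict a user's next location based on their activity pattern.
    <history>: {historical_data}
    <context>: {X['context_stay']}
    <target_stay>: {X['target_stay']}
    Reply with a Python dict: {{"prediction": <the 10 most likely place ids>,
    "reason": <a short explanation>}}.
    """
    return get_chat_completion(client, prompt)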

5.3.5 get_chat_completion

Sends a prompt to the GPT API and returns the completion object.

def get_chat_completion(client, prompt, model="gpt-3.5-turbo-0613", json_mode=False, max_tokens=1200):
    """
    args:
        client: the openai client object (new in 1.x version)
        prompt: the prompt to be completed
        model: specify the model to use
        json_mode: whether to return the response in JSON format (new in 1.x version)
    """
    messages = [{"role": "user", "content": prompt}]
    if json_mode:
        completion = client.chat.completions.create(
            model=model,
            response_format={"type": "json_object"},
            messages=messages,
            temperature=0,  # the degree of randomness of the model's output
            max_tokens=max_tokens  # the maximum number of tokens to generate
        )
    else:
        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=0,
            max_tokens=max_tokens
        )
    # res_content = completion.choices[0].message.content
    # token_usage = completion.usage
    return completion
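A usage sketch (the prompt string is arbitrary, assuming a valid client):

completion = get_chat_completion(client, "List three colors as a Python list.")
print(completion.choices[0].message.content)  # the model's reply text
print(completion.usage)                       # prompt/completion token counts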
