Ensemble Learning Model Comparison and Optimization: Banking

1. Data Understanding

2. Data Exploration

3. Data Preparation

4. Training Models

5. Model Optimization


1. Data Understanding

import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv("D:\\课程学习\\机器学习\\银行客户开设定期存款账户情况预测\\banking.csv")
#Print the shape of the DataFrame
print("1.the shape of the DataFrame")
print(df.shape)


# Print the head of the DataFrame
print("2.the head of the DataFrame")
print(df.head())


# Print info of the DataFrame
print("3.the info of the DataFrame")
print(df.info())


# Print statistical description of the DataFrame
print("4.the statistical description of the DataFrame")
print(df.describe())


# Check for any null values in the DataFrame
print("5.Check for any null values in the DataFrame")
datacheck = df.isnull().any()
print(datacheck)
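If per-column counts are more useful than a True/False flag, a one-line variant of the same check:

# Variant: count missing values per column instead of a boolean flag
print(df.isnull().sum())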


# Check for duplicates
print("6.Check for duplicates")
duplicates = df.duplicated()
print(f"Number of duplicated rows: {duplicates.sum()}")


print("7.See the duplicated rows")
# See the duplicated rows:
if duplicates.sum() > 0:
    print("\nDuplicated Rows:")
    print(df[duplicates])


# Pick out the non-numeric and numeric columns
non_numeric_columns = df.select_dtypes(exclude=['number']).columns.to_list()
numeric_columns = df.select_dtypes(include=['number']).columns
print(non_numeric_columns)

2. Data Exploration

# Statistical description of continuous variables
print("1.statistical description of continuous variables")
df.describe()


print("2.Check the distribution of labels")
print(df['y'].value_counts())


# Histograms of continuous variables
print("3.histograms of continuous variables")
num_cols = df.select_dtypes(include=['int64', 'float64']).columns
fig, axes = plt.subplots(nrows=len(num_cols), figsize=(6, 3 * len(num_cols)))
df[num_cols].hist(ax=axes, grid=False)
plt.tight_layout()
plt.show()


# Histograms of categorical variables
print("4.plot the histograms of categorical variables")
#categorical_features = df.select_dtypes(exclude=['number']).columns.to_list()
categorical_features = ["marital", "default", "housing", "loan", "contact", "poutcome", "y"]
fig, ax = plt.subplots(1, len(categorical_features), figsize=(25, 3))

for i, categorical_feature in enumerate(categorical_features):
    df[categorical_feature].value_counts().plot(kind="bar", ax=ax[i], rot=0).set_title(categorical_feature)

plt.tight_layout()
plt.show()


print("5.Check for high correlations")
import numpy as np

# Check for high correlations
correlation_matrix = df.corr().abs()

# Get pairs of highly correlated features
high_corr_var = np.where(correlation_matrix > 0.6)
high_corr_var = [(correlation_matrix.columns[x], correlation_matrix.columns[y])
                 for x, y in zip(*high_corr_var) if x != y and x < y]
print("Highly correlated pairs:", high_corr_var)

# Filter pairs by correlation threshold
threshold = 0.7
high_corr_pairs = {}
for column in correlation_matrix.columns:
    for index in correlation_matrix.index:
        # We'll only consider pairs of different columns and correlations above the threshold
        if column != index and abs(correlation_matrix[column][index]) > threshold:
            # We'll also ensure we don't duplicate pairs (i.e., A-B and B-A)
            sorted_pair = tuple(sorted([column, index]))
            if sorted_pair not in high_corr_pairs:
                high_corr_pairs[sorted_pair] = correlation_matrix[column][index]

# Display the high correlation pairs
for pair, corr_value in high_corr_pairs.items():
    print(f"Correlation between {pair[0]} and {pair[1]}: {corr_value:.2f}")


# Correlation heatmap
print("6.Correlation Heatmap")
df_duplicate = df.copy()
df_duplicate['y'] = df['y'].map({'no': 0, 'yes': 1})
correlation_matrix2 = df_duplicate.corr(numeric_only=True).abs()
plt.figure(figsize=(12, 10))
sns.heatmap(correlation_matrix2, cmap='Greens', annot=True)
plt.show()


# Plotting count plots for each categorical variable with respect to the 'y' column
print("Plotting count plots for each categorical variable with respect to the 'y' column")
categorical_cols = df.select_dtypes(include=['object']).columns.tolist()
print(categorical_cols)
#categorical_cols.remove('y')

# Setting up the figure size
plt.figure(figsize=(20, 20))

for i, col in enumerate(categorical_cols, 1):
    plt.subplot(4, 3, i)
    sns.countplot(data=df, x=col, hue='y', order=df[col].value_counts().index)
    plt.title(f'Distribution of {col} by y')
    plt.xticks(rotation=45)
    plt.tight_layout()

plt.show()


# Distribution of y by Month
print("Distribution of y by Month")
plt.figure(figsize=(11, 5))
sns.countplot(data=df, x='month', hue='y', order=['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'])
plt.title('Distribution of y by Month')
plt.ylabel('Count')
plt.xlabel('Month')
plt.tight_layout()
plt.show()
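Because raw counts are dominated by the majority class, the per-month subscription rate can be more telling; a small sketch (assuming y holds the two label values shown by value_counts above):

# Sketch: share of each label value within every month
print(df.groupby('month')['y'].value_counts(normalize=True).unstack())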


3. Data Preparation

import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Load the dataset and drop duplicates
df = pd.read_csv("D:\\课程学习\\机器学习\\银行客户开设定期存款账户情况预测\\banking.csv").drop_duplicates()
# Convert the 'y' column to dummy variables
#y = pd.get_dummies(df['y'], drop_first=True)
y = df.iloc[:, -1]


# Process client-related data
bank_client = df.iloc[:, 0:7]
labelencoder_X = LabelEncoder()
columns_to_encode = ['job', 'marital', 'education', 'default', 'housing', 'loan']
for col in columns_to_encode:
    bank_client[col] = labelencoder_X.fit_transform(bank_client[col])

# Process bank-related data
bank_related = df.iloc[:, 7:11]
#columns_to_encode=df.select_dtypes(include=['object']).columns.tolist()
columns_to_encode = ['contact', 'month', 'day_of_week']
for col in columns_to_encode:
    bank_related[col] = labelencoder_X.fit_transform(bank_related[col])

# Process social & economic data (se) and other bank data (others)
bank_se = df.loc[:, ['emp_var_rate', 'cons_price_idx', 'cons_conf_idx', 'euribor3m', 'nr_employed']]
bank_others = df.loc[:, ['campaign', 'pdays', 'previous', 'poutcome']]
bank_others['poutcome'] = bank_others['poutcome'].replace(['nonexistent', 'failure', 'success'], [1, 2, 3])

# Concatenate all the processed parts, reorder columns, and save to CSV
bank_final = pd.concat([bank_client, bank_related, bank_others, bank_se, y], axis=1)
columns_order = ['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month',
                 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp_var_rate',
                 'cons_price_idx', 'cons_conf_idx', 'euribor3m', 'nr_employed', 'y']

bank_final = bank_final[columns_order]

bank_final.to_csv('bank_final.csv', index=False)

# Display basic information about the final dataframe
print(bank_final.shape)
print(bank_final.head())
print(bank_final.info())
print(bank_final.describe())

# Check for any null values in the DataFrame
datacheck = bank_final.isnull().any()
print(datacheck)

count = bank_final['y'].value_counts()
print(count)
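One caveat on the encoding above: LabelEncoder maps nominal categories such as job or marital to arbitrary integers, which implies an ordering. Tree ensembles tolerate this, but a linear model like LogisticRegression may be penalized. A hypothetical alternative sketch with one-hot encoding (not the pipeline used here):

# Hypothetical alternative: one-hot encode the nominal columns instead of label encoding.
# This avoids imposing an artificial order on the categories.
df_onehot = pd.get_dummies(df, columns=['job', 'marital', 'education', 'default',
                                        'housing', 'loan', 'contact', 'month',
                                        'day_of_week', 'poutcome'])
print(df_onehot.shape)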


4. Training Models

import warnings
warnings.filterwarnings("ignore")
import pandas as pd
from sklearn.model_selection import train_test_split  


x = bank_final.drop('y', axis=1)
y = bank_final['y']

# Split bank_final into training and testing sets (80%-20%)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

print(X_train.shape)
print(X_test.shape)
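Since the labels are imbalanced (see the value_counts in Section 2), a stratified split keeps the class ratio identical in both sets; a drop-in variant of the same call:

# Stratified variant: preserve the y class ratio in both splits
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=42, stratify=y)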

import numpy as np 
import pandas as pd 
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score

# Compute evaluation metrics; the values are collected into the df_eval DataFrame below
def evaluation(y_test, y_predict):
    report = classification_report(y_test, y_predict, output_dict=True)
    accuracy = report['accuracy']
    s = report['weighted avg']
    precision = s['precision']
    recall = s['recall']
    f1_score = s['f1-score']
    #kappa = cohen_kappa_score(y_test, y_predict)
    return accuracy, precision, recall, f1_score
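
As a quick check, the helper returns a 4-tuple that unpacks directly (toy labels for illustration):

# Example with toy labels: unpack accuracy and the weighted-average metrics
acc, prec, rec, f1 = evaluation([0, 1, 1, 0], [0, 1, 0, 0])
print(f"accuracy={acc:.2f} precision={prec:.2f} recall={rec:.2f} f1={f1:.2f}")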


import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve,auc

def roc(y_test, y_proba):
    gbm_auc = roc_auc_score(y_test, y_proba[:, 1])  # compute AUC
    gbm_fpr, gbm_tpr, gbm_thresholds = roc_curve(y_test, y_proba[:, 1])  # compute ROC curve points
    return gbm_auc, gbm_fpr, gbm_tpr
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score
import time
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

model0 = SVC(kernel="rbf", random_state=77, probability=True)  # defined but not included in the comparison below
model1 = MLPClassifier(hidden_layer_sizes=(16,8), random_state=77, max_iter=10000)
model2 = LogisticRegression()
model3 = RandomForestClassifier()
model4 = AdaBoostClassifier()
model5 = GradientBoostingClassifier()
model6 = XGBClassifier()
model7 = LGBMClassifier()


model_list=[model1,model2,model3,model4,model5,model6,model7]
model_name=['BP','Logistic Regression', 'Random Forest', 'AdaBoost', 'GBDT', 'XGBoost','LightGBM']

df_eval=pd.DataFrame(columns=['Accuracy','Precision','Recall','F1_score','Running_time'])
plt.figure(figsize=(9, 7))
plt.title("ROC")
plt.plot([0,1],[0,1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.xlabel('false positive rate')  # specificity = 1 - fpr
plt.ylabel('true positive rate')  # sensitivity = tpr
for i in range(7):
    model_C=model_list[i]
    name=model_name[i]
    
    start = time.time()   
    model_C.fit(X_train, y_train)
    end = time.time()
    running_time = end - start
    
    pred=model_C.predict(X_test)
    pred_proba = model_C.predict_proba(X_test) 
    #s=classification_report(y_test, pred)
    s=evaluation(y_test,pred)
    s=list(s)
    s.append(running_time)
    df_eval.loc[name,:]=s
    
    gbm_auc,gbm_fpr, gbm_tpr=roc(y_test,pred_proba)
    plt.plot(gbm_fpr, gbm_tpr, label="%s (AUC=%.4f)" % (name, gbm_auc))
    
print(df_eval)

plt.legend(loc='upper right')
plt.show()
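If a ranking is easier to read than the raw table, the summary can be sorted before printing; a small optional addition:

# Optional: rank the models by F1 score
print(df_eval.sort_values('F1_score', ascending=False))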



# Confusion matrix graph
from lightgbm import LGBMClassifier
from sklearn.metrics import confusion_matrix

model=LGBMClassifier()
model.fit(X_train, y_train)   
pred=model.predict(X_test)
Confusion_Matrixn = confusion_matrix(y_test, pred)
Confusion_Matrix = confusion_matrix(y_test, pred,normalize='true')
print(Confusion_Matrixn)

from sklearn.metrics import ConfusionMatrixDisplay

# One way: ConfusionMatrixDisplay
disp = ConfusionMatrixDisplay(confusion_matrix=Confusion_Matrix)  # display_labels=model.classes_
disp.plot(cmap='Greens')  # cmap accepts any matplotlib colormap name, e.g. 'Blues', 'Greens', 'viridis'

#disp.color='blue'
plt.show()
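On scikit-learn 1.0 or newer, the same normalized plot can be produced in a single call; a sketch:

# One-call alternative (sklearn >= 1.0): plot directly from predictions
ConfusionMatrixDisplay.from_predictions(y_test, pred, normalize='true', cmap='Greens')
plt.show()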


5. Model Optimization

# Resample the training set: undersample class 0 to 10,000 and oversample class 1 to 10,000
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
# Apply SMOTE to oversample class 1 to 10,000 samples
smote_strategy = {1: 10000}
smote = SMOTE(sampling_strategy=smote_strategy, random_state=42)
x_train_resampled, y_train_resampled = smote.fit_resample(X_train, y_train)

# Apply RandomUnderSampler to undersample class 0 to 10,000 samples
undersample_strategy = {0: 10000}
undersampler = RandomUnderSampler(sampling_strategy=undersample_strategy, random_state=42)
x_train_resampled, y_train_resampled = undersampler.fit_resample(x_train_resampled, y_train_resampled)


# Check the new class distribution
print(y_train_resampled.value_counts())
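The two resampling steps can also be chained with imblearn's Pipeline so they always run in the same order; a sketch under the same sampling targets:

from imblearn.pipeline import Pipeline

# Sketch: SMOTE to 10,000 positives, then random undersampling to 10,000 negatives
resampler = Pipeline([
    ('smote', SMOTE(sampling_strategy={1: 10000}, random_state=42)),
    ('under', RandomUnderSampler(sampling_strategy={0: 10000}, random_state=42)),
])
x_train_resampled, y_train_resampled = resampler.fit_resample(X_train, y_train)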


# Check for NaN values in x_train_resampled after resampling
nan_values = x_train_resampled.isnull().sum()
print("\nNumber of NaN values in each column:")
print(nan_values)


# If there are any NaN values, print the columns that contain them
nan_columns = nan_values[nan_values > 0].index.tolist()
if nan_columns:
    print("\nColumns with NaN values:", nan_columns)
else:
    print("\nThere are no NaN values in the resampled data.")


# Before resampling
print(y_train.value_counts())

# After resampling
print(y_train_resampled.value_counts())


from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score
import time
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

model0 = SVC(kernel="rbf", random_state=77, probability=True)  # defined but not included in the comparison below
model1 = MLPClassifier(hidden_layer_sizes=(16,8), random_state=77, max_iter=10000)
model2 = LogisticRegression()
model3 = RandomForestClassifier()
model4 = AdaBoostClassifier()
model5 = GradientBoostingClassifier()
model6 = XGBClassifier()
model7 = LGBMClassifier()


model_list=[model1,model2,model3,model4,model5,model6,model7]
model_name=['BP','Logistic Regression', 'Random Forest', 'AdaBoost', 'GBDT', 'XGBoost','LightGBM']

df_eval=pd.DataFrame(columns=['Accuracy','Precision','Recall','F1_score','Running_time'])
plt.figure(figsize=(9, 7))
plt.title("ROC")
plt.plot([0,1],[0,1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.xlabel('false positive rate')  # specificity = 1 - fpr
plt.ylabel('true positive rate')  # sensitivity = tpr
for i in range(7):
    model_C=model_list[i]
    name=model_name[i]
    
    start = time.time()   
    model_C.fit(x_train_resampled, y_train_resampled)
    end = time.time()
    running_time = end - start
    
    pred=model_C.predict(X_test)
    pred_proba = model_C.predict_proba(X_test) 
    #s=classification_report(y_test, pred)
    s=evaluation(y_test,pred)
    s=list(s)
    s.append(running_time)
    df_eval.loc[name,:]=s
    
    gbm_auc,gbm_fpr, gbm_tpr=roc(y_test,pred_proba)
    plt.plot(gbm_fpr, gbm_tpr, label="%s (AUC=%.4f)" % (name, gbm_auc))
    
print(df_eval)
plt.legend(loc='upper right')
plt.show()


# Confusion matrix graph
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix

model=XGBClassifier()
model.fit(x_train_resampled, y_train_resampled)   
pred=model.predict(X_test)

Confusion_Matrixn = confusion_matrix(y_test, pred)
Confusion_Matrix = confusion_matrix(y_test, pred,normalize='true')
print(Confusion_Matrixn)

from sklearn.metrics import ConfusionMatrixDisplay

# One way: ConfusionMatrixDisplay
disp = ConfusionMatrixDisplay(confusion_matrix=Confusion_Matrix)  # display_labels=model.classes_
disp.plot(cmap='Greens')  # cmap accepts any matplotlib colormap name, e.g. 'Blues', 'Greens', 'viridis'

#disp.color='blue'
plt.show()

Dataset source: https://www.heywhale.com/home/user/profile/6535165d9217caa11b5ee5b3/overview

