1. Download GLM-4 from the ModelScope community
https://modelscope.cn/models/ZhipuAI/glm-4-9b-chat
2. Download the model from a Jupyter notebook, changing the download path to your own location; a minimal snippet is sketched below.
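A minimal download sketch using the ModelScope SDK (the modelscope package is a separate install, pip install modelscope; the cache_dir value here is an example chosen to match the model path used in section 4, so adjust it to your own location):

from modelscope import snapshot_download

# Downloads glm-4-9b-chat; the weights land under <cache_dir>/ZhipuAI/glm-4-9b-chat
model_dir = snapshot_download("ZhipuAI/glm-4-9b-chat", cache_dir="/home/ck/llm")
print(model_dir)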
3. Environment setup:
Conda is already installed on this machine, and a conda virtual environment is used for isolation, so the conda setup itself is not demonstrated again here.
Create a new environment for the project: conda create --name ckglm4 python=3.12 (Python 3.12 matches the environment seen in the traceback in section 6; without a python= spec the new environment would have no Python or pip of its own)
Switch to the environment:
conda activate ckglm4
Install the dependencies. Save the pinned list below as requirements.txt; the install command follows the list:
accelerate==0.34.2
aiofiles==23.2.1
aiohappyeyeballs==2.4.0
aiohttp==3.10.5
aiosignal==1.3.1
annotated-types==0.7.0
anyio==4.4.0
attrs==24.2.0
bitsandbytes==0.43.3
blinker==1.8.2
certifi==2024.8.30
charset-normalizer==3.3.2
click==8.1.7
contourpy==1.3.0
cycler==0.12.1
dataclasses-json==0.6.7
distro==1.9.0
einops==0.8.0
fastapi==0.112.4
ffmpy==0.4.0
filelock==3.15.4
Flask==3.0.3
fonttools==4.53.1
frozenlist==1.4.1
fsspec==2024.9.0
gradio==4.43.0
gradio_client==1.3.0
greenlet==3.0.3
h11==0.14.0
httpcore==1.0.5
httpx==0.27.2
huggingface-hub==0.24.6
idna==3.8
importlib_resources==6.4.4
itsdangerous==2.2.0
Jinja2==3.1.4
jiter==0.5.0
joblib==1.4.2
jsonpatch==1.33
jsonpointer==3.0.0
kiwisolver==1.4.7
langchain==0.2.16
langchain-community==0.2.16
langchain-core==0.2.38
langchain-text-splitters==0.2.4
langsmith==0.1.116
markdown-it-py==3.0.0
MarkupSafe==2.1.5
marshmallow==3.22.0
matplotlib==3.9.2
mdurl==0.1.2
mpmath==1.3.0
multidict==6.0.5
mypy-extensions==1.0.0
networkx==3.3
numpy==1.26.4
nvidia-cublas-cu12==12.1.3.1
nvidia-cuda-cupti-cu12==12.1.105
nvidia-cuda-nvrtc-cu12==12.1.105
nvidia-cuda-runtime-cu12==12.1.105
nvidia-cudnn-cu12==9.1.0.70
nvidia-cufft-cu12==11.0.2.54
nvidia-curand-cu12==10.3.2.106
nvidia-cusolver-cu12==11.4.5.107
nvidia-cusparse-cu12==12.1.0.106
nvidia-nccl-cu12==2.20.5
nvidia-nvjitlink-cu12==12.6.68
nvidia-nvtx-cu12==12.1.105
openai==1.44.0
orjson==3.10.7
packaging==24.1
pandas==2.2.2
pillow==10.4.0
psutil==6.0.0
pydantic==2.9.0
pydantic_core==2.23.2
pydub==0.25.1
Pygments==2.18.0
pylzma==0.5.0
pyparsing==3.1.4
python-dateutil==2.9.0.post0
python-multipart==0.0.9
pytz==2024.1
PyYAML==6.0.2
regex==2024.7.24
requests==2.32.3
rich==13.8.0
ruff==0.6.4
safetensors==0.4.5
scikit-learn==1.5.1
scipy==1.14.1
semantic-version==2.10.0
sentence-transformers==3.0.1
sentencepiece==0.2.0
setuptools==72.1.0
shellingham==1.5.4
six==1.16.0
sniffio==1.3.1
SQLAlchemy==2.0.34
sse-starlette==2.1.3
starlette==0.38.4
sympy==1.13.2
tenacity==8.5.0
threadpoolctl==3.5.0
tiktoken==0.7.0
timm==1.0.9
tokenizers==0.19.1
tomlkit==0.12.0
torch==2.4.1
torchvision==0.19.1
tqdm==4.66.5
transformers==4.44.0
triton==3.0.0
typer==0.12.5
typing-inspect==0.9.0
typing_extensions==4.12.2
tzdata==2024.1
urllib3==2.2.2
uvicorn==0.30.6
websockets==12.0
Werkzeug==3.0.4
wheel==0.43.0
yarl==1.11.0
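With the ckglm4 environment active, the whole pinned list can be installed in one shot (assuming the list above was saved as requirements.txt):

pip install -r requirements.txt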
4. Python code
from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
from langchain_community.llms import HuggingFacePipeline
from langchain_community.cache import InMemoryCache
from langchain.globals import set_llm_cache

app = Flask(__name__)

# Enable LangChain's in-memory LLM cache so repeated questions hit the cache
set_llm_cache(InMemoryCache())

# Load the GLM-4 model and tokenizer
model_name = "/home/ck/llm/ZhipuAI/glm-4-9b-chat/"  # replace with your local or remote model path
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")  # cuda:1 assumes a second GPU; use cuda:0 on a single-GPU machine
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float16).to(device)

# Custom generation function exposing the generation parameters
# (temperature/top_p/top_k only take effect when do_sample=True)
def custom_generate(text, max_length=1000, do_sample=False, temperature=0, top_p=0.95, top_k=50):
    inputs = tokenizer(text, return_tensors="pt").to(device)
    output = model.generate(
        inputs.input_ids,
        max_length=max_length,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k
    )
    # Decode the generated tokens back into text
    return tokenizer.decode(output[0], skip_special_tokens=True)

# A custom pipeline that mimics the Hugging Face pipeline interface
class CustomPipeline:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer
        self.task = "text-generation"

    def __call__(self, text):
        generated_text = custom_generate(text)
        return [{"generated_text": generated_text}]

# Pass the custom pipeline to LangChain
generator = CustomPipeline(model=model, tokenizer=tokenizer)

# Integrate the model into LangChain
llm = HuggingFacePipeline(pipeline=generator)

# # Alternative: build a stock Hugging Face pipeline for the LangChain integration
# generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device.index)
# # Integrate the model into LangChain
# llm = HuggingFacePipeline(pipeline=generator)

# Generate an answer for a question
def generate_answer(question):
    question = question.strip()  # normalize the input
    # Use the LangChain LLM object to generate the answer
    # answer = llm(question)
    answer = llm.invoke(question)  # invoke replaces the deprecated __call__/predict in langchain 0.2
    return answer

# Flask API route
@app.route("/ask", methods=["POST"])
def ask_question():
    # Read the question from the request body
    data = request.get_json()
    question = data.get("question", "")
    if not question:
        return jsonify({"error": "No question provided"}), 400
    # Generate the answer
    answer = generate_answer(question)
    # Return it as JSON
    return jsonify({"question": question, "answer": answer})

# Start the Flask app
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
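Save the script (the file name glm4_api.py below is an assumption) and start the service with:

python glm4_api.py

Flask then listens on 0.0.0.0:5000.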
5. Test after startup:
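A minimal test-client sketch, assuming the service is reachable locally on port 5000 (requests is already pinned in the dependency list):

import requests

resp = requests.post(
    "http://127.0.0.1:5000/ask",
    json={"question": "Tell me something interesting about AI."},
)
print(resp.json())  # expected shape: {"question": ..., "answer": ...}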
6. Errors encountered along the way:
File "/home/ck/anaconda2/envs/ckglm4/lib/python3.12/site-packages/langchain_core/language_models/llms.py", line 960, in generate
    new_results = self._generate_helper(
                  ^^^^^^^^^^^^^^^^^^^^^^
File "/home/ck/anaconda2/envs/ckglm4/lib/python3.12/site-packages/langchain_core/language_models/llms.py", line 787, in _generate_helper
    raise e
File "/home/ck/anaconda2/envs/ckglm4/lib/python3.12/site-packages/langchain_core/language_models/llms.py", line 774, in _generate_helper
    self._generate(
File "/home/ck/anaconda2/envs/ckglm4/lib/python3.12/site-packages/langchain_community/llms/huggingface_pipeline.py", line 285, in _generate
    if self.pipeline.task == "text-generation":
       ^^^^^^^^^^^^^^^^^^
AttributeError: 'CustomPipeline' object has no attribute 'task'
The code hit an AttributeError inside HuggingFacePipeline's _generate method: the object held in self.pipeline has no task attribute.
This usually means the custom pipeline object passed to HuggingFacePipeline never sets a task attribute. A Hugging Face pipeline object is expected to carry a task attribute identifying its type, e.g. "text-generation", "translation", "summarization", and so on.
The root cause: when I first wrapped my own model, I did not add the task attribute.
The original code:
# A custom pipeline that mimics the Hugging Face pipeline interface
class CustomPipeline:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer

    def __call__(self, text):
        return custom_generate(text)

# Pass the custom pipeline to LangChain
generator = CustomPipeline(model=model, tokenizer=tokenizer)
The fixed code:
# A custom pipeline that mimics the Hugging Face pipeline interface
class CustomPipeline:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer
        self.task = "text-generation"  # explicitly add the task attribute

    def __call__(self, text):
        return [{"generated_text": custom_generate(text)}]  # match the return format of a Hugging Face pipeline

# Pass the custom pipeline to LangChain
generator = CustomPipeline(model=model, tokenizer=tokenizer)

# Integrate the model into LangChain
llm = HuggingFacePipeline(pipeline=generator)

# Quick test (use invoke for a single prompt string; llm.generate expects a list of prompts)
response = llm.invoke("Tell me something interesting about AI.")
print(response)