-
dictMenu = {'卡布奇洛': 32, '摩卡': 30, '抹茶蛋糕': 28, '布朗尼': 26} stores the prices of your afternoon-tea set for two (two coffees and two desserts). Write a program so Python computes and outputs the total cost.
dictMenu = {'卡布奇洛': 32, '摩卡': 30, '抹茶蛋糕': 28, '布朗尼': 26}
total = 0
for price in dictMenu.values():
    total += price
print(f"消费总额为:{total}元")
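Since all four prices are the dictionary's values, the loop can be replaced by the built-in sum(); an equivalent one-line variant:

total = sum(dictMenu.values())  # add up every price in one call
print(f"消费总额为:{total}元")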
-
Using the dictionary data type, write a chatbot program that holds a simple question-and-answer conversation and can learn. (a) Build the initial dialogue dictionary yourself, e.g. memory = {'你在干嘛': '在呼吸和想你', '你喜欢哪一天': '跟你聊天', '你在想什么': '我在想你'}. (b) For questions the bot cannot answer, ask the user to supply the answer and update the dictionary with it. (c) A single space as input marks the end of the chat.
def chat_bot():
    # Initial dialogue dictionary: question -> answer
    memory = {"你在干嘛": "在呼吸和想你", "你喜欢哪一天": "跟你聊天", "你在想什么": "我在想你"}
    while True:
        question = input("你:")
        if question in memory:
            print("机器人:" + memory[question])
        elif question == " ":
            # A single space ends the chat
            print("机器人:再见!")
            break
        else:
            # Learn: ask the user for the answer and store it
            answer = input("机器人:对不起,我不知道该如何回答这个问题。请告诉我你的答案:")
            memory[question] = answer

chat_bot()
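A hypothetical session with this bot, where [space] marks the single-space input that ends the chat:

你:你在干嘛
机器人:在呼吸和想你
你:你吃饭了吗
机器人:对不起,我不知道该如何回答这个问题。请告诉我你的答案:吃过了
你:你吃饭了吗
机器人:吃过了
你:[space]
机器人:再见!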
-
根据文件"Who Moved My Cheese.txt”的内容,先进行英文词频统计,之后分别用词频为参数的方法和全文英文字符串为参数绘制两个英文词云。要求设置背景色为白色并显示和保存词云图"My Cheese freq.jpg"和“MyCheese text.jpg" 。支持第三方库: wordcloud库和matplotlib 库。提示:文件的读取和去除汉字字符的语句如下txt = open(file, ‘r,encoding=“utf-8”).read()english_only_txt =’'.join(x for x in txt if ord(x) < 256) 。
# Install the wordcloud library first: pip install wordcloud
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# Read the file and keep only non-Chinese (code point < 256) characters
file = "Who Moved My Cheese.txt"
txt = open(file, 'r', encoding="utf-8").read()
english_only_txt = ''.join(x for x in txt if ord(x) < 256)

# English word-frequency statistics (process_text returns a word -> count dict)
wc = WordCloud(background_color="white")
freq = wc.process_text(english_only_txt)

# Word cloud 1: generated from the frequency dict
wordcloud = wc.generate_from_frequencies(freq)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.savefig("MyCheese freq.jpg", dpi=300)
plt.show()

# Word cloud 2: generated from the full English text string
wordcloud = WordCloud(background_color="white").generate(english_only_txt)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.savefig("MyCheese text.jpg", dpi=300)
plt.show()
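Because the frequency dict is now available explicitly, the word-frequency statistics can be checked before drawing; a minimal sketch, assuming the freq dict built above:

# Print the ten most frequent words and their counts
top10 = sorted(freq.items(), key=lambda kv: kv[1], reverse=True)[:10]
for word, count in top10:
    print(word, count)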
-
Read the contents of "沉默的羔羊.txt", segment it into words, and output the most frequent word longer than two characters. If several words share the highest frequency, output the one that sorts largest in Unicode order. Use the jieba library. (Expected answer: 史达琳)
# Install the jieba library first: pip install jieba
import jieba

# Read the file
file = "沉默的羔羊.txt"
txt = open(file, 'r', encoding="utf-8").read()

# Segment the text into words
seg_list = jieba.lcut(txt)

# Count only words longer than two characters
word_count = {}
for word in seg_list:
    if len(word) > 2:
        word_count[word] = word_count.get(word, 0) + 1

# Collect every word that reaches the highest frequency
max_frequency = max(word_count.values())
max_words = []
for word, frequency in word_count.items():
    if frequency == max_frequency:
        max_words.append(word)

# Sort by Unicode and output the largest word
result = sorted(max_words)[-1]
print("长度大于2且最多的单词:", result)
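The same frequency-then-Unicode tie-break can be written as a single max() call over the counting dict; an equivalent sketch, assuming word_count as built above:

# Compare (frequency, word) tuples: highest count first,
# ties broken by the largest word in Unicode order
result = max(word_count, key=lambda w: (word_count[w], w))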
-
Read the contents of "三国演义.txt" and count how many times each character appears. Requirements: (1) output the names and appearance counts of the top 15 characters, using jieba; (2) build a word cloud of those top-15 character names weighted by appearance count, using the wordcloud library.
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# Frequent non-name words to exclude from the counts
excludes = {"将军", "却说", "荆州", "二人", "不可", "不能", "如此", "商议", "如何",
            "主公", "军士", "左右", "军马", "引兵", "次日", "大喜", "天下", "东吴",
            "于是", "今日", "不敢", "魏兵", "陛下", "一人", "都督", "人马", "不知",
            "汉中", "只见", "众将", "蜀兵", "上马", "大叫", "太守", "此人", "夫人",
            "后人", "背后", "城中", "一面", "何不", "大军", "忽报", "先生", "百姓",
            "何故", "然后", "先锋", "不如", "赶来", "原来", "令人", "江东", "下马",
            "喊声", "正是", "徐州", "忽然", "因此", "成都", "不见", "未知", "大败",
            "大事", "之后", "一军", "引军", "起兵", "军中", "接应", "进兵", "大惊", "可以"}

txt = open("三国演义.txt", "r", encoding='utf-8').read()
words = jieba.lcut(txt)

# Count words, merging the different titles and aliases of the same character
counts = {}
for word in words:
    if len(word) == 1:
        continue
    elif word == "诸葛亮" or word == "孔明曰":
        rword = "孔明"
    elif word == "关公" or word == "云长":
        rword = "关羽"
    elif word == "玄德" or word == "玄德曰" or word == "先主":
        rword = "刘备"
    elif word == "孟德" or word == "丞相":
        rword = "曹操"
    elif word == "后主":
        rword = "刘禅"
    elif word == "天子":
        rword = "刘协"
    else:
        rword = word
    counts[rword] = counts.get(rword, 0) + 1

# Remove the excluded words (pop avoids a KeyError if one is absent)
for word in excludes:
    counts.pop(word, None)

# Output the top 15 characters and their appearance counts
items = list(counts.items())
items.sort(key=lambda x: x[1], reverse=True)
for i in range(15):
    word, count = items[i]
    print("{0:<10}{1:>5}".format(word, count))

# Word cloud of the top 15 names, weighted by appearance count
# (font_path must point to a font that contains Chinese glyphs)
top15 = dict(items[:15])
wordcloud = WordCloud(font_path="simhei.ttf", background_color="white").generate_from_frequencies(top15)
plt.figure(figsize=(8, 8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
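If the cloud should also be saved to disk, as in the cheese exercise above, WordCloud can write the image out directly; a short sketch with an illustrative filename:

# Save the rendered cloud to disk (the filename is hypothetical)
wordcloud.to_file("top15 characters.jpg")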