
To have the chatbot automatically search Baidu Baike and display the result in the GUI whenever the user enters a question (for example "刘邦") and marks the returned answer as inaccurate, we need to make a few changes to the existing code. The complete code follows, including the modifications to the XihuaChatbotGUI class and the newly added functionality:

import os
import json
import jsonlines
import torch
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from transformers import BertModel, BertTokenizer
import tkinter as tk
from tkinter import filedialog, messagebox, ttk
import logging
from difflib import SequenceMatcher
from datetime import datetime
import requests
from bs4 import BeautifulSoup

# Project root directory
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Logging configuration
LOGS_DIR = os.path.join(PROJECT_ROOT, 'logs')
os.makedirs(LOGS_DIR, exist_ok=True)

def setup_logging():
    log_file = os.path.join(LOGS_DIR, datetime.now().strftime('%Y-%m-%d_%H-%M-%S_羲和.txt'))
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[logging.FileHandler(log_file), logging.StreamHandler()]
    )

setup_logging()

# Dataset class
class XihuaDataset(Dataset):
    def __init__(self, file_path, tokenizer, max_length=128):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.data = self.load_data(file_path)

    def load_data(self, file_path):
        data = []
        if file_path.endswith('.jsonl'):
            with jsonlines.open(file_path) as reader:
                for i, item in enumerate(reader):
                    try:
                        data.append(item)
                    except jsonlines.jsonlines.InvalidLineError as e:
                        logging.warning(f"跳过无效行 {i + 1}: {e}")
        elif file_path.endswith('.json'):
            with open(file_path, 'r', encoding='utf-8') as f:
                try:
                    data = json.load(f)
                except json.JSONDecodeError as e:
                    logging.warning(f"跳过无效文件 {file_path}: {e}")
        return data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        question = item['question']
        human_answer = item['human_answers'][0]
        chatgpt_answer = item['chatgpt_answers'][0]

        try:
            inputs = self.tokenizer(question, return_tensors='pt', padding='max_length', truncation=True, max_length=self.max_length)
            human_inputs = self.tokenizer(human_answer, return_tensors='pt', padding='max_length', truncation=True, max_length=self.max_length)
            chatgpt_inputs = self.tokenizer(chatgpt_answer, return_tensors='pt', padding='max_length', truncation=True, max_length=self.max_length)
        except Exception as e:
            logging.warning(f"跳过无效项 {idx}: {e}")
            return self.__getitem__((idx + 1) % len(self.data))

        return {
            'input_ids': inputs['input_ids'].squeeze(),
            'attention_mask': inputs['attention_mask'].squeeze(),
            'human_input_ids': human_inputs['input_ids'].squeeze(),
            'human_attention_mask': human_inputs['attention_mask'].squeeze(),
            'chatgpt_input_ids': chatgpt_inputs['input_ids'].squeeze(),
            'chatgpt_attention_mask': chatgpt_inputs['attention_mask'].squeeze(),
            'human_answer': human_answer,
            'chatgpt_answer': chatgpt_answer
        }

# Data loader factory
def get_data_loader(file_path, tokenizer, batch_size=8, max_length=128):
    dataset = XihuaDataset(file_path, tokenizer, max_length)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Model definition
class XihuaModel(torch.nn.Module):
    def __init__(self, pretrained_model_name='F:/models/bert-base-chinese'):
        super(XihuaModel, self).__init__()
        self.bert = BertModel.from_pretrained(pretrained_model_name)
        self.classifier = torch.nn.Linear(self.bert.config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = outputs.pooler_output
        logits = self.classifier(pooled_output)
        return logits

# Training loop
def train(model, data_loader, optimizer, criterion, device, progress_var=None):
    model.train()
    total_loss = 0.0
    num_batches = len(data_loader)
    for batch_idx, batch in enumerate(data_loader):
        try:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            human_input_ids = batch['human_input_ids'].to(device)
            human_attention_mask = batch['human_attention_mask'].to(device)
            chatgpt_input_ids = batch['chatgpt_input_ids'].to(device)
            chatgpt_attention_mask = batch['chatgpt_attention_mask'].to(device)

            optimizer.zero_grad()
            human_logits = model(human_input_ids, human_attention_mask)
            chatgpt_logits = model(chatgpt_input_ids, chatgpt_attention_mask)

            human_labels = torch.ones(human_logits.size(0), 1).to(device)
            chatgpt_labels = torch.zeros(chatgpt_logits.size(0), 1).to(device)

            loss = criterion(human_logits, human_labels) + criterion(chatgpt_logits, chatgpt_labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            if progress_var:
                progress_var.set((batch_idx + 1) / num_batches * 100)
        except Exception as e:
            logging.warning(f"跳过无效批次: {e}")
    return total_loss / len(data_loader)

# Main training entry point
def main_train(retrain=False):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'使用设备: {device}')

    tokenizer = BertTokenizer.from_pretrained('F:/models/bert-base-chinese')
    model = XihuaModel(pretrained_model_name='F:/models/bert-base-chinese').to(device)

    if retrain:
        model_path = os.path.join(PROJECT_ROOT, 'models/xihua_model.pth')
        if os.path.exists(model_path):
            model.load_state_dict(torch.load(model_path, map_location=device))
            logging.info("加载现有模型")
        else:
            logging.info("没有找到现有模型,将使用预训练模型")

    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    criterion = torch.nn.BCEWithLogitsLoss()
    train_data_loader = get_data_loader(os.path.join(PROJECT_ROOT, 'data/train_data.jsonl'), tokenizer, batch_size=8, max_length=128)

    num_epochs = 30
    for epoch in range(num_epochs):
        train_loss = train(model, train_data_loader, optimizer, criterion, device)
        logging.info(f'Epoch [{epoch+1}/{num_epochs}], Loss: {train_loss:.10f}')

    os.makedirs(os.path.join(PROJECT_ROOT, 'models'), exist_ok=True)  # make sure models/ exists before saving
    torch.save(model.state_dict(), os.path.join(PROJECT_ROOT, 'models/xihua_model.pth'))
    logging.info("模型训练完成并保存")

# Baidu web search helper
def search_baidu(query):
    url = f"https://www.baidu.com/s?wd={query}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    }
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    results = soup.find_all('div', class_='c-abstract')
    if results:
        return results[0].get_text().strip()
    return "没有找到相关信息"

# Baidu Baike search helper
def search_baidu_baike(query):
    url = f"https://baike.baidu.com/item/{query}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    }
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    meta_description = soup.find('meta', attrs={'name': 'description'})
    if meta_description:
        return meta_description['content']
    return "没有找到相关信息"

# GUI
class XihuaChatbotGUI:
    def __init__(self, root):
        self.root = root
        self.root.title("羲和聊天机器人")

        self.tokenizer = BertTokenizer.from_pretrained('F:/models/bert-base-chinese')
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = XihuaModel(pretrained_model_name='F:/models/bert-base-chinese').to(self.device)
        self.load_model()
        self.model.eval()

        # Load the training data so it can be reused when answering
        self.data = self.load_data(os.path.join(PROJECT_ROOT, 'data/train_data.jsonl'))

        # Conversation history
        self.history = []

        self.create_widgets()

    def create_widgets(self):
        # Style
        style = ttk.Style()
        style.theme_use('clam')

        # Top frame
        top_frame = ttk.Frame(self.root)
        top_frame.pack(pady=10)

        self.question_label = ttk.Label(top_frame, text="问题:", font=("Arial", 12))
        self.question_label.grid(row=0, column=0, padx=10)

        self.question_entry = ttk.Entry(top_frame, width=50, font=("Arial", 12))
        self.question_entry.grid(row=0, column=1, padx=10)

        self.answer_button = ttk.Button(top_frame, text="获取回答", command=self.get_answer, style='TButton')
        self.answer_button.grid(row=0, column=2, padx=10)

        # Middle frame
        middle_frame = ttk.Frame(self.root)
        middle_frame.pack(pady=10)

        self.chat_text = tk.Text(middle_frame, height=20, width=100, font=("Arial", 12), wrap='word')
        self.chat_text.grid(row=0, column=0, padx=10, pady=10)
        self.chat_text.tag_configure("user", justify='right', foreground='blue')
        self.chat_text.tag_configure("xihua", justify='left', foreground='green')

        # Bottom frame
        bottom_frame = ttk.Frame(self.root)
        bottom_frame.pack(pady=10)

        self.correct_button = ttk.Button(bottom_frame, text="准确", command=self.mark_correct, style='TButton')
        self.correct_button.grid(row=0, column=0, padx=10)

        self.incorrect_button = ttk.Button(bottom_frame, text="不准确", command=self.mark_incorrect, style='TButton')
        self.incorrect_button.grid(row=0, column=1, padx=10)

        self.train_button = ttk.Button(bottom_frame, text="训练模型", command=self.train_model, style='TButton')
        self.train_button.grid(row=0, column=2, padx=10)

        self.retrain_button = ttk.Button(bottom_frame, text="重新训练模型", command=lambda: self.train_model(retrain=True), style='TButton')
        self.retrain_button.grid(row=0, column=3, padx=10)

        self.progress_var = tk.DoubleVar()
        self.progress_bar = ttk.Progressbar(bottom_frame, variable=self.progress_var, maximum=100, length=200, mode='determinate')
        self.progress_bar.grid(row=1, column=0, columnspan=4, pady=10)

        self.log_text = tk.Text(bottom_frame, height=10, width=70, font=("Arial", 12))
        self.log_text.grid(row=2, column=0, columnspan=4, pady=10)

        self.evaluate_button = ttk.Button(bottom_frame, text="评估模型", command=self.evaluate_model, style='TButton')
        self.evaluate_button.grid(row=3, column=0, padx=10, pady=10)

        self.history_button = ttk.Button(bottom_frame, text="查看历史记录", command=self.view_history, style='TButton')
        self.history_button.grid(row=3, column=1, padx=10, pady=10)

        self.save_history_button = ttk.Button(bottom_frame, text="保存历史记录", command=self.save_history, style='TButton')
        self.save_history_button.grid(row=3, column=2, padx=10, pady=10)

    def get_answer(self):
        question = self.question_entry.get()
        if not question:
            messagebox.showwarning("输入错误", "请输入问题")
            return

        inputs = self.tokenizer(question, return_tensors='pt', padding='max_length', truncation=True, max_length=128)
        with torch.no_grad():
            input_ids = inputs['input_ids'].to(self.device)
            attention_mask = inputs['attention_mask'].to(self.device)
            logits = self.model(input_ids, attention_mask)

        if logits.item() > 0:
            answer_type = "羲和回答"
        else:
            answer_type = "零回答"

        specific_answer = self.get_specific_answer(question, answer_type)

        self.chat_text.insert(tk.END, f"用户: {question}\n", "user")
        self.chat_text.insert(tk.END, f"羲和: {specific_answer}\n", "xihua")

        # Append to history
        self.history.append({
            'question': question,
            'answer_type': answer_type,
            'specific_answer': specific_answer,
            'accuracy': None  # not yet rated
        })

    def get_specific_answer(self, question, answer_type):
        # Fuzzy-match the most similar stored question
        best_match = None
        best_ratio = 0.0
        for item in self.data:
            ratio = SequenceMatcher(None, question, item['question']).ratio()
            if ratio > best_ratio:
                best_ratio = ratio
                best_match = item

        if best_match:
            if answer_type == "羲和回答":
                return best_match['human_answers'][0]
            else:
                return best_match['chatgpt_answers'][0]
        return "这个我也不清楚,你问问零吧"

    def load_data(self, file_path):
        data = []
        if file_path.endswith('.jsonl'):
            with jsonlines.open(file_path) as reader:
                for i, item in enumerate(reader):
                    try:
                        data.append(item)
                    except jsonlines.jsonlines.InvalidLineError as e:
                        logging.warning(f"跳过无效行 {i + 1}: {e}")
        elif file_path.endswith('.json'):
            with open(file_path, 'r', encoding='utf-8') as f:
                try:
                    data = json.load(f)
                except json.JSONDecodeError as e:
                    logging.warning(f"跳过无效文件 {file_path}: {e}")
        return data

    def load_model(self):
        model_path = os.path.join(PROJECT_ROOT, 'models/xihua_model.pth')
        if os.path.exists(model_path):
            self.model.load_state_dict(torch.load(model_path, map_location=self.device))
            logging.info("加载现有模型")
        else:
            logging.info("没有找到现有模型,将使用预训练模型")

    def train_model(self, retrain=False):
        file_path = filedialog.askopenfilename(filetypes=[("JSONL files", "*.jsonl"), ("JSON files", "*.json")])
        if not file_path:
            messagebox.showwarning("文件选择错误", "请选择一个有效的数据文件")
            return

        try:
            dataset = XihuaDataset(file_path, self.tokenizer)
            data_loader = DataLoader(dataset, batch_size=8, shuffle=True)

            # Reload previously trained weights when retraining
            if retrain:
                self.model.load_state_dict(torch.load(os.path.join(PROJECT_ROOT, 'models/xihua_model.pth'), map_location=self.device))
                self.model.to(self.device)
                self.model.train()

            optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-5)
            criterion = torch.nn.BCEWithLogitsLoss()
            num_epochs = 30
            for epoch in range(num_epochs):
                train_loss = train(self.model, data_loader, optimizer, criterion, self.device, self.progress_var)
                logging.info(f'Epoch [{epoch+1}/{num_epochs}], Loss: {train_loss:.10f}')
                self.log_text.insert(tk.END, f'Epoch [{epoch+1}/{num_epochs}], Loss: {train_loss:.10f}\n')
                self.log_text.see(tk.END)

            os.makedirs(os.path.join(PROJECT_ROOT, 'models'), exist_ok=True)  # make sure models/ exists before saving
            torch.save(self.model.state_dict(), os.path.join(PROJECT_ROOT, 'models/xihua_model.pth'))
            logging.info("模型训练完成并保存")
            self.log_text.insert(tk.END, "模型训练完成并保存\n")
            self.log_text.see(tk.END)
            messagebox.showinfo("训练完成", "模型训练完成并保存")
        except Exception as e:
            logging.error(f"模型训练失败: {e}")
            self.log_text.insert(tk.END, f"模型训练失败: {e}\n")
            self.log_text.see(tk.END)
            messagebox.showerror("训练失败", f"模型训练失败: {e}")

    def evaluate_model(self):
        # Model evaluation logic could be added here
        messagebox.showinfo("评估结果", "模型评估功能暂未实现")

    def mark_correct(self):
        if self.history:
            self.history[-1]['accuracy'] = True
            messagebox.showinfo("评价成功", "您认为这次回答是准确的")

    def mark_incorrect(self):
        if self.history:
            self.history[-1]['accuracy'] = False
            question = self.history[-1]['question']
            baike_answer = self.search_baidu_baike(question)
            self.chat_text.insert(tk.END, f"百度百科结果: {baike_answer}\n", "xihua")
            messagebox.showinfo("评价成功", "您认为这次回答是不准确的")

    def search_baidu_baike(self, query):
        return search_baidu_baike(query)

    def view_history(self):
        history_window = tk.Toplevel(self.root)
        history_window.title("历史记录")

        history_text = tk.Text(history_window, height=20, width=80, font=("Arial", 12))
        history_text.pack(padx=10, pady=10)

        for entry in self.history:
            history_text.insert(tk.END, f"问题: {entry['question']}\n")
            history_text.insert(tk.END, f"回答类型: {entry['answer_type']}\n")
            history_text.insert(tk.END, f"具体回答: {entry['specific_answer']}\n")
            if entry['accuracy'] is None:
                history_text.insert(tk.END, "评价: 未评价\n")
            elif entry['accuracy']:
                history_text.insert(tk.END, "评价: 准确\n")
            else:
                history_text.insert(tk.END, "评价: 不准确\n")
            history_text.insert(tk.END, "-" * 50 + "\n")

    def save_history(self):
        file_path = filedialog.asksaveasfilename(defaultextension=".json", filetypes=[("JSON files", "*.json")])
        if not file_path:
            return
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(self.history, f, ensure_ascii=False, indent=4)
        messagebox.showinfo("保存成功", "历史记录已保存到文件")

# Entry point
if __name__ == "__main__":
    # Launch the GUI
    root = tk.Tk()
    app = XihuaChatbotGUI(root)
    root.mainloop()

Key changes:
Added a Baidu Baike search function: search_baidu_baike retrieves information related to the question from Baidu Baike.
Modified the mark_incorrect method: when the user marks an answer as inaccurate, it calls search_baidu_baike to fetch the Baidu Baike result and displays it in the GUI's Text widget (a more defensive variant of this lookup is sketched below).
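The search_baidu_baike function above builds the URL by direct string interpolation and will raise an exception if the request fails or times out. As an optional, minimal sketch (not part of the original code; the name search_baidu_baike_safe and the 5-second timeout are illustrative assumptions), the lookup could be hardened with URL quoting, a timeout, and exception handling:

import requests
from urllib.parse import quote
from bs4 import BeautifulSoup

def search_baidu_baike_safe(query, timeout=5):
    # Hardened variant: URL-encode the query, set a timeout, and return a message instead of raising on network errors
    url = f"https://baike.baidu.com/item/{quote(query)}"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException as e:
        return f"百度百科查询失败: {e}"
    soup = BeautifulSoup(response.text, 'html.parser')
    meta_description = soup.find('meta', attrs={'name': 'description'})
    if meta_description and meta_description.get('content'):
        return meta_description['content']
    return "没有找到相关信息"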
File structure (sketched as a tree below):
main.py: the main program file, containing all of the code.
logs/: directory where log files are stored.
models/: directory where model weight files are stored.
data/: directory where training data files are stored.
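Put together, the layout implied by the paths used in the code (data/train_data.jsonl, logs/, models/xihua_model.pth) looks roughly like this:

project_root/
├── main.py
├── data/
│   └── train_data.jsonl
├── logs/
└── models/
    └── xihua_model.pth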
Steps to run:
Make sure all dependencies are installed, such as torch, transformers, jsonlines, requests, and beautifulsoup4 (for example via pip install torch transformers jsonlines requests beautifulsoup4).
Place the training data file in the data/ directory (an example record is shown after these steps).
Run main.py to launch the GUI.
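For reference, XihuaDataset reads the question, human_answers, and chatgpt_answers fields from each record, so a single line of data/train_data.jsonl would look roughly like the following (the record content is purely illustrative):

{"question": "刘邦是谁?", "human_answers": ["刘邦是西汉的开国皇帝。"], "chatgpt_answers": ["刘邦,即汉高祖,是汉朝的建立者。"]}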
With this in place, when the user enters a question in the GUI and marks the answer as inaccurate, the program automatically searches Baidu Baike for related information and displays it in the GUI.

