Warning! Avoid These Pitfalls in Your AI Teaching System Graduation Project: The Right Way with SpringBoot + MySQL
I. About the Author
💖💖Author: 计算机编程果茶熊
💙💙About me: I spent years in professional computer science training and worked as a programming instructor; I genuinely enjoy teaching and am comfortable across several IT tracks, including Java, WeChat Mini Programs, Python, Golang, and Android. I take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I also know some techniques for reducing thesis similarity scores. I like sharing fixes for problems I run into during development and talking shop, so feel free to ask me anything about code or technology!
💛💛A few words: thank you all for following and supporting me!
💜💜
Web application projects
Android / Mini Program projects
Big data projects
Computer science graduation project topics
💕💕See the end of this article for how to get the source code from 计算机编程果茶熊
II. System Introduction
Development languages: Java + Python
Database: MySQL
Architecture: B/S (browser/server)
Backend: SpringBoot (Spring + SpringMVC + MyBatis) + Django
Frontend: Vue + HTML + CSS + JavaScript + jQuery
The AI-Agent-based teaching assistant Q&A system is a comprehensive teaching platform that brings together intelligent Q&A, resource management, and user management. The backend is built on SpringBoot with MySQL for data storage, and the frontend uses Vue.js with the ElementUI component library. The system's core highlight is its integrated AI agent technology, which gives students and teachers a natural-language Q&A service: it analyzes the semantics and context of a question and automatically matches relevant learning resources and answers. The platform supports multi-role user management with two roles, student and teacher, each with its own permissions and feature modules. Students can post study questions, browse learning resources, and receive AI-generated replies, while teachers can manage learning resources, answer student questions, and review Q&A statistics. The system also provides full resource classification, so teaching resources can be organized and searched along multiple dimensions such as subject, difficulty, and type. The whole platform uses a B/S architecture: users need no client software and can reach every feature from a browser, which greatly improves usability and adoption.
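To make the Q&A interaction concrete, here is a minimal sketch of what the student-facing ask endpoint might look like. Only TeachingAssistantService.aiIntelligentAnswer comes from the project's code (see section V below); the controller name, route, and parameter names are illustrative assumptions.

import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical REST entry point for the AI Q&A flow; the route and
// parameter names are assumptions, not part of the original listing.
@RestController
@RequestMapping("/api/qa")
public class QuestionController {

    @Autowired
    private TeachingAssistantService teachingAssistantService;

    // A student posts a question; the service matches similar historical
    // questions, composes an answer, and persists the new record.
    @PostMapping("/ask")
    public Map<String, Object> ask(@RequestParam Integer userId,
                                   @RequestParam String question) {
        return teachingAssistantService.aiIntelligentAnswer(question, userId);
    }
}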
III. Video Walkthrough
https://www.bilibili.com/video/BV1g4aRzKEAV/?spm_id_from=333.1387.homepage.video_card.click
IV. Selected Features
V. Selected Code
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.springframework.stereotype.Service;
import org.springframework.beans.factory.annotation.Autowired;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import java.util.*;
import java.util.stream.Collectors;
import static org.apache.spark.sql.functions.*;

@Service
public class TeachingAssistantService {

    @Autowired
    private QuestionMapper questionMapper;
    @Autowired
    private ResourceMapper resourceMapper;
    @Autowired
    private UserMapper userMapper;

    // Local Spark session used for in-memory ranking and aggregation.
    // "local[*]" is fine for a demo; production would externalize this.
    private final SparkSession spark = SparkSession.builder()
            .appName("TeachingAssistant")
            .master("local[*]")
            .getOrCreate();

    /**
     * Answers a question by keyword-matching historical Q&A records,
     * ranking them with Spark, and composing a reply from the best match.
     */
    public Map<String, Object> aiIntelligentAnswer(String questionContent, Integer userId) {
        Map<String, Object> result = new HashMap<>();
        List<String> keywords = extractKeywords(questionContent);
        QueryWrapper<Question> questionWrapper = new QueryWrapper<>();
        for (String keyword : keywords) {
            questionWrapper.or().like("content", keyword).or().like("answer", keyword);
        }
        // Guard: with no keywords, an empty wrapper would match every row.
        List<Question> similarQuestions = keywords.isEmpty()
                ? new ArrayList<>()
                : questionMapper.selectList(questionWrapper);
        // Keep answered questions (status = 1), newest first, top five.
        Dataset<Row> questionDataset = spark.createDataFrame(similarQuestions, Question.class);
        Dataset<Row> rankedQuestions = questionDataset
                .filter(questionDataset.col("status").equalTo(1))
                .orderBy(questionDataset.col("create_time").desc());
        List<Row> topQuestions = rankedQuestions.limit(5).collectAsList();
        String aiAnswer = generateAIAnswer(questionContent, topQuestions);
        // Persist the new question together with the generated answer.
        Question newQuestion = new Question();
        newQuestion.setUserId(userId);
        newQuestion.setContent(questionContent);
        newQuestion.setAnswer(aiAnswer);
        newQuestion.setQuestionType("AI_GENERATED");
        newQuestion.setStatus(1);
        newQuestion.setCreateTime(new Date());
        questionMapper.insert(newQuestion);
        result.put("answer", aiAnswer);
        result.put("similarQuestions", topQuestions);
        result.put("confidence", calculateConfidence(keywords, topQuestions));
        return result;
    }

    /**
     * Recommends resources for a subject by mining keywords from the user's
     * recent questions and scoring candidates by difficulty proximity.
     * Assumes the user exists; a production version would null-check it.
     */
    public Map<String, Object> intelligentResourceRecommendation(Integer userId, String subject) {
        Map<String, Object> result = new HashMap<>();
        User user = userMapper.selectById(userId);
        // Use the user's 20 most recent questions as a behavior signal.
        QueryWrapper<Question> userQuestionWrapper = new QueryWrapper<>();
        userQuestionWrapper.eq("user_id", userId).orderByDesc("create_time").last("LIMIT 20");
        List<Question> userQuestions = questionMapper.selectList(userQuestionWrapper);
        Dataset<Row> userBehaviorDataset = spark.createDataFrame(userQuestions, Question.class);
        Dataset<Row> subjectFilteredBehavior =
                userBehaviorDataset.filter(userBehaviorDataset.col("subject").equalTo(subject));
        List<String> userInterests = extractUserInterests(subjectFilteredBehavior);
        // Candidates: same subject, published, and matching an interest keyword.
        QueryWrapper<Resource> resourceWrapper = new QueryWrapper<>();
        resourceWrapper.eq("category", subject).eq("status", 1);
        if (!userInterests.isEmpty()) {
            // Nest the OR chain so it cannot escape the category/status filter.
            resourceWrapper.and(w -> {
                for (String interest : userInterests) {
                    w.or().like("title", interest).or().like("description", interest);
                }
            });
        }
        List<Resource> candidateResources = resourceMapper.selectList(resourceWrapper);
        // Score by difficulty proximity: an exact level match scores highest.
        Dataset<Row> resourceDataset = spark.createDataFrame(candidateResources, Resource.class);
        Dataset<Row> scoredResources = resourceDataset.withColumn("recommendation_score",
                when(resourceDataset.col("difficulty").equalTo(user.getLevel()), 10)
                        .when(resourceDataset.col("difficulty").equalTo(user.getLevel() + 1), 8)
                        .when(resourceDataset.col("difficulty").equalTo(user.getLevel() - 1), 6)
                        .otherwise(3));
        Dataset<Row> finalRecommendations = scoredResources
                .orderBy(desc("recommendation_score"), desc("view_count"))
                .limit(10);
        List<Row> recommendedResources = finalRecommendations.collectAsList();
        result.put("recommendedResources", recommendedResources);
        result.put("userInterests", userInterests);
        result.put("totalRecommendations", recommendedResources.size());
        return result;
    }

    /**
     * Aggregates Q&A statistics over a time range: daily volume, category
     * breakdown, difficulty distribution, and the overall answer rate.
     */
    public Map<String, Object> questionAnalysisStatistics(String timeRange, String category) {
        Map<String, Object> result = new HashMap<>();
        Date startDate = calculateStartDate(timeRange);
        QueryWrapper<Question> statisticsWrapper = new QueryWrapper<>();
        statisticsWrapper.ge("create_time", startDate);
        if (category != null && !category.isEmpty()) {
            statisticsWrapper.eq("category", category);
        }
        List<Question> questions = questionMapper.selectList(statisticsWrapper);
        Dataset<Row> questionDataset = spark.createDataFrame(questions, Question.class);
        // Daily totals: question count, distinct active users, average response time.
        Dataset<Row> dailyStats = questionDataset
                .groupBy(date_format(questionDataset.col("create_time"), "yyyy-MM-dd").alias("date"))
                .agg(count("*").alias("question_count"),
                     countDistinct("user_id").alias("active_users"),
                     avg("response_time").alias("avg_response_time"));
        // Per-category volume and average satisfaction.
        Dataset<Row> categoryStats = questionDataset.groupBy("category")
                .agg(count("*").alias("category_count"),
                     avg("satisfaction_score").alias("avg_satisfaction"));
        // Distribution of questions across difficulty levels.
        Dataset<Row> difficultyDistribution = questionDataset.groupBy("difficulty_level")
                .agg(count("*").alias("difficulty_count"))
                .orderBy("difficulty_level");
        List<Row> dailyStatsList = dailyStats.orderBy("date").collectAsList();
        List<Row> categoryStatsList = categoryStats.orderBy(desc("category_count")).collectAsList();
        List<Row> difficultyList = difficultyDistribution.collectAsList();
        long totalQuestions = questionDataset.count();
        long answeredQuestions = questionDataset.filter(questionDataset.col("answer").isNotNull()).count();
        double answerRate = totalQuestions > 0 ? (double) answeredQuestions / totalQuestions * 100 : 0;
        result.put("dailyStatistics", dailyStatsList);
        result.put("categoryStatistics", categoryStatsList);
        result.put("difficultyDistribution", difficultyList);
        result.put("totalQuestions", totalQuestions);
        result.put("answerRate", answerRate);
        result.put("timeRange", timeRange);
        return result;
    }

    // Naive keyword extraction: strip punctuation, split on whitespace, keep
    // tokens longer than two characters. Chinese text has no spaces, so a
    // whole phrase becomes one token; a real system would use a segmenter.
    private List<String> extractKeywords(String content) {
        String[] words = content.toLowerCase()
                .replaceAll("[^a-zA-Z0-9\\u4e00-\\u9fa5\\s]", "")
                .split("\\s+");
        List<String> keywords = new ArrayList<>();
        for (String word : words) {
            if (word.length() > 2) {
                keywords.add(word);
            }
        }
        return keywords;
    }

    // Composes a reply from the best-matching historical answer, or falls
    // back to a generic message when nothing similar was found.
    private String generateAIAnswer(String question, List<Row> similarQuestions) {
        StringBuilder answer = new StringBuilder("Based on analysis of related questions, ");
        if (!similarQuestions.isEmpty()) {
            answer.append("the following approach is suggested: ");
            for (Row row : similarQuestions) {
                String existingAnswer = row.getAs("answer");
                if (existingAnswer != null && existingAnswer.length() > 10) {
                    // Quote at most the first 50 characters of the stored answer.
                    answer.append(existingAnswer.substring(0, Math.min(50, existingAnswer.length()))).append("...");
                    break;
                }
            }
        } else {
            answer.append("this appears to be a new question; consider consulting a subject-matter expert or professional references for an accurate answer.");
        }
        return answer.toString();
    }

    // Mines the most frequent tokens from the user's recent question texts
    // and treats the top five as interest keywords.
    private List<String> extractUserInterests(Dataset<Row> behaviorData) {
        List<Row> behaviors = behaviorData.collectAsList();
        Map<String, Integer> interestCount = new HashMap<>();
        for (Row row : behaviors) {
            String content = row.getAs("content");
            if (content != null) {
                for (String word : content.split("\\s+")) {
                    if (word.length() > 2) {
                        interestCount.put(word, interestCount.getOrDefault(word, 0) + 1);
                    }
                }
            }
        }
        return interestCount.entrySet().stream()
                .sorted((e1, e2) -> e2.getValue().compareTo(e1.getValue()))
                .limit(5)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
    }

    // Heuristic confidence: base 0.4 plus up to 0.6 scaled by the fraction
    // of keyword/question pairs that match, capped at 0.95.
    private double calculateConfidence(List<String> keywords, List<Row> similarQuestions) {
        // Guard against division by zero when either list is empty.
        if (similarQuestions.isEmpty() || keywords.isEmpty()) {
            return 0.3;
        }
        int matchCount = 0;
        for (Row question : similarQuestions) {
            String content = question.getAs("content");
            if (content != null) {
                for (String keyword : keywords) {
                    if (content.toLowerCase().contains(keyword.toLowerCase())) {
                        matchCount++;
                    }
                }
            }
        }
        return Math.min(0.95, 0.4 + (double) matchCount / (keywords.size() * similarQuestions.size()) * 0.6);
    }

    // Maps a time-range label to a start date; unknown labels default to 30 days.
    private Date calculateStartDate(String timeRange) {
        Calendar calendar = Calendar.getInstance();
        switch (timeRange) {
            case "week":    calendar.add(Calendar.DAY_OF_YEAR, -7);  break;
            case "month":   calendar.add(Calendar.MONTH, -1);        break;
            case "quarter": calendar.add(Calendar.MONTH, -3);        break;
            default:        calendar.add(Calendar.DAY_OF_YEAR, -30); break;
        }
        return calendar.getTime();
    }
}
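
The service above reads and writes Question rows through MyBatis-Plus. For reference, here is a minimal sketch of what that entity might look like, reconstructed from the setters and column names used in the listing; the annotations and field types are assumptions, not the project's actual definition.

import java.util.Date;
import com.baomidou.mybatisplus.annotation.TableName;

// Hypothetical sketch of the Question entity; field names mirror the
// columns used above, but types and annotations are assumptions.
@TableName("question")
public class Question {
    private Integer id;
    private Integer userId;          // who asked
    private String content;          // question text
    private String answer;           // stored or AI-generated answer
    private String questionType;     // e.g. "AI_GENERATED"
    private String category;         // category used in statistics
    private String subject;          // subject used in recommendations
    private Integer difficultyLevel; // difficulty bucket
    private Integer status;          // 1 = published/answered
    private Date createTime;
    // getters and setters omitted for brevity
}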
VI. Selected Documentation
VII. END
💕💕To get the source code, contact 计算机编程果茶熊