Operations system

This commit is contained in:
wanjia 2025-04-29 10:22:57 +08:00
parent 19861a46a9
commit 3388527903
61 changed files with 8407 additions and 1053 deletions

105
README_VIDEO_UPLOAD.md Normal file

@ -0,0 +1,105 @@
# Video Upload and Scheduled Publishing Guide
This document explains how to use the system's video upload and scheduled publishing features.
## Prerequisites
1. Install all dependencies:
```
pip install -r requirements.txt
```
2. Make sure the Redis service is running (it backs the Celery task queue):
```
redis-server
```
3. Start a Celery worker to process background tasks:
```
celery -A role_based_system worker -l info
```
4. Start Celery Beat to handle scheduled tasks:
```
celery -A role_based_system beat -l info
```
## Option 1: Testing from the Command Line
### Prepare a platform account
First make sure at least one platform account exists in the system. One can be created through the admin site or the API (see the example sketch below).
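The exact request body depends on your setup, but based on the `PlatformAccountSerializer` and URL routes added in this commit, a minimal sketch of creating a platform account over the API could look like the following. The base URL, token, operator reference, and choice values such as `platform_name` and `status` are placeholders/assumptions:
```python
# Hypothetical example: field names follow operation/serializers.py in this commit,
# but the token, operator reference and choice values are placeholders.
import requests

BASE_URL = "http://127.0.0.1:8000"
HEADERS = {"Authorization": "Token <your-token>"}

payload = {
    "operator": "<operator-id-or-username>",   # the create view also accepts a username or real name
    "platform_name": "douyin",                 # assumed choice value; adjust to your configuration
    "account_name": "demo_account",
    "account_id": "demo_account_001",
    "status": "active",                        # assumed choice value
    "followers_count": 0,
    "account_url": "https://example.com/demo_account",
    "description": "Test account created via the API",
}

resp = requests.post(f"{BASE_URL}/api/operation/platforms/", json=payload, headers=HEADERS)
print(resp.status_code, resp.json())
```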
### Upload a video and schedule its publication
Use the following command to upload a video and schedule it for publishing:
```bash
python manage.py test_video_upload "D:\pythonproject\role_based\role_based_system\上传视频测试.mp4" 1 --title "测试视频标题" --desc "这是一个测试视频描述" --schedule "2023-08-10 15:30:00"
```
Parameter reference:
- First positional argument: path to the video file
- Second positional argument: platform account ID
- `--title`: video title (optional; defaults to the file name)
- `--desc`: video description (optional)
- `--schedule`: scheduled publish time (optional, format `YYYY-MM-DD HH:MM:SS`)
### Publish a video manually
To publish a video immediately, run:
```bash
python manage.py publish_video 1
```
Parameter reference:
- Video ID
## Option 2: Using the API
### Upload a video
Upload a video with a POST request (a request sketch follows the field list):
```
POST /api/operation/videos/upload_video/
```
Form data:
- `video_file`: the video file
- `platform_account`: platform account ID
- `title`: video title (optional)
- `description`: video description (optional)
- `scheduled_time`: scheduled publish time (optional, ISO format)
- `tags`: tags (optional)
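For reference, a minimal sketch of this request using Python `requests` might look like this (the token, file path, and platform account ID are placeholders):
```python
import requests

url = "http://127.0.0.1:8000/api/operation/videos/upload_video/"
headers = {"Authorization": "Token <your-token>"}

with open("demo.mp4", "rb") as f:  # placeholder local file
    resp = requests.post(
        url,
        headers=headers,
        files={"video_file": f},
        data={
            "platform_account": "1",                  # platform account ID
            "title": "Demo video",
            "description": "Uploaded via the API",
            "scheduled_time": "2025-05-01T15:30:00",  # ISO format; omit to keep the video as a draft
            "tags": "demo,test",
        },
    )
print(resp.status_code, resp.json())
```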
### Publish a video manually
Publish a video immediately with a POST request, as sketched below:
```
POST /api/operation/videos/{video_id}/manual_publish/
```
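A minimal sketch of the call (the video ID and token are placeholders):
```python
import requests

resp = requests.post(
    "http://127.0.0.1:8000/api/operation/videos/1/manual_publish/",
    headers={"Authorization": "Token <your-token>"},
)
print(resp.status_code, resp.json())
```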
## Verifying That a Video Was Published
1. Check the status of the video record:
```
python manage.py shell
>>> from user_management.models import Video
>>> Video.objects.get(id=1).status
'published'
```
2. Watch the log output:
```
tail -f debug.log
```
3. Query the video status through the API (see the sketch below):
```
GET /api/operation/videos/{video_id}/
```
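Assuming the default DRF detail response built from `VideoSerializer`, the status can be checked with a short script like this (token and ID are placeholders):
```python
import requests

resp = requests.get(
    "http://127.0.0.1:8000/api/operation/videos/1/",
    headers={"Authorization": "Token <your-token>"},
)
video = resp.json()
# "status" should be one of: draft, scheduled, published, failed
print(video["status"], video.get("publish_time"), video.get("video_url"))
```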
## Notes
1. Video files are saved under the media directory (by default `media/videos/{platform_name}_{account_name}/`).
2. Scheduled publishing depends on Celery and Redis; make sure both services are running.
3. The system currently only simulates the publishing step; pushing to the actual platforms requires extending the related APIs.
4. Default status flow for a video: draft -> scheduled -> published (or failed); a sketch of this transition follows.
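For illustration only, below is a minimal sketch of the kind of transition a publish task could perform. It is not the project's actual `user_management.tasks.publish_scheduled_video` implementation; only the return shape mirrors what `manual_publish` in `operation/views.py` expects.
```python
from django.utils import timezone

def publish_video_sketch(video):
    """Hypothetical publish step: move a draft/scheduled video to published or failed."""
    try:
        if video.status not in ("draft", "scheduled"):
            return {"success": False, "error": f"unexpected status: {video.status}"}
        # A real implementation would upload to the target platform here;
        # this commit only simulates that step.
        video.video_url = f"https://example.com/videos/{video.id}"  # placeholder URL
        video.status = "published"
        video.publish_time = timezone.now()
        video.save()
        return {
            "success": True,
            "video_url": video.video_url,
            "publish_time": video.publish_time.isoformat(),
        }
    except Exception as exc:
        video.status = "failed"
        video.save()
        return {"success": False, "error": str(exc)}
```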

4
celerybeat-schedule.bak Normal file

@ -0,0 +1,4 @@
'entries', (0, 428)
'__version__', (512, 20)
'tz', (1024, 28)
'utc_enabled', (1536, 4)

BIN
celerybeat-schedule.dat Normal file

Binary file not shown.

4
celerybeat-schedule.dir Normal file

@ -0,0 +1,4 @@
'entries', (0, 428)
'__version__', (512, 20)
'tz', (1024, 28)
'utc_enabled', (1536, 4)

File diff suppressed because it is too large

50
fix_gmail_cred.py Normal file

@ -0,0 +1,50 @@
import os
import django
# Set up the Django environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'role_based_system.settings')
django.setup()
from user_management.models import GmailCredential

# Clear a specific credential
def fix_specific_credential(credential_id):
    try:
        cred = GmailCredential.objects.get(id=credential_id)
        cred.credentials = None
        cred.needs_reauth = True
        cred.save()
        print(f'Cleared credential {cred.id} (email: {cred.gmail_email}) and marked it as needing re-authorization')
        return True
    except GmailCredential.DoesNotExist:
        print(f'Credential {credential_id} not found')
        return False
    except Exception as e:
        print(f'Error while handling credential {credential_id}: {str(e)}')
        return False

# Clear all credentials that may have become corrupted
def fix_all_credentials():
    credentials = GmailCredential.objects.all()
    fixed_count = 0
    for cred in credentials:
        if cred.credentials:  # only touch records that actually hold credential data
            try:
                # Clear the credential and flag it for re-authentication
                cred.credentials = None
                cred.needs_reauth = True
                cred.save()
                print(f'Cleared credential {cred.id} (email: {cred.gmail_email})')
                fixed_count += 1
            except Exception as e:
                print(f'Error while handling credential {cred.id}: {str(e)}')
    print(f'Cleared {fixed_count} credential(s) in total')

if __name__ == '__main__':
    # Fix one specific credential
    fix_specific_credential('936c37a7-c6e5-454a-aaef-7c66b1230507')
    # Or fix all credentials
    # fix_all_credentials()

View File

@ -0,0 +1 @@
{"access_token": "ya29.a0AZYkNZgeGgZi3_OfGNwLC4ykCFeSfLHoth9GCDj_UJI9SvhY7nAuWl7Gf2qZH-4iUSeDX-fD6JWSAGPwK9rbNMCgX5RUbsExLA69L3XZ1Og5vE5U1HHePCdGSQ-Ceqxauob1tte2nQHdEaPaW5t9OLOQhwGXsCmGq2fhbNpgRgaCgYKAQ8SARYSFQHGX2Mi-I-L5Q--8gfUvX6_0juX1Q0177", "client_id": "266164728215-v84lngbp3vgr4ulql01sqkg5vaigf4a5.apps.googleusercontent.com", "client_secret": "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", "refresh_token": "1//0ed76IPW5-HYwCgYIARAAGA4SNwF-L9IrWvNjsRtcbhlXa0eSfKZsf1-pv5yWL6GSE-4ve6fPQNfIlP8ujruo9Y9B-eIZP6QjolY", "token_expiry": "2025-04-28T10:26:07Z", "token_uri": "https://oauth2.googleapis.com/token", "user_agent": null, "revoke_uri": "https://oauth2.googleapis.com/revoke", "id_token": null, "id_token_jwt": null, "token_response": {"access_token": "ya29.a0AZYkNZgeGgZi3_OfGNwLC4ykCFeSfLHoth9GCDj_UJI9SvhY7nAuWl7Gf2qZH-4iUSeDX-fD6JWSAGPwK9rbNMCgX5RUbsExLA69L3XZ1Og5vE5U1HHePCdGSQ-Ceqxauob1tte2nQHdEaPaW5t9OLOQhwGXsCmGq2fhbNpgRgaCgYKAQ8SARYSFQHGX2Mi-I-L5Q--8gfUvX6_0juX1Q0177", "expires_in": 3599, "scope": "https://mail.google.com/", "token_type": "Bearer"}, "scopes": ["https://mail.google.com/"], "token_info_uri": "https://oauth2.googleapis.com/tokeninfo", "invalid": false, "_class": "OAuth2Credentials", "_module": "oauth2client.client"}

View File

@ -0,0 +1 @@
{"access_token": "ya29.a0AZYkNZi4RkJ4Lnod5-jpByMzYlS1F6qxcZ0JGEYFBO5k0RNt0AYNdXfd3_ykDpqcRDfJln6S8aSbhfqrLin_VeBJABuzXVbSz7MO1akOMa1IhW9yhtl02xGjag9pd1hreYEM9WztRXrwrzrM8dPEGO4oHJLlEQvgq5xoxO8BaCgYKAX8SARYSFQHGX2MiQLh3UOHBxP2eu5fLcEL5Fg0175", "client_id": "266164728215-v84lngbp3vgr4ulql01sqkg5vaigf4a5.apps.googleusercontent.com", "client_secret": "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", "refresh_token": "1//0efbA-jcfi1XZCgYIARAAGA4SNwF-L9IryppvK1Md7niUUz1nkTCkp8Skh6pVlL1PQe6eWTS8hL7Qorzw7UbmVx0sg9zgXs28pVo", "token_expiry": "2025-04-19T03:29:54Z", "token_uri": "https://oauth2.googleapis.com/token", "user_agent": null, "revoke_uri": "https://oauth2.googleapis.com/revoke", "id_token": null, "id_token_jwt": null, "token_response": {"access_token": "ya29.a0AZYkNZi4RkJ4Lnod5-jpByMzYlS1F6qxcZ0JGEYFBO5k0RNt0AYNdXfd3_ykDpqcRDfJln6S8aSbhfqrLin_VeBJABuzXVbSz7MO1akOMa1IhW9yhtl02xGjag9pd1hreYEM9WztRXrwrzrM8dPEGO4oHJLlEQvgq5xoxO8BaCgYKAX8SARYSFQHGX2MiQLh3UOHBxP2eu5fLcEL5Fg0175", "expires_in": 3599, "refresh_token": "1//0efbA-jcfi1XZCgYIARAAGA4SNwF-L9IryppvK1Md7niUUz1nkTCkp8Skh6pVlL1PQe6eWTS8hL7Qorzw7UbmVx0sg9zgXs28pVo", "scope": "https://mail.google.com/", "token_type": "Bearer"}, "scopes": ["https://mail.google.com/"], "token_info_uri": "https://oauth2.googleapis.com/tokeninfo", "invalid": false, "_class": "OAuth2Credentials", "_module": "oauth2client.client"}

View File

@ -1 +1 @@
{"access_token": "ya29.a0AZYkNZga-tjDnp1lsXRohu1Tji-eVV88RaLnPjxr3HpYuBDW_6boys1aqnRnete1pT-E7ygZ5drpb0Hhbt9o15ryqbfeaKqS4HTDG_iIVvFn3npNNLSqIdvsf98burhBOnR-Nf6ty7xCsPLyFaO15bG2LybRgGL1mubVNMXSaCgYKAdQSARISFQHGX2MicVi2eoShd196_WeptFDUZg0175", "client_id": "266164728215-v84lngbp3vgr4ulql01sqkg5vaigf4a5.apps.googleusercontent.com", "client_secret": "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", "refresh_token": "1//0eAXpVapw8WjjCgYIARAAGA4SNwF-L9Irm0iHkQzqzM7Hn39nctE-DOWKTsm89Ge3nG0bfdfqloRvLMiN4YWHEKcDpLdPIuZel0Q", "token_expiry": "2025-04-10T09:51:34Z", "token_uri": "https://oauth2.googleapis.com/token", "user_agent": null, "revoke_uri": "https://oauth2.googleapis.com/revoke", "id_token": null, "id_token_jwt": null, "token_response": {"access_token": "ya29.a0AZYkNZga-tjDnp1lsXRohu1Tji-eVV88RaLnPjxr3HpYuBDW_6boys1aqnRnete1pT-E7ygZ5drpb0Hhbt9o15ryqbfeaKqS4HTDG_iIVvFn3npNNLSqIdvsf98burhBOnR-Nf6ty7xCsPLyFaO15bG2LybRgGL1mubVNMXSaCgYKAdQSARISFQHGX2MicVi2eoShd196_WeptFDUZg0175", "expires_in": 3599, "refresh_token": "1//0eAXpVapw8WjjCgYIARAAGA4SNwF-L9Irm0iHkQzqzM7Hn39nctE-DOWKTsm89Ge3nG0bfdfqloRvLMiN4YWHEKcDpLdPIuZel0Q", "scope": "https://mail.google.com/", "token_type": "Bearer", "refresh_token_expires_in": 604799}, "scopes": ["https://mail.google.com/"], "token_info_uri": "https://oauth2.googleapis.com/tokeninfo", "invalid": false, "_class": "OAuth2Credentials", "_module": "oauth2client.client"}
{"access_token": "ya29.a0AZYkNZgeGgZi3_OfGNwLC4ykCFeSfLHoth9GCDj_UJI9SvhY7nAuWl7Gf2qZH-4iUSeDX-fD6JWSAGPwK9rbNMCgX5RUbsExLA69L3XZ1Og5vE5U1HHePCdGSQ-Ceqxauob1tte2nQHdEaPaW5t9OLOQhwGXsCmGq2fhbNpgRgaCgYKAQ8SARYSFQHGX2Mi-I-L5Q--8gfUvX6_0juX1Q0177", "client_id": "266164728215-v84lngbp3vgr4ulql01sqkg5vaigf4a5.apps.googleusercontent.com", "client_secret": "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", "refresh_token": "1//0ed76IPW5-HYwCgYIARAAGA4SNwF-L9IrWvNjsRtcbhlXa0eSfKZsf1-pv5yWL6GSE-4ve6fPQNfIlP8ujruo9Y9B-eIZP6QjolY", "token_expiry": "2025-04-28T10:26:07Z", "token_uri": "https://oauth2.googleapis.com/token", "user_agent": null, "revoke_uri": "https://oauth2.googleapis.com/revoke", "id_token": null, "id_token_jwt": null, "token_response": {"access_token": "ya29.a0AZYkNZgeGgZi3_OfGNwLC4ykCFeSfLHoth9GCDj_UJI9SvhY7nAuWl7Gf2qZH-4iUSeDX-fD6JWSAGPwK9rbNMCgX5RUbsExLA69L3XZ1Og5vE5U1HHePCdGSQ-Ceqxauob1tte2nQHdEaPaW5t9OLOQhwGXsCmGq2fhbNpgRgaCgYKAQ8SARYSFQHGX2Mi-I-L5Q--8gfUvX6_0juX1Q0177", "expires_in": 3599, "scope": "https://mail.google.com/", "token_type": "Bearer"}, "scopes": ["https://mail.google.com/"], "token_info_uri": "https://oauth2.googleapis.com/tokeninfo", "invalid": false, "_class": "OAuth2Credentials", "_module": "oauth2client.client"}

1
models.py Normal file

@ -0,0 +1 @@

0
operation/__init__.py Normal file

40
operation/admin.py Normal file

@ -0,0 +1,40 @@
from django.contrib import admin
from user_management.models import OperatorAccount, PlatformAccount, Video
@admin.register(OperatorAccount)
class OperatorAccountAdmin(admin.ModelAdmin):
list_display = ('username', 'real_name', 'position', 'department', 'is_active', 'created_at')
list_filter = ('position', 'department', 'is_active')
search_fields = ('username', 'real_name', 'email', 'phone')
ordering = ('-created_at',)
@admin.register(PlatformAccount)
class PlatformAccountAdmin(admin.ModelAdmin):
list_display = ('account_name', 'platform_name', 'get_operator_name', 'status', 'followers_count', 'created_at')
list_filter = ('platform_name', 'status')
search_fields = ('account_name', 'account_id', 'operator__real_name')
ordering = ('-created_at',)
def get_operator_name(self, obj):
return obj.operator.real_name
get_operator_name.short_description = '运营账号'
get_operator_name.admin_order_field = 'operator__real_name'
@admin.register(Video)
class VideoAdmin(admin.ModelAdmin):
list_display = ('title', 'get_platform_name', 'get_account_name', 'status', 'views_count', 'created_at')
list_filter = ('status', 'platform_account__platform_name')
search_fields = ('title', 'description', 'platform_account__account_name')
ordering = ('-created_at',)
def get_platform_name(self, obj):
return obj.platform_account.get_platform_name_display()
def get_account_name(self, obj):
return obj.platform_account.account_name
get_platform_name.short_description = '平台'
get_platform_name.admin_order_field = 'platform_account__platform_name'
get_account_name.short_description = '账号名称'
get_account_name.admin_order_field = 'platform_account__account_name'

6
operation/apps.py Normal file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class OperationConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'operation'

View File

6
operation/models.py Normal file

@ -0,0 +1,6 @@
from django.db import models
from user_management.models import OperatorAccount, PlatformAccount, Video, KnowledgeBase, KnowledgeBaseDocument
# Create your models here.
# Additional models or relations could be added here; for now we reuse the existing models from user_management

100
operation/serializers.py Normal file

@ -0,0 +1,100 @@
from rest_framework import serializers
from user_management.models import OperatorAccount, PlatformAccount, Video, KnowledgeBase, KnowledgeBaseDocument
import uuid
class OperatorAccountSerializer(serializers.ModelSerializer):
id = serializers.UUIDField(read_only=False, required=False) # 允许前端不提供ID但如果提供则必须是有效的UUID
class Meta:
model = OperatorAccount
fields = ['id', 'username', 'password', 'real_name', 'email', 'phone', 'position', 'department', 'is_active', 'created_at', 'updated_at']
read_only_fields = ['created_at', 'updated_at']
extra_kwargs = {
'password': {'write_only': True}
}
def create(self, validated_data):
# 如果没有提供ID则生成一个UUID
if 'id' not in validated_data:
validated_data['id'] = uuid.uuid4()
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password:
instance.password = password # 在实际应用中应该加密存储密码
instance.save()
return instance
class PlatformAccountSerializer(serializers.ModelSerializer):
operator_name = serializers.CharField(source='operator.real_name', read_only=True)
class Meta:
model = PlatformAccount
fields = ['id', 'operator', 'operator_name', 'platform_name', 'account_name', 'account_id',
'status', 'followers_count', 'account_url', 'description',
'created_at', 'updated_at', 'last_login']
read_only_fields = ['id', 'created_at', 'updated_at']
def to_internal_value(self, data):
# 处理operator字段可能是字符串格式的UUID
if 'operator' in data and isinstance(data['operator'], str):
try:
# 尝试获取对应的运营账号对象
operator = OperatorAccount.objects.get(id=data['operator'])
data['operator'] = operator.id # 确保使用正确的ID格式
except OperatorAccount.DoesNotExist:
# 如果找不到对应的运营账号,保持原值,让验证器捕获此错误
pass
except Exception as e:
# 其他类型的错误如ID格式不正确等
pass
return super().to_internal_value(data)
class VideoSerializer(serializers.ModelSerializer):
platform_account_name = serializers.CharField(source='platform_account.account_name', read_only=True)
platform_name = serializers.CharField(source='platform_account.platform_name', read_only=True)
class Meta:
model = Video
fields = ['id', 'platform_account', 'platform_account_name', 'platform_name', 'title',
'description', 'video_url', 'local_path', 'thumbnail_url', 'status',
'views_count', 'likes_count', 'comments_count', 'shares_count', 'tags',
'publish_time', 'scheduled_time', 'created_at', 'updated_at']
read_only_fields = ['id', 'created_at', 'updated_at', 'views_count', 'likes_count',
'comments_count', 'shares_count']
def to_internal_value(self, data):
# 处理platform_account字段可能是字符串格式的UUID
if 'platform_account' in data and isinstance(data['platform_account'], str):
try:
# 尝试获取对应的平台账号对象
platform_account = PlatformAccount.objects.get(id=data['platform_account'])
data['platform_account'] = platform_account.id # 确保使用正确的ID格式
except PlatformAccount.DoesNotExist:
# 如果找不到对应的平台账号,保持原值,让验证器捕获此错误
pass
except Exception as e:
# 其他类型的错误如ID格式不正确等
pass
return super().to_internal_value(data)
class KnowledgeBaseSerializer(serializers.ModelSerializer):
class Meta:
model = KnowledgeBase
fields = ['id', 'user_id', 'name', 'desc', 'type', 'department', 'group',
'external_id', 'create_time', 'update_time']
read_only_fields = ['id', 'create_time', 'update_time']
class KnowledgeBaseDocumentSerializer(serializers.ModelSerializer):
class Meta:
model = KnowledgeBaseDocument
fields = ['id', 'knowledge_base', 'document_id', 'document_name',
'external_id', 'uploader_name', 'status', 'create_time', 'update_time']
read_only_fields = ['id', 'create_time', 'update_time']

3
operation/tests.py Normal file

@ -0,0 +1,3 @@
from django.test import TestCase
# Create your tests here.

12
operation/urls.py Normal file

@ -0,0 +1,12 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import OperatorAccountViewSet, PlatformAccountViewSet, VideoViewSet
router = DefaultRouter()
router.register(r'operators', OperatorAccountViewSet)
router.register(r'platforms', PlatformAccountViewSet)
router.register(r'videos', VideoViewSet)
urlpatterns = [
path('', include(router.urls)),
]

857
operation/views.py Normal file

@ -0,0 +1,857 @@
from django.shortcuts import render
import json
import uuid
import logging
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.utils import timezone
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.db.models import Q
import os
from user_management.models import OperatorAccount, PlatformAccount, Video, KnowledgeBase, KnowledgeBaseDocument, User
from .serializers import (
OperatorAccountSerializer, PlatformAccountSerializer, VideoSerializer,
KnowledgeBaseSerializer, KnowledgeBaseDocumentSerializer
)
logger = logging.getLogger(__name__)
class OperatorAccountViewSet(viewsets.ModelViewSet):
"""运营账号管理视图集"""
queryset = OperatorAccount.objects.all()
serializer_class = OperatorAccountSerializer
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
"""创建运营账号并自动创建对应的私有知识库"""
with transaction.atomic():
# 1. 创建运营账号
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
# 2. 手动保存数据而不是使用serializer.save()确保不传入UUID
operator_data = serializer.validated_data
operator = OperatorAccount.objects.create(**operator_data)
# 3. 为每个运营账号创建一个私有知识库
knowledge_base = KnowledgeBase.objects.create(
user_id=request.user.id, # 使用当前用户作为创建者
name=f"{operator.real_name}的运营知识库",
desc=f"用于存储{operator.real_name}({operator.username})相关的运营数据",
type='private',
department=operator.department
)
# 4. 创建知识库文档记录 - 运营信息文档
document_data = {
"name": f"{operator.real_name}_运营信息",
"paragraphs": [
{
"title": "运营账号基本信息",
"content": f"""
用户名: {operator.username}
真实姓名: {operator.real_name}
邮箱: {operator.email}
电话: {operator.phone}
职位: {operator.get_position_display()}
部门: {operator.department}
创建时间: {operator.created_at.strftime('%Y-%m-%d %H:%M:%S')}
uuid: {operator.uuid}
""",
"is_active": True
}
]
}
# 调用外部API创建文档
document_id = self._create_document(knowledge_base.external_id, document_data)
if document_id:
# 创建知识库文档记录
KnowledgeBaseDocument.objects.create(
knowledge_base=knowledge_base,
document_id=document_id,
document_name=document_data["name"],
external_id=document_id,
uploader_name=request.user.username
)
return Response({
"code": 200,
"message": "运营账号创建成功,并已创建对应知识库",
"data": {
"operator": self.get_serializer(operator).data,
"knowledge_base": {
"id": knowledge_base.id,
"name": knowledge_base.name,
"external_id": knowledge_base.external_id
}
}
}, status=status.HTTP_201_CREATED)
def destroy(self, request, *args, **kwargs):
"""删除运营账号并更新相关知识库状态"""
operator = self.get_object()
# 更新知识库状态或删除关联文档
knowledge_bases = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
)
for kb in knowledge_bases:
# 可以选择删除知识库,或者更新知识库状态
# 这里我们更新对应的文档状态
documents = KnowledgeBaseDocument.objects.filter(
knowledge_base=kb,
document_name__contains=operator.real_name
)
for doc in documents:
doc.status = 'deleted'
doc.save()
operator.is_active = False # 软删除
operator.save()
return Response({
"code": 200,
"message": "运营账号已停用,相关知识库文档已标记为删除",
"data": None
})
def _create_document(self, external_id, doc_data):
"""调用外部API创建文档"""
try:
if not external_id:
logger.error("创建文档失败知识库external_id为空")
return None
# 在实际应用中这里需要调用外部API创建文档
# 模拟创建文档并返回document_id
document_id = str(uuid.uuid4())
logger.info(f"模拟创建文档成功document_id: {document_id}")
return document_id
except Exception as e:
logger.error(f"创建文档失败: {str(e)}")
return None
class PlatformAccountViewSet(viewsets.ModelViewSet):
"""平台账号管理视图集"""
queryset = PlatformAccount.objects.all()
serializer_class = PlatformAccountSerializer
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
"""创建平台账号并记录到知识库"""
with transaction.atomic():
# 处理operator字段可能是字符串类型的ID
data = request.data.copy()
if 'operator' in data and isinstance(data['operator'], str):
try:
# 尝试通过ID查找运营账号
operator_id = data['operator']
try:
# 先尝试通过整数ID查找
operator_id_int = int(operator_id)
operator = OperatorAccount.objects.get(id=operator_id_int)
except (ValueError, OperatorAccount.DoesNotExist):
# 如果无法转换为整数或找不到对应账号,尝试通过用户名或真实姓名查找
operator = OperatorAccount.objects.filter(
Q(username=operator_id) | Q(real_name=operator_id)
).first()
if not operator:
return Response({
"code": 404,
"message": f"未找到运营账号: {operator_id}请提供有效的ID、用户名或真实姓名",
"data": None
}, status=status.HTTP_404_NOT_FOUND)
# 更新请求数据中的operator字段为找到的operator的ID
data['operator'] = operator.id
except Exception as e:
return Response({
"code": 400,
"message": f"处理运营账号ID时出错: {str(e)}",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 创建平台账号
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
# 手动创建平台账号不使用serializer.save()避免ID问题
platform_data = serializer.validated_data
platform_account = PlatformAccount.objects.create(**platform_data)
# 获取关联的运营账号
operator = platform_account.operator
# 查找对应的知识库
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base and knowledge_base.external_id:
# 创建平台账号文档
document_data = {
"name": f"{platform_account.account_name}_{platform_account.platform_name}_账号信息",
"paragraphs": [
{
"title": "平台账号基本信息",
"content": f"""
平台: {platform_account.get_platform_name_display()}
账号名称: {platform_account.account_name}
账号ID: {platform_account.account_id}
账号状态: {platform_account.get_status_display()}
粉丝数: {platform_account.followers_count}
账号链接: {platform_account.account_url}
账号描述: {platform_account.description or ''}
创建时间: {platform_account.created_at.strftime('%Y-%m-%d %H:%M:%S')}
最后登录: {platform_account.last_login.strftime('%Y-%m-%d %H:%M:%S') if platform_account.last_login else '从未登录'}
""",
"is_active": True
}
]
}
# 调用外部API创建文档
document_id = self._create_document(knowledge_base.external_id, document_data)
if document_id:
# 创建知识库文档记录
KnowledgeBaseDocument.objects.create(
knowledge_base=knowledge_base,
document_id=document_id,
document_name=document_data["name"],
external_id=document_id,
uploader_name=request.user.username
)
return Response({
"code": 200,
"message": "平台账号创建成功,并已添加到知识库",
"data": self.get_serializer(platform_account).data
}, status=status.HTTP_201_CREATED)
def destroy(self, request, *args, **kwargs):
"""删除平台账号并更新相关知识库文档"""
platform_account = self.get_object()
# 获取关联的运营账号
operator = platform_account.operator
# 查找对应的知识库
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base:
# 查找相关文档并标记为删除
documents = KnowledgeBaseDocument.objects.filter(
knowledge_base=knowledge_base
).filter(
Q(document_name__contains=platform_account.account_name) |
Q(document_name__contains=platform_account.platform_name)
)
for doc in documents:
doc.status = 'deleted'
doc.save()
# 删除平台账号
self.perform_destroy(platform_account)
return Response({
"code": 200,
"message": "平台账号已删除,相关知识库文档已标记为删除",
"data": None
})
def _create_document(self, external_id, doc_data):
"""调用外部API创建文档"""
try:
if not external_id:
logger.error("创建文档失败知识库external_id为空")
return None
# 在实际应用中这里需要调用外部API创建文档
# 模拟创建文档并返回document_id
document_id = str(uuid.uuid4())
logger.info(f"模拟创建文档成功document_id: {document_id}")
return document_id
except Exception as e:
logger.error(f"创建文档失败: {str(e)}")
return None
@action(detail=True, methods=['post'])
def update_followers(self, request, pk=None):
"""更新平台账号粉丝数并同步到知识库"""
platform_account = self.get_object()
followers_count = request.data.get('followers_count')
if not followers_count:
return Response({
"code": 400,
"message": "粉丝数不能为空",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 更新粉丝数
platform_account.followers_count = followers_count
platform_account.save()
# 同步到知识库
operator = platform_account.operator
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base:
# 查找相关文档
document = KnowledgeBaseDocument.objects.filter(
knowledge_base=knowledge_base,
status='active'
).filter(
Q(document_name__contains=platform_account.account_name) |
Q(document_name__contains=platform_account.platform_name)
).first()
if document:
# 这里应该调用外部API更新文档内容
# 但由于我们没有实际的API只做记录
logger.info(f"应当更新文档 {document.document_id} 的粉丝数为 {followers_count}")
return Response({
"code": 200,
"message": "粉丝数更新成功",
"data": {
"id": platform_account.id,
"account_name": platform_account.account_name,
"followers_count": platform_account.followers_count
}
})
class VideoViewSet(viewsets.ModelViewSet):
"""视频管理视图集"""
queryset = Video.objects.all()
serializer_class = VideoSerializer
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
"""创建视频并记录到知识库"""
with transaction.atomic():
# 处理platform_account字段可能是字符串类型的ID
data = request.data.copy()
if 'platform_account' in data and isinstance(data['platform_account'], str):
try:
# 尝试通过ID查找平台账号
platform_id = data['platform_account']
try:
# 先尝试通过整数ID查找
platform_id_int = int(platform_id)
platform = PlatformAccount.objects.get(id=platform_id_int)
except (ValueError, PlatformAccount.DoesNotExist):
# 如果无法转换为整数或找不到对应账号尝试通过账号名称或账号ID查找
platform = PlatformAccount.objects.filter(
Q(account_name=platform_id) | Q(account_id=platform_id)
).first()
if not platform:
return Response({
"code": 404,
"message": f"未找到平台账号: {platform_id}请提供有效的ID、账号名称或账号ID",
"data": None
}, status=status.HTTP_404_NOT_FOUND)
# 更新请求数据中的platform_account字段为找到的platform的ID
data['platform_account'] = platform.id
except Exception as e:
return Response({
"code": 400,
"message": f"处理平台账号ID时出错: {str(e)}",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 创建视频
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
# 手动创建视频不使用serializer.save()避免ID问题
video_data = serializer.validated_data
video = Video.objects.create(**video_data)
# 获取关联的平台账号和运营账号
platform_account = video.platform_account
operator = platform_account.operator
# 查找对应的知识库
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base and knowledge_base.external_id:
# 创建视频文档
document_data = {
"name": f"{video.title}_{platform_account.account_name}_视频信息",
"paragraphs": [
{
"title": "视频基本信息",
"content": f"""
标题: {video.title}
平台: {platform_account.get_platform_name_display()}
账号: {platform_account.account_name}
视频ID: {video.video_id}
发布时间: {video.publish_time.strftime('%Y-%m-%d %H:%M:%S') if video.publish_time else '未发布'}
视频链接: {video.video_url}
点赞数: {video.likes_count}
评论数: {video.comments_count}
分享数: {video.shares_count}
观看数: {video.views_count}
视频描述: {video.description or ''}
""",
"is_active": True
}
]
}
# 调用外部API创建文档
document_id = self._create_document(knowledge_base.external_id, document_data)
if document_id:
# 创建知识库文档记录
KnowledgeBaseDocument.objects.create(
knowledge_base=knowledge_base,
document_id=document_id,
document_name=document_data["name"],
external_id=document_id,
uploader_name=request.user.username
)
return Response({
"code": 200,
"message": "视频创建成功,并已添加到知识库",
"data": self.get_serializer(video).data
}, status=status.HTTP_201_CREATED)
def destroy(self, request, *args, **kwargs):
"""删除视频记录并更新相关知识库文档"""
video = self.get_object()
# 获取关联的平台账号和运营账号
platform_account = video.platform_account
operator = platform_account.operator
# 查找对应的知识库
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base:
# 查找相关文档并标记为删除
documents = KnowledgeBaseDocument.objects.filter(
knowledge_base=knowledge_base,
document_name__contains=video.title
)
for doc in documents:
doc.status = 'deleted'
doc.save()
# 删除视频记录
self.perform_destroy(video)
return Response({
"code": 200,
"message": "视频记录已删除,相关知识库文档已标记为删除",
"data": None
})
def _create_document(self, external_id, doc_data):
"""调用外部API创建文档"""
try:
if not external_id:
logger.error("创建文档失败知识库external_id为空")
return None
# 在实际应用中这里需要调用外部API创建文档
# 模拟创建文档并返回document_id
document_id = str(uuid.uuid4())
logger.info(f"模拟创建文档成功document_id: {document_id}")
return document_id
except Exception as e:
logger.error(f"创建文档失败: {str(e)}")
return None
@action(detail=True, methods=['post'])
def update_stats(self, request, pk=None):
"""更新视频统计数据并同步到知识库"""
video = self.get_object()
# 获取更新的统计数据
stats = {}
for field in ['views_count', 'likes_count', 'comments_count', 'shares_count']:
if field in request.data:
stats[field] = request.data[field]
if not stats:
return Response({
"code": 400,
"message": "没有提供任何统计数据",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 更新视频统计数据
for field, value in stats.items():
setattr(video, field, value)
video.save()
# 同步到知识库
# 在实际应用中应该调用外部API更新文档内容
platform_account = video.platform_account
operator = platform_account.operator
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base:
document = KnowledgeBaseDocument.objects.filter(
knowledge_base=knowledge_base,
document_name__contains=video.title,
status='active'
).first()
if document:
logger.info(f"应当更新文档 {document.document_id} 的视频统计数据")
return Response({
"code": 200,
"message": "视频统计数据更新成功",
"data": {
"id": video.id,
"title": video.title,
"views_count": video.views_count,
"likes_count": video.likes_count,
"comments_count": video.comments_count,
"shares_count": video.shares_count
}
})
@action(detail=True, methods=['post'])
def publish(self, request, pk=None):
"""发布视频并更新状态"""
video = self.get_object()
# 检查视频状态
if video.status not in ['draft', 'scheduled']:
return Response({
"code": 400,
"message": f"当前视频状态为 {video.get_status_display()},无法发布",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 获取视频URL
video_url = request.data.get('video_url')
if not video_url:
return Response({
"code": 400,
"message": "未提供视频URL",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 更新视频状态和URL
video.video_url = video_url
video.status = 'published'
video.publish_time = timezone.now()
video.save()
# 同步到知识库
# 在实际应用中应该调用外部API更新文档内容
platform_account = video.platform_account
operator = platform_account.operator
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base:
document = KnowledgeBaseDocument.objects.filter(
knowledge_base=knowledge_base,
document_name__contains=video.title,
status='active'
).first()
if document:
logger.info(f"应当更新文档 {document.document_id} 的视频发布状态")
return Response({
"code": 200,
"message": "视频已成功发布",
"data": {
"id": video.id,
"title": video.title,
"status": video.status,
"video_url": video.video_url,
"publish_time": video.publish_time
}
})
@action(detail=False, methods=['post'])
def upload_video(self, request):
"""上传视频文件并创建视频记录"""
try:
# 获取上传的视频文件
video_file = request.FILES.get('video_file')
if not video_file:
return Response({
"code": 400,
"message": "未提供视频文件",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 获取平台账号ID
platform_account_id = request.data.get('platform_account')
if not platform_account_id:
return Response({
"code": 400,
"message": "未提供平台账号ID",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
try:
platform_account = PlatformAccount.objects.get(id=platform_account_id)
except PlatformAccount.DoesNotExist:
return Response({
"code": 404,
"message": f"未找到ID为{platform_account_id}的平台账号",
"data": None
}, status=status.HTTP_404_NOT_FOUND)
# 创建保存视频的目录
import os
from django.conf import settings
# 确保文件保存目录存在
media_root = getattr(settings, 'MEDIA_ROOT', os.path.join(settings.BASE_DIR, 'media'))
videos_dir = os.path.join(media_root, 'videos')
account_dir = os.path.join(videos_dir, f"{platform_account.platform_name}_{platform_account.account_name}")
if not os.path.exists(videos_dir):
os.makedirs(videos_dir)
if not os.path.exists(account_dir):
os.makedirs(account_dir)
# 生成唯一的文件名
import time
timestamp = int(time.time())
file_name = f"{timestamp}_{video_file.name}"
file_path = os.path.join(account_dir, file_name)
# 保存视频文件
with open(file_path, 'wb+') as destination:
for chunk in video_file.chunks():
destination.write(chunk)
# 创建视频记录
video_data = {
'platform_account': platform_account,
'title': request.data.get('title', os.path.splitext(video_file.name)[0]),
'description': request.data.get('description', ''),
'local_path': file_path,
'status': 'draft',
'tags': request.data.get('tags', '')
}
# 如果提供了计划发布时间,则设置状态为已排期
scheduled_time = request.data.get('scheduled_time')
if scheduled_time:
from dateutil import parser
try:
parsed_time = parser.parse(scheduled_time)
video_data['scheduled_time'] = parsed_time
video_data['status'] = 'scheduled'
except Exception as e:
return Response({
"code": 400,
"message": f"计划发布时间格式错误: {str(e)}",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 创建视频记录
video = Video.objects.create(**video_data)
# 添加到知识库
self._add_to_knowledge_base(video, platform_account)
# 如果是已排期状态,创建定时任务
if video.status == 'scheduled':
self._create_publish_task(video)
return Response({
"code": 200,
"message": "视频上传成功",
"data": {
"id": video.id,
"title": video.title,
"status": video.get_status_display(),
"scheduled_time": video.scheduled_time
}
}, status=status.HTTP_201_CREATED)
except Exception as e:
logger.error(f"视频上传失败: {str(e)}")
return Response({
"code": 500,
"message": f"视频上传失败: {str(e)}",
"data": None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def _add_to_knowledge_base(self, video, platform_account):
"""将视频添加到知识库"""
# 获取关联的运营账号
operator = platform_account.operator
# 查找对应的知识库
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if knowledge_base and knowledge_base.external_id:
# 创建视频文档
document_data = {
"name": f"{video.title}_{platform_account.account_name}_视频信息",
"paragraphs": [
{
"title": "视频基本信息",
"content": f"""
标题: {video.title}
平台: {platform_account.get_platform_name_display()}
账号: {platform_account.account_name}
状态: {video.get_status_display()}
本地路径: {video.local_path}
计划发布时间: {video.scheduled_time.strftime('%Y-%m-%d %H:%M:%S') if video.scheduled_time else '未设置'}
视频描述: {video.description or ''}
标签: {video.tags or ''}
创建时间: {video.created_at.strftime('%Y-%m-%d %H:%M:%S')}
""",
"is_active": True
}
]
}
# 调用外部API创建文档
document_id = self._create_document(knowledge_base.external_id, document_data)
if document_id:
# 创建知识库文档记录
KnowledgeBaseDocument.objects.create(
knowledge_base=knowledge_base,
document_id=document_id,
document_name=document_data["name"],
external_id=document_id,
uploader_name="系统"
)
def _create_publish_task(self, video):
"""创建定时发布任务"""
try:
from django_celery_beat.models import PeriodicTask, CrontabSchedule
import json
from datetime import datetime
scheduled_time = video.scheduled_time
# 创建定时任务
schedule, _ = CrontabSchedule.objects.get_or_create(
minute=scheduled_time.minute,
hour=scheduled_time.hour,
day_of_month=scheduled_time.day,
month_of_year=scheduled_time.month,
)
# 创建周期性任务
task_name = f"Publish_Video_{video.id}_{datetime.now().timestamp()}"
PeriodicTask.objects.create(
name=task_name,
task='user_management.tasks.publish_scheduled_video',
crontab=schedule,
args=json.dumps([video.id]),
one_off=True, # 只执行一次
start_time=scheduled_time
)
logger.info(f"已创建视频 {video.id} 的定时发布任务,计划发布时间: {scheduled_time}")
except Exception as e:
logger.error(f"创建定时发布任务失败: {str(e)}")
# 记录错误但不中断流程
@action(detail=True, methods=['post'])
def manual_publish(self, request, pk=None):
"""手动发布视频"""
video = self.get_object()
# 检查视频状态是否允许发布
if video.status not in ['draft', 'scheduled']:
return Response({
"code": 400,
"message": f"当前视频状态为 {video.get_status_display()},无法发布",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 检查视频文件是否存在
if not video.local_path or not os.path.exists(video.local_path):
return Response({
"code": 400,
"message": "视频文件不存在,无法发布",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
# 执行发布任务
try:
from user_management.tasks import publish_scheduled_video
result = publish_scheduled_video(video.id)
if isinstance(result, dict) and result.get('success', False):
return Response({
"code": 200,
"message": "视频发布成功",
"data": {
"id": video.id,
"title": video.title,
"status": "published",
"video_url": result.get('video_url'),
"publish_time": result.get('publish_time')
}
})
else:
return Response({
"code": 500,
"message": f"发布失败: {result.get('error', '未知错误')}",
"data": None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"手动发布视频失败: {str(e)}")
return Response({
"code": 500,
"message": f"发布失败: {str(e)}",
"data": None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

Binary file not shown.

View File

@ -0,0 +1,4 @@
# Make sure Celery is loaded automatically when Django starts
from .celery import app as celery_app
__all__ = ['celery_app']

View File

@ -8,21 +8,18 @@ https://docs.djangoproject.com/en/5.1/howto/deployment/asgi/
"""
import os
import django
# Set the Django settings module first
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'role_based_system.settings')
django.setup()  # initialize Django before importing anything that touches models
# Only then import the remaining modules
from django.core.asgi import get_asgi_application
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from user_management.routing import websocket_urlpatterns
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'role_based_system.settings')
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": AuthMiddlewareStack(
URLRouter(websocket_urlpatterns)
URLRouter(
websocket_urlpatterns
)
),
})

View File

@ -0,0 +1,27 @@
import os
from celery import Celery
from celery.schedules import crontab
# Set the default Django settings module
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'role_based_system.settings')
app = Celery('role_based_system')
# Use a string here so the worker does not have to pickle the settings object
app.config_from_object('django.conf:settings', namespace='CELERY')
# Auto-discover tasks from all registered Django apps
app.autodiscover_tasks()
# Periodic task configuration
app.conf.beat_schedule = {
    # Check for unpublished (scheduled) videos once an hour
    'check-scheduled-videos-every-hour': {
        'task': 'user_management.tasks.check_scheduled_videos',
        'schedule': crontab(minute=0, hour='*/1'),  # run every hour
    },
}
@app.task(bind=True, ignore_result=True)
def debug_task(self):
print(f'Request: {self.request!r}')

View File

@ -58,6 +58,7 @@ INSTALLED_APPS = [
'user_management',
'channels_redis',
'corsheaders',
'operation',  # new operations management app
]
MIDDLEWARE = [
@ -168,15 +169,14 @@ ASGI_APPLICATION = "role_based_system.asgi.application"
# Channel Layers configuration
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("127.0.0.1", 6379)],
"capacity": 1500, # 消息队列容量
"expiry": 10, # 消息过期时间(秒)
},
},
"BACKEND": "channels.layers.InMemoryChannelLayer"
}
}
# Redis 配置
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB = 0
# CORS 配置
CORS_ALLOW_ALL_ORIGINS = True
@ -185,9 +185,9 @@ CORS_ALLOWED_ORIGINS = [
"http://localhost:8000",
"http://127.0.0.1:8000",
"http://124.222.236.141:8000",
"ws://localhost:8000", # 添加 WebSocket
"ws://127.0.0.1:8000", # 添加 WebSocket
"ws://124.222.236.141:8000", # 添加 WebSocket
"ws://localhost:8000",
"ws://127.0.0.1:8000",
"ws://124.222.236.141:8000",
]
# 允许的请求头
CORS_ALLOWED_HEADERS = [
@ -200,6 +200,7 @@ CORS_ALLOWED_HEADERS = [
'user-agent',
'x-csrftoken',
'x-requested-with',
'token',  # custom token header
]
# 允许的请求方法
@ -301,7 +302,7 @@ GMAIL_API_SCOPES = ['https://mail.google.com/']
GMAIL_TOPIC_NAME = 'gmail-watch-topic'
# Gmail webhook URL (the development environment uses a local tunneling address)
GMAIL_WEBHOOK_URL = 'https://a7a4-116-227-35-74.ngrok-free.app/api/user/gmail/webhook/'
GMAIL_WEBHOOK_URL = 'https://27b3-180-159-100-165.ngrok-free.app/api/user/gmail/webhook/'
# In production, use a fixed address like the following
# GMAIL_WEBHOOK_URL = 'https://your-domain/api/user/gmail/webhook/'
@ -317,7 +318,15 @@ USE_L10N = True
USE_TZ = True  # set to True to enable timezone support
# DeepSeek API configuration
DEEPSEEK_API_KEY = "sk-xqbujijjqqmlmlvkhvxeogqjtzslnhdtqxqgiyuhwpoqcjvf"
# Note: this needs to be updated to a valid DeepSeek API key
DEEPSEEK_API_KEY = "sk-xqbujijjqqmlmlvkhvxeogqjtzslnhdtqxqgiyuhwpoqcjvf" # replace with your actual, valid DeepSeek API key
SILICON_CLOUD_API_KEY = 'sk-xqbujijjqqmlmlvkhvxeogqjtzslnhdtqxqgiyuhwpoqcjvf'
# Celery configuration
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = TIME_ZONE
CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True

View File

@ -19,7 +19,11 @@ from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from user_management.views import gmail_webhook, feishu_sync_api, feishu_to_kb_api, check_creator_kb_api
from user_management.feishu_chat_views import process_feishu_table, run_auto_chat, feishu_user_goal, check_goal_status
from user_management.feishu_chat_views import (
process_feishu_table, run_auto_chat, feishu_user_goal, check_goal_status,
export_creators_data, download_exported_file
)
from feishu.feishu_ai_chat import api_export_talent_replies
urlpatterns = [
# Admin site
@ -28,10 +32,13 @@ urlpatterns = [
# API routes
path('api/', include('user_management.urls')),
# Operations management API routes
path('api/operation/', include('operation.urls')),
# Dedicated Gmail webhook route - matched directly at the root path
path('api/user/gmail/webhook/', gmail_webhook, name='root_gmail_webhook'),  # corrected path
path('api/user/', include('user_management.urls')),
path('gmail/webhook/', gmail_webhook, name='alt_gmail_webhook'),  # fallback path
# Feishu-related APIs
path('api/feishu/sync', feishu_sync_api, name='feishu_sync_api'),
path('api/feishu/to_kb', feishu_to_kb_api, name='feishu_to_kb_api'),
@ -43,6 +50,14 @@ urlpatterns = [
path('api/feishu/user-goal/', feishu_user_goal, name='direct_feishu_user_goal'),
path('api/feishu/check-goal/', check_goal_status, name='direct_check_goal_status'),
# Data export APIs
path('api/feishu/export-data/', export_creators_data, name='export_creators_data'),
path('api/feishu/download/<str:filename>/', download_exported_file, name='download_exported_file'),
# Talent reply export APIs
path('api/users/talent-replies/export/', api_export_talent_replies, name='export_talent_replies'),
path('api/export-talent-replies/', api_export_talent_replies, name='alt_export_talent_replies'),
# Media file serving
*static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),

View File

@ -1,16 +1,33 @@
"""
WSGI config for role_based_system project.
ASGI config for role_based_system project.
It exposes the WSGI callable as a module-level variable named ``application``.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/5.1/howto/deployment/wsgi/
https://docs.djangoproject.com/en/5.1/howto/deployment/asgi/
"""
import os
import django
from django.core.wsgi import get_wsgi_application
# Set the Django settings module first
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'role_based_system.settings')
django.setup()  # initialize Django before importing anything else
application = get_wsgi_application()
# Only then import the remaining modules
from django.core.asgi import get_asgi_application
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from channels.security.websocket import AllowedHostsOriginValidator
from user_management.routing import websocket_urlpatterns
from user_management.middleware import TokenAuthMiddleware
# Use TokenAuthMiddleware instead of AuthMiddlewareStack
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": AllowedHostsOriginValidator(
TokenAuthMiddleware(
URLRouter(websocket_urlpatterns)
)
),
})

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,463 @@
@api_view(['POST'])
@permission_classes([])
def gmail_webhook(request):
"""Gmail推送通知webhook"""
try:
# 导入需要的模块
import logging
import traceback
from django.utils import timezone
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.response import Response
# 获取用户模型
User = get_user_model()
# 导入Gmail集成相关的模块
from .models import GmailCredential
from .gmail_integration import GmailIntegration, GmailServiceManager
logger = logging.getLogger(__name__)
# 添加更详细的日志
logger.info(f"接收到Gmail webhook请求: 路径={request.path}, 方法={request.method}")
logger.info(f"请求头: {dict(request.headers)}")
logger.info(f"请求数据: {request.data}")
# 验证请求来源(可以添加额外的安全校验)
data = request.data
if not data:
return Response({
'code': 400,
'message': '无效的请求数据',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 处理数据
email_address = None
history_id = None
# 处理Google Pub/Sub消息格式
if isinstance(data, dict) and 'message' in data and 'data' in data['message']:
try:
import base64
import json
logger.info("检测到Google Pub/Sub消息格式")
# Base64解码data字段
encoded_data = data['message']['data']
decoded_data = base64.b64decode(encoded_data).decode('utf-8')
logger.info(f"解码后的数据: {decoded_data}")
# 解析JSON获取email和historyId
json_data = json.loads(decoded_data)
email_address = json_data.get('emailAddress')
history_id = json_data.get('historyId')
logger.info(f"从Pub/Sub消息中提取: email={email_address}, historyId={history_id}")
except Exception as decode_error:
logger.error(f"解析Pub/Sub消息失败: {str(decode_error)}")
logger.error(traceback.format_exc())
# 处理其他格式的数据
elif isinstance(data, dict):
# 直接使用JSON格式数据
logger.info("接收到JSON格式数据")
email_address = data.get('emailAddress')
history_id = data.get('historyId')
elif hasattr(data, 'decode'):
# 尝试解析原始数据
logger.info("接收到原始数据格式,尝试解析")
try:
import json
json_data = json.loads(data.decode('utf-8'))
email_address = json_data.get('emailAddress')
history_id = json_data.get('historyId')
except Exception as parse_error:
logger.error(f"解析请求数据失败: {str(parse_error)}")
email_address = None
history_id = None
else:
# 尝试从请求参数获取
logger.info("尝试从请求参数获取数据")
email_address = request.GET.get('emailAddress') or request.POST.get('emailAddress')
history_id = request.GET.get('historyId') or request.POST.get('historyId')
logger.info(f"提取的邮箱: {email_address}, 历史ID: {history_id}")
if not email_address or not history_id:
return Response({
'code': 400,
'message': '缺少必要的参数',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 查找用户和认证信息 - 优化的查找逻辑
user = None
credential = None
# 1. 首先尝试直接通过Gmail凭证表查找
credential = GmailCredential.objects.filter(
gmail_email=email_address,
is_active=True
).select_related('user').order_by('-is_default', '-updated_at').first()
if credential:
user = credential.user
logger.info(f"通过gmail_email直接找到用户和凭证: 用户={user.email}, 凭证ID={credential.id}")
else:
# 2. 如果没找到,尝试通过用户邮箱查找
user = User.objects.filter(email=email_address).first()
if user:
logger.info(f"通过用户邮箱找到用户: {user.email}")
# 为该用户查找任何有效的Gmail凭证
credential = GmailCredential.objects.filter(
user=user,
is_active=True
).order_by('-is_default', '-updated_at').first()
if credential:
logger.info(f"为用户 {user.email} 找到有效的Gmail凭证: {credential.id}")
else:
logger.error(f"无法找到与{email_address}关联的用户或凭证")
return Response({
'code': 404,
'message': f'找不到与 {email_address} 关联的用户',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
if not credential:
logger.error(f"用户 {user.email} 没有有效的Gmail凭证")
return Response({
'code': 404,
'message': f'找不到用户 {user.email} 的Gmail凭证',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
# 更新history_id无论如何都记录这次历史ID
credential.last_history_id = history_id
credential.save()
# 清除可能存在的缓存实例,确保使用最新凭证
GmailServiceManager.clear_instance(user, str(credential.id))
# 检查凭证是否需要重新授权
notification_queued = False
if credential.needs_reauth or not credential.credentials:
logger.warning(f"Gmail凭证需要重新授权将通知保存到队列: {email_address}")
# 保存到通知队列
from .models import GmailNotificationQueue
import json
# 将通知数据序列化
try:
notification_json = json.dumps(data)
except:
notification_json = f'{{"emailAddress": "{email_address}", "historyId": "{history_id}"}}'
# 创建队列记录
GmailNotificationQueue.objects.create(
user=user,
gmail_credential=credential,
email=email_address,
history_id=str(history_id),
notification_data=notification_json,
processed=False
)
logger.info(f"Gmail通知已保存到队列等待用户重新授权: {email_address}")
notification_queued = True
# 直接返回成功,但记录需要用户重新授权
return Response({
'code': 202, # Accepted
'message': '通知已保存到队列,等待用户重新授权',
'data': {
'user_id': str(user.id),
'history_id': history_id,
'needs_reauth': True
}
})
# 如果请求中包含达人邮箱,直接处理特定达人的邮件
talent_email = data.get('talent_email') or request.GET.get('talent_email')
if talent_email and user:
logger.info(f"检测到特定达人邮箱: {talent_email},将直接处理其最近邮件")
try:
# 创建Gmail集成实例 - 使用明确的凭证ID
integration = GmailIntegration(user=user, gmail_credential_id=str(credential.id))
if integration.authenticate():
# 获取达人最近的邮件
recent_emails = integration.get_recent_emails(
from_email=talent_email,
max_results=5 # 限制获取最近5封
)
if recent_emails:
logger.info(f"找到 {len(recent_emails)} 封来自 {talent_email} 的最近邮件")
# 创建或获取知识库
knowledge_base, created = integration.create_talent_knowledge_base(talent_email)
# 保存对话
result = integration.save_conversations_to_knowledge_base(recent_emails, knowledge_base)
logger.info(f"已处理达人 {talent_email} 的最近邮件: {result}")
else:
logger.info(f"没有找到来自 {talent_email} 的最近邮件")
else:
logger.error("Gmail认证失败无法处理特定达人邮件")
# 如果还没有保存到队列,保存通知数据
if not notification_queued:
# 保存到通知队列
from .models import GmailNotificationQueue
import json
try:
notification_json = json.dumps(data)
except:
notification_json = f'{{"emailAddress": "{email_address}", "historyId": "{history_id}", "talent_email": "{talent_email}"}}'
GmailNotificationQueue.objects.create(
user=user,
gmail_credential=credential,
email=email_address,
history_id=str(history_id),
notification_data=notification_json,
processed=False
)
logger.info(f"Gmail通知(含达人邮箱)已保存到队列: {email_address}, 达人: {talent_email}")
except Exception as talent_error:
logger.error(f"处理达人邮件失败: {str(talent_error)}")
logger.error(traceback.format_exc())
# 处理普通通知
try:
# 创建Gmail集成实例 - 明确使用找到的凭证ID
integration = GmailIntegration(user=user, gmail_credential_id=str(credential.id))
# 记录详细的凭证信息,帮助排查问题
logger.info(f"处理普通通知: 用户ID={user.id}, 凭证ID={credential.id}, Gmail邮箱={credential.gmail_email}")
auth_success = integration.authenticate()
if auth_success:
logger.info(f"Gmail认证成功开始处理通知: {email_address}")
# 强制设置最小历史ID差值确保能获取新消息
try:
# 从凭证中获取历史ID并确保作为整数比较
last_history_id = int(credential.last_history_id or 0)
current_history_id = int(history_id)
# 如果历史ID没有变化设置一个小的偏移量确保获取最近消息
if current_history_id <= last_history_id:
# 设置较小的历史ID以确保获取最近的消息
adjusted_history_id = max(1, last_history_id - 10)
logger.info(f"调整历史ID: {last_history_id} -> {adjusted_history_id},以确保能获取最近的消息")
# 修改请求中的历史ID
if isinstance(data, dict) and 'message' in data and 'data' in data['message']:
# 对于Pub/Sub格式修改解码后的JSON
try:
import base64
import json
decoded_data = base64.b64decode(data['message']['data']).decode('utf-8')
json_data = json.loads(decoded_data)
json_data['historyId'] = str(adjusted_history_id)
data['message']['data'] = base64.b64encode(json.dumps(json_data).encode('utf-8')).decode('utf-8')
logger.info(f"已调整Pub/Sub消息中的历史ID为: {adjusted_history_id}")
except Exception as adjust_error:
logger.error(f"调整历史ID失败: {str(adjust_error)}")
else:
# 直接修改data中的historyId
if isinstance(data, dict) and 'historyId' in data:
data['historyId'] = str(adjusted_history_id)
logger.info(f"已调整请求中的历史ID为: {adjusted_history_id}")
except Exception as history_adjust_error:
logger.error(f"历史ID调整失败: {str(history_adjust_error)}")
result = integration.process_notification(data)
# 日志记录处理结果
if result:
logger.info(f"Gmail通知处理成功检测到新消息: {email_address}")
else:
logger.warning(f"Gmail通知处理完成但未检测到新消息: {email_address}")
# 如果处理成功尝试通过WebSocket发送通知
if result:
try:
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
# 获取Channel Layer
channel_layer = get_channel_layer()
if channel_layer:
# 发送WebSocket消息
async_to_sync(channel_layer.group_send)(
f"notification_user_{user.id}",
{
"type": "notification",
"data": {
"message_type": "gmail_update",
"message": "您的Gmail有新消息已自动处理",
"history_id": history_id,
"timestamp": timezone.now().isoformat()
}
}
)
logger.info(f"发送WebSocket通知成功: user_id={user.id}")
except Exception as ws_error:
logger.error(f"发送WebSocket通知失败: {str(ws_error)}")
logger.info(f"Gmail通知处理成功: {email_address}")
return Response({
'code': 200,
'message': '通知已处理',
'data': {
'user_id': str(user.id),
'history_id': history_id,
'success': True,
'new_messages': result
}
})
else:
# 认证失败,保存通知到队列
logger.error(f"Gmail认证失败: {email_address}, 用户ID={user.id}, 凭证ID={credential.id}")
# 尝试获取详细的认证失败原因
try:
# 尝试刷新令牌
refresh_result = integration.refresh_token()
if refresh_result:
logger.info(f"令牌刷新成功,将重新尝试处理")
result = integration.process_notification(data)
if result:
logger.info(f"刷新令牌后处理成功!")
return Response({
'code': 200,
'message': '通知已处理(令牌刷新后)',
'data': {
'user_id': str(user.id),
'history_id': history_id,
'success': True
}
})
except Exception as refresh_error:
logger.error(f"尝试刷新令牌失败: {str(refresh_error)}")
# 如果还没有保存到队列,保存通知数据
if not notification_queued:
# 保存到通知队列
from .models import GmailNotificationQueue
import json
try:
notification_json = json.dumps(data)
except:
notification_json = f'{{"emailAddress": "{email_address}", "historyId": "{history_id}"}}'
# 标记凭证需要重新授权
credential.needs_reauth = True
credential.save()
logger.info(f"已标记凭证 {credential.id} 需要重新授权")
GmailNotificationQueue.objects.create(
user=user,
gmail_credential=credential,
email=email_address,
history_id=str(history_id),
notification_data=notification_json,
processed=False
)
logger.info(f"Gmail通知已保存到队列: {email_address}")
# 返回处理成功,但告知需要重新授权
return Response({
'code': 202, # Accepted
'message': '通知已保存到队列,等待重新获取授权',
'data': {
'user_id': str(user.id),
'history_id': history_id,
'needs_reauth': True
}
})
except Exception as process_error:
logger.error(f"处理Gmail通知失败: {str(process_error)}")
logger.error(traceback.format_exc())
# 保存到通知队列
if not notification_queued:
try:
from .models import GmailNotificationQueue
import json
try:
notification_json = json.dumps(data)
except:
notification_json = f'{{"emailAddress": "{email_address}", "historyId": "{history_id}"}}'
# 标记凭证需要重新授权 - 可能是令牌问题导致的错误
error_msg = str(process_error).lower()
if "invalid_grant" in error_msg or "token" in error_msg or "auth" in error_msg or "认证" in error_msg:
credential.needs_reauth = True
credential.save()
logger.info(f"根据错误信息标记凭证 {credential.id} 需要重新授权")
GmailNotificationQueue.objects.create(
user=user,
gmail_credential=credential,
email=email_address,
history_id=str(history_id),
notification_data=notification_json,
processed=False,
error_message=str(process_error)[:255]
)
logger.info(f"由于处理错误Gmail通知已保存到队列: {email_address}")
except Exception as queue_error:
logger.error(f"保存通知到队列失败: {str(queue_error)}")
# 仍然返回成功防止Google重试导致重复通知
return Response({
'code': 202,
'message': '通知已保存,稍后处理',
'data': {
'user_id': str(user.id),
'history_id': history_id,
'error': str(process_error)[:100] # 截断错误信息
}
})
except Exception as e:
logger.error(f"处理Gmail webhook失败: {str(e)}")
logger.error(traceback.format_exc())
# 尝试更安全的响应,尽可能提供有用信息
try:
# 如果已经提取了邮箱和历史ID等信息记录在响应中
response_data = {
'error': str(e)[:200]
}
if 'email_address' in locals() and email_address:
response_data['email_address'] = email_address
if 'history_id' in locals() and history_id:
response_data['history_id'] = history_id
if 'user' in locals() and user:
response_data['user_id'] = str(user.id)
return Response({
'code': 500,
'message': '处理通知失败',
'data': response_data
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except:
# 最后的备用方案
return Response({
'code': 500,
'message': '处理通知失败',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

76
test_config.py Normal file

@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
"""测试配置文件"""
# API配置
API_BASE_URL = "http://127.0.0.1:8000"
AUTH_TOKEN = "7831a86588bc08d025e4c9bd668de3b7940f7634"
# 请求头配置
HEADERS = {
"Authorization": f"Token {AUTH_TOKEN}",
"User-Agent": "Apifox/1.0.0 (https://apifox.com)",
"Content-Type": "application/json",
"Accept": "*/*",
"Host": "127.0.0.1:8000",
"Connection": "keep-alive",
"Cookie": "csrftoken=FIYybrNUqefEo2z9QyozmYqQhxTMSFPo; sessionid=ckvdyvy4vzsyfzxg7fie7xbhmxboqegv"
}
# 测试用例
TEST_CASES = [
{
"name": "基础总结测试",
"question": "总结下",
"conversation_id": "10b34248-2625-434b-a493-6d43520c837a",
"dataset_id": "8390ca43-6e63-4df9-b0b9-6cb20e1b38af",
"expected_response_time": 10.0
},
{
"name": "空内容测试",
"question": "",
"conversation_id": "10b34248-2625-434b-a493-6d43520c837a",
"dataset_id": "8390ca43-6e63-4df9-b0b9-6cb20e1b38af",
"expected_error": True
},
{
"name": "中文问答测试",
"question": "Python是什么",
"conversation_id": "10b34248-2625-434b-a493-6d43520c837a",
"dataset_id": "8390ca43-6e63-4df9-b0b9-6cb20e1b38af",
"expected_response_time": 10.0
}
]
# 日志配置
LOG_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'standard',
'class': 'logging.StreamHandler',
},
'file': {
'level': 'DEBUG',
'formatter': 'standard',
'class': 'logging.FileHandler',
'filename': 'stream_test.log',
'mode': 'w',
'encoding': 'utf-8',
}
},
'loggers': {
'stream_test': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True
}
}
}

129
test_external_api.py Normal file

@ -0,0 +1,129 @@
import requests
import json
import sys
import time
from datetime import datetime
# 外部API地址
API_URL = "http://81.69.223.133:48329/api/application/chat_message/94922d0e-20e5-11f0-ac62-0242ac120002"
# 测试数据
test_data = {
"message": "1+1",
"re_chat": False,
"stream": True
}
# 请求头
headers = {
"Content-Type": "application/json",
"User-Agent": "Apifox/1.0.0 (https://apifox.com)",
"Accept": "*/*",
"Host": "81.69.223.133:48329",
"Connection": "keep-alive"
}
print(f"API URL: {API_URL}")
print(f"发送请求: {json.dumps(test_data, ensure_ascii=False)}")
print("等待响应...")
start_time = time.time()
# 发送请求并获取流式响应
response = requests.post(
url=API_URL,
json=test_data,
headers=headers,
stream=True # 启用流式传输
)
print(f"响应状态码: {response.status_code}")
print(f"收到初始响应时间: {datetime.now().strftime('%H:%M:%S.%f')[:-3]}")
if response.status_code != 200 and response.status_code != 201:
print(f"错误: {response.status_code}, {response.text}")
sys.exit(1)
# 处理流式响应
print("\n----- 开始接收流式响应 -----\n")
buffer = ""
last_time = start_time
response_count = 0
full_content = "" # 用于收集完整内容
# 使用小的chunk_size更好地展示流式效果
for chunk in response.iter_content(chunk_size=1):
if chunk:
current_time = time.time()
time_diff = current_time - last_time
elapsed = current_time - start_time
# 解码字节为字符串
try:
            byte_buffer += chunk
            chunk_str = byte_buffer.decode('utf-8')
            byte_buffer = b""
            buffer += chunk_str
# 检查是否有完整的数据行
if '\n\n' in buffer:
lines = buffer.split('\n\n')
# 除了最后一行,其他都是完整的
for line in lines[:-1]:
if line.strip():
response_count += 1
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
print(f"\n[{timestamp}] 响应 #{response_count} (距上次: {time_diff:.3f}s, 总计: {elapsed:.3f}s)")
print(f"{line}")
# 如果想解析JSON内容
if line.startswith('data: '):
try:
json_str = line[6:] # 去掉 "data: " 前缀
data = json.loads(json_str)
# 提取并累积内容
if 'content' in data:
content = data.get('content', '')
full_content += content
# 如果内容太长只显示前50个字符
if len(content) > 50:
content_display = content[:50] + "..."
else:
content_display = content
print(f"内容片段: '{content_display}'")
print(f"是否结束: {data.get('is_end', False)}")
except json.JSONDecodeError:
pass
# 保留最后一个可能不完整的行
buffer = lines[-1]
# 重置计时器以准确测量下一个数据包间隔
last_time = current_time
        except UnicodeDecodeError:
            # 字节序列不完整(多字节字符被块边界截断),保留在 byte_buffer 中,等下一个块一起解码
            pass
# 处理可能的剩余数据
if buffer.strip():
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
elapsed = time.time() - start_time
print(f"\n[{timestamp}] 最终响应 (总计: {elapsed:.3f}s)")
print(f"{buffer}")
# 尝试处理最后一段数据
if buffer.startswith('data: '):
try:
json_str = buffer[6:] # 去掉 "data: " 前缀
data = json.loads(json_str)
if 'content' in data:
full_content += data.get('content', '')
except:
pass
total_time = time.time() - start_time
print(f"\n----- 响应结束 -----")
print(f"总响应时间: {total_time:.3f}秒, 共接收 {response_count} 个数据包")
print(f"完整收集的内容: {full_content}")

106
test_stream_api.py Normal file
View File

@ -0,0 +1,106 @@
import requests
import json
import sys
import time
from datetime import datetime
# 接口地址 - 根据curl命令更新
API_URL = "http://127.0.0.1:8000/api/chat-history/" # 注意没有create路径
# 测试数据
test_data = {
"question": "总结下",
"conversation_id": "10b34248-2625-434b-a493-6d43520c837a",
"dataset_id_list": ["8390ca43-6e63-4df9-b0b9-6cb20e1b38af"],
"stream": True
}
# 请求头 - 添加认证令牌和其他头信息
headers = {
"Content-Type": "application/json",
"Authorization": "Token 7831a86588bc08d025e4c9bd668de3b7940f7634",
"User-Agent": "Apifox/1.0.0 (https://apifox.com)",
"Accept": "*/*",
"Host": "127.0.0.1:8000",
"Connection": "keep-alive"
}
print(f"API URL: {API_URL}")
print(f"发送请求: {json.dumps(test_data, ensure_ascii=False)}")
print("等待响应...")
start_time = time.time()
# 发送请求并获取流式响应
response = requests.post(
url=API_URL,
json=test_data,
headers=headers,
stream=True # 启用流式传输
)
print(f"响应状态码: {response.status_code}")
print(f"收到初始响应时间: {datetime.now().strftime('%H:%M:%S.%f')[:-3]}")
if response.status_code != 200 and response.status_code != 201:
print(f"错误: {response.status_code}, {response.text}")
sys.exit(1)
# 处理流式响应
print("\n----- 开始接收流式响应 -----\n")
buffer = ""
last_time = start_time
response_count = 0
for chunk in response.iter_content(chunk_size=1024):
if chunk:
current_time = time.time()
time_diff = current_time - last_time
last_time = current_time
elapsed = current_time - start_time
# 解码字节为字符串
chunk_str = chunk.decode('utf-8')
buffer += chunk_str
# 检查是否有完整的数据行
if '\n\n' in buffer:
lines = buffer.split('\n\n')
# 除了最后一行,其他都是完整的
for line in lines[:-1]:
if line.strip():
response_count += 1
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
print(f"\n[{timestamp}] 响应 #{response_count} (距上次: {time_diff:.3f}s, 总计: {elapsed:.3f}s)")
print(f"{line}")
# 如果想解析JSON内容可以取消下面的注释
if line.startswith('data: '):
try:
json_str = line[6:] # 去掉 "data: " 前缀
data = json.loads(json_str)
if data.get('content'):
content = data.get('content')
# 如果内容太长只显示前30个字符
if len(content) > 30:
content = content[:30] + "..."
print(f"内容片段: {content}")
except json.JSONDecodeError:
pass
# 保留最后一个可能不完整的行
buffer = lines[-1]
# 重置计时器
last_time = time.time()
# 处理可能的剩余数据
if buffer.strip():
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
elapsed = time.time() - start_time
print(f"\n[{timestamp}] 最终响应 (总计: {elapsed:.3f}s)")
print(f"{buffer}")
total_time = time.time() - start_time
print(f"\n----- 响应结束 -----")
print(f"总响应时间: {total_time:.3f}秒, 共接收 {response_count} 个数据包")

161
test_stream_response.py Normal file
View File

@ -0,0 +1,161 @@
# -*- coding: utf-8 -*-
"""流式响应测试脚本"""
import requests
import json
import time
from datetime import datetime
import argparse
import logging.config
from test_config import API_BASE_URL, AUTH_TOKEN, TEST_CASES, LOG_CONFIG, HEADERS
# 配置日志
logging.config.dictConfig(LOG_CONFIG)
logger = logging.getLogger('stream_test')
class StreamResponseTester:
def __init__(self, base_url, headers):
self.base_url = base_url
self.headers = headers
def run_test(self, test_case):
"""运行单个测试用例"""
logger.info(f"\n开始测试: {test_case['name']}")
# 构建请求数据
test_data = {
"question": test_case["question"],
"conversation_id": test_case["conversation_id"],
"dataset_id_list": [test_case["dataset_id"]],
"stream": True
}
start_time = time.time()
response_count = 0
content_length = 0
full_content = ""
try:
# 发送请求
url = f"{self.base_url}/api/chat-history/"
logger.info(f"请求URL: {url}")
logger.info(f"请求数据: {json.dumps(test_data, ensure_ascii=False)}")
response = requests.post(
url=url,
json=test_data,
headers=self.headers,
stream=True
)
# 检查响应状态
if response.status_code != 200:
if test_case.get('expected_error', False):
logger.info("测试通过:预期的错误响应")
return True
logger.error(f"请求失败: {response.status_code}, {response.text}")
return False
# 处理流式响应
buffer = ""
            last_time = start_time
            byte_buffer = b""  # 暂存被拆分到多个块的未完整 UTF-8 字节
for chunk in response.iter_content(chunk_size=1):
if chunk:
current_time = time.time()
chunk_time = current_time - last_time
try:
                        byte_buffer += chunk
                        chunk_str = byte_buffer.decode('utf-8')
                        byte_buffer = b""
                        buffer += chunk_str
if '\n\n' in buffer:
lines = buffer.split('\n\n')
for line in lines[:-1]:
if line.startswith('data: '):
response_count += 1
try:
data = json.loads(line[6:])
if 'data' in data and 'content' in data['data']:
content = data['data']['content']
prev_length = content_length
content_length += len(content)
full_content += content
# 记录响应信息
logger.debug(
f"响应 #{response_count}: "
f"+{content_length - prev_length} 字符, "
f"间隔: {chunk_time:.3f}s"
)
# 检查是否结束
if data['data'].get('is_end', False):
total_time = time.time() - start_time
logger.info(f"\n测试结果:")
logger.info(f"总响应时间: {total_time:.3f}")
logger.info(f"数据包数量: {response_count}")
logger.info(f"内容长度: {content_length} 字符")
logger.info(f"完整内容: {full_content}")
# 检查响应时间是否符合预期
if 'expected_response_time' in test_case:
if total_time <= test_case['expected_response_time']:
logger.info("响应时间符合预期")
else:
logger.warning(
f"响应时间超出预期: "
f"{total_time:.3f}s > {test_case['expected_response_time']}s"
)
return True
except json.JSONDecodeError as e:
logger.error(f"JSON解析错误: {e}")
if not test_case.get('expected_error', False):
return False
buffer = lines[-1]
last_time = current_time
                    except UnicodeDecodeError:
                        logger.debug("字节序列不完整,累积到下一个数据块再解码")
                        continue
return True
except Exception as e:
logger.error(f"测试执行错误: {str(e)}")
return False
def main():
parser = argparse.ArgumentParser(description='流式响应测试工具')
parser.add_argument('--test-case', type=int, help='指定要运行的测试用例索引')
args = parser.parse_args()
# 创建测试器实例
tester = StreamResponseTester(API_BASE_URL, HEADERS)
if args.test_case is not None:
# 运行指定的测试用例
if 0 <= args.test_case < len(TEST_CASES):
test_case = TEST_CASES[args.test_case]
success = tester.run_test(test_case)
logger.info(f"\n测试用例 {args.test_case} {'通过' if success else '失败'}")
else:
logger.error(f"无效的测试用例索引: {args.test_case}")
else:
# 运行所有测试用例
total_cases = len(TEST_CASES)
passed_cases = 0
for i, test_case in enumerate(TEST_CASES):
logger.info(f"\n运行测试用例 {i+1}/{total_cases}")
if tester.run_test(test_case):
passed_cases += 1
logger.info(f"\n测试完成: {passed_cases}/{total_cases} 个测试用例通过")
if __name__ == '__main__':
main()
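
# 运行示例:
#   python test_stream_response.py                # 运行 test_config.TEST_CASES 中的全部用例
#   python test_stream_response.py --test-case 0  # 仅运行索引为 0 的测试用例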

154
test_websocket_stream.py Normal file
View File

@ -0,0 +1,154 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WebSocket流式输出测试脚本
"""
import websocket
import json
import sys
import time
import uuid
from datetime import datetime
import threading
import ssl
import argparse
# 测试配置
WS_URL = "ws://127.0.0.1:8000/ws/chat/stream/" # WebSocket URL
TOKEN = "7831a86588bc08d025e4c9bd668de3b7940f7634" # 替换为你的实际认证令牌
# 测试数据
test_data = {
"question": "什么是流式输出?",
"conversation_id": str(uuid.uuid4()), # 随机生成一个会话ID
"dataset_id_list": ["8390ca43-6e63-4df9-b0b9-6cb20e1b38af"] # 替换为实际的知识库ID
}
# 全局变量
response_count = 0
start_time = None
full_content = ""
is_connected = False
def on_message(ws, message):
"""
处理接收到的WebSocket消息
"""
global response_count, full_content
try:
# 解析JSON响应
response_count += 1
data = json.loads(message)
# 获取当前时间和距开始的时间
current_time = time.time()
elapsed = current_time - start_time
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
# 打印基本信息
print(f"\n[{timestamp}] 响应 #{response_count} (总计: {elapsed:.3f}s)")
# 检查消息类型
msg_type = data.get('message', '')
if msg_type == '开始流式传输':
print("=== 开始接收流式内容 ===")
elif msg_type == 'partial':
# 显示部分内容
if 'data' in data and 'content' in data['data']:
content = data['data']['content']
full_content += content
# 如果内容太长只显示前50个字符
display_content = content[:50] + "..." if len(content) > 50 else content
print(f"部分内容: {display_content}")
elif msg_type == '完成':
# 显示完整信息
if 'data' in data:
if 'title' in data['data']:
print(f"标题: {data['data']['title']}")
if 'content' in data['data']:
print(f"完整内容长度: {len(data['data']['content'])} 字符")
print("=== 流式传输完成 ===")
# 如果是错误消息
if data.get('code') == 500:
print(f"错误: {data.get('message')}")
ws.close()
except json.JSONDecodeError as e:
print(f"JSON解析错误: {str(e)}")
except Exception as e:
print(f"处理消息时出错: {str(e)}")
def on_error(ws, error):
"""处理WebSocket错误"""
print(f"发生错误: {str(error)}")
def on_close(ws, close_status_code, close_msg):
"""处理WebSocket连接关闭"""
global is_connected
is_connected = False
total_time = time.time() - start_time
print(f"\n===== 连接关闭 =====")
print(f"状态码: {close_status_code}, 消息: {close_msg}")
print(f"总响应时间: {total_time:.3f}秒, 共接收 {response_count} 个数据包")
print(f"接收到的完整内容长度: {len(full_content)} 字符")
def on_open(ws):
"""处理WebSocket连接成功"""
global start_time, is_connected
is_connected = True
print("WebSocket连接已建立")
print(f"发送测试数据: {json.dumps(test_data, ensure_ascii=False)}")
# 记录开始时间
start_time = time.time()
# 发送测试数据
ws.send(json.dumps(test_data))
print("数据已发送,等待响应...")
def main():
"""主函数"""
parser = argparse.ArgumentParser(description='WebSocket流式输出测试工具')
parser.add_argument('--url', type=str, default=WS_URL, help='WebSocket URL')
parser.add_argument('--token', type=str, default=TOKEN, help='认证令牌')
parser.add_argument('--question', type=str, default=test_data['question'], help='要发送的问题')
args = parser.parse_args()
# 更新测试数据
url = f"{args.url}?token={args.token}"
test_data['question'] = args.question
print(f"连接到: {url}")
# 设置更详细的日志级别(可选)
# websocket.enableTrace(True)
# 创建WebSocket连接
ws = websocket.WebSocketApp(
url,
on_open=on_open,
on_message=on_message,
on_error=on_error,
on_close=on_close
)
# 设置运行超时(可选)
# 如果需要SSL连接
# ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
# 启动WebSocket连接
ws.run_forever()
# 等待一小段时间以确保所有消息都被处理
time.sleep(1)
if __name__ == "__main__":
main()

View File

@ -5,6 +5,12 @@ from channels.exceptions import StopConsumer
import logging
from rest_framework.authtoken.models import Token
from urllib.parse import parse_qs
from .models import ChatHistory, KnowledgeBase
import aiohttp
import asyncio
from django.conf import settings
import uuid
import traceback
logger = logging.getLogger(__name__)
@ -69,3 +75,816 @@ class NotificationConsumer(AsyncWebsocketConsumer):
logger.info(f"已发送通知给用户 {self.user.username}")
except Exception as e:
logger.error(f"发送通知消息时发生错误: {str(e)}")
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
"""建立 WebSocket 连接"""
try:
# 从URL参数中获取token
query_string = self.scope.get('query_string', b'').decode()
query_params = parse_qs(query_string)
token_key = query_params.get('token', [''])[0]
if not token_key:
logger.warning("WebSocket连接尝试但没有提供token")
await self.close()
return
# 验证token
self.user = await self.get_user_from_token(token_key)
if not self.user:
logger.warning(f"WebSocket连接尝试但token无效: {token_key}")
await self.close()
return
# 将用户信息存储在scope中
self.scope["user"] = self.user
await self.accept()
logger.info(f"用户 {self.user.username} WebSocket连接成功")
except Exception as e:
logger.error(f"WebSocket连接错误: {str(e)}")
await self.close()
@database_sync_to_async
def get_user_from_token(self, token_key):
try:
token = Token.objects.select_related('user').get(key=token_key)
return token.user
except Token.DoesNotExist:
return None
async def disconnect(self, close_code):
"""关闭 WebSocket 连接"""
pass
async def receive(self, text_data):
"""接收消息并处理"""
try:
data = json.loads(text_data)
# 验证必要字段
if 'question' not in data or 'conversation_id' not in data:
await self.send_error("缺少必要字段")
return
# 创建问题记录
question_record = await self.create_question_record(data)
if not question_record:
return
# 开始流式处理
await self.stream_answer(question_record, data)
except Exception as e:
logger.error(f"处理消息时出错: {str(e)}")
await self.send_error(f"处理消息时出错: {str(e)}")
@database_sync_to_async
def _create_question_record_sync(self, data):
"""同步创建问题记录"""
try:
# 获取会话历史记录
conversation_id = data['conversation_id']
existing_records = ChatHistory.objects.filter(
conversation_id=conversation_id
).order_by('created_at')
# 获取或创建元数据
if existing_records.exists():
first_record = existing_records.first()
metadata = first_record.metadata or {}
dataset_ids = metadata.get('dataset_id_list', [])
knowledge_bases = []
# 验证知识库权限
for kb_id in dataset_ids:
try:
kb = KnowledgeBase.objects.get(id=kb_id)
if not self.check_knowledge_base_permission(kb, self.scope["user"], 'read'):
raise Exception(f'无权访问知识库: {kb.name}')
knowledge_bases.append(kb)
except KnowledgeBase.DoesNotExist:
raise Exception(f'知识库不存在: {kb_id}')
else:
# 新会话处理
dataset_ids = data.get('dataset_id_list', [])
if not dataset_ids:
raise Exception('新会话需要提供知识库ID')
knowledge_bases = []
for kb_id in dataset_ids:
kb = KnowledgeBase.objects.get(id=kb_id)
if not self.check_knowledge_base_permission(kb, self.scope["user"], 'read'):
raise Exception(f'无权访问知识库: {kb.name}')
knowledge_bases.append(kb)
metadata = {
'model_id': data.get('model_id', '7a214d0e-e65e-11ef-9f4a-0242ac120006'),
'dataset_id_list': [str(kb.id) for kb in knowledge_bases],
'dataset_external_id_list': [str(kb.external_id) for kb in knowledge_bases if kb.external_id],
'dataset_names': [kb.name for kb in knowledge_bases]
}
# 创建问题记录
return ChatHistory.objects.create(
user=self.scope["user"],
knowledge_base=knowledge_bases[0],
conversation_id=conversation_id,
title=data.get('title', 'New chat'),
role='user',
content=data['question'],
metadata=metadata
)
except Exception as e:
logger.error(f"创建问题记录失败: {str(e)}")
return None, str(e)
async def create_question_record(self, data):
"""异步创建问题记录"""
try:
result = await self._create_question_record_sync(data)
if isinstance(result, tuple):
_, error_message = result
await self.send_error(error_message)
return None
return result
except Exception as e:
await self.send_error(str(e))
return None
def check_knowledge_base_permission(self, kb, user, permission_type):
"""检查知识库权限"""
# 实现权限检查逻辑
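        # 一个可能的实现示意(非项目既有代码,字段与关联名仅为假设,请以实际模型为准):
        #   if getattr(kb, 'is_public', False) or kb.creator_id == user.id:
        #       return True
        #   return KnowledgeBasePermission.objects.filter(
        #       knowledge_base=kb, user=user, permission_type=permission_type
        #   ).exists()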
return True # 临时返回 True需要根据实际情况实现
async def stream_answer(self, question_record, data):
"""流式处理回答"""
try:
# 创建 AI 回答记录
answer_record = await database_sync_to_async(ChatHistory.objects.create)(
user=self.scope["user"],
knowledge_base=question_record.knowledge_base,
conversation_id=str(question_record.conversation_id),
title=question_record.title,
parent_id=str(question_record.id),
role='assistant',
content="",
metadata=question_record.metadata
)
# 发送初始响应
await self.send_json({
'code': 200,
'message': '开始流式传输',
'data': {
'id': str(answer_record.id),
'conversation_id': str(question_record.conversation_id),
'content': '',
'is_end': False
}
})
# 调用外部 API 获取流式响应
async with aiohttp.ClientSession() as session:
# 创建聊天会话
chat_response = await session.post(
f"{settings.API_BASE_URL}/api/application/chat/open",
json={
"id": "d5d11efa-ea9a-11ef-9933-0242ac120006",
"model_id": question_record.metadata.get('model_id'),
"dataset_id_list": question_record.metadata.get('dataset_external_id_list', []),
"multiple_rounds_dialogue": False,
"dataset_setting": {
"top_n": 10,
"similarity": "0.3",
"max_paragraph_char_number": 10000,
"search_mode": "blend",
"no_references_setting": {
"value": "{question}",
"status": "ai_questioning"
}
},
"model_setting": {
"prompt": "**相关文档内容**{data} **回答要求**:如果相关文档内容中没有可用信息,请回答\"没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作\"。请根据相关文档内容回答用户问题。不要输出与用户问题无关的内容。请使用中文回答客户问题。**用户问题**{question}"
},
"problem_optimization": False
}
)
chat_data = await chat_response.json()
if chat_data.get('code') != 200:
raise Exception(f"创建聊天会话失败: {chat_data}")
chat_id = chat_data['data']
# 建立流式连接
async with session.post(
f"{settings.API_BASE_URL}/api/application/chat_message/{chat_id}",
json={"message": data['question'], "re_chat": False, "stream": True},
headers={"Content-Type": "application/json"}
) as response:
full_content = ""
buffer = ""
async for chunk in response.content.iter_any():
chunk_str = chunk.decode('utf-8')
buffer += chunk_str
while '\n\n' in buffer:
parts = buffer.split('\n\n', 1)
line = parts[0]
buffer = parts[1]
if line.startswith('data: '):
try:
json_str = line[6:]
chunk_data = json.loads(json_str)
if 'content' in chunk_data:
content_part = chunk_data['content']
full_content += content_part
await self.send_json({
'code': 200,
'message': 'partial',
'data': {
'id': str(answer_record.id),
'conversation_id': str(question_record.conversation_id),
'content': content_part,
'is_end': chunk_data.get('is_end', False)
}
})
if chunk_data.get('is_end', False):
# 保存完整内容
answer_record.content = full_content.strip()
await database_sync_to_async(answer_record.save)()
# 生成或获取标题
title = await self.get_or_generate_title(
question_record.conversation_id,
data['question'],
full_content.strip()
)
# 发送最终响应
await self.send_json({
'code': 200,
'message': '完成',
'data': {
'id': str(answer_record.id),
'conversation_id': str(question_record.conversation_id),
'title': title,
'dataset_id_list': question_record.metadata.get('dataset_id_list', []),
'dataset_names': question_record.metadata.get('dataset_names', []),
'role': 'assistant',
'content': full_content.strip(),
'created_at': answer_record.created_at.strftime('%Y-%m-%d %H:%M:%S'),
'is_end': True
}
})
return
except json.JSONDecodeError as e:
logger.error(f"JSON解析错误: {e}, 数据: {line}")
continue
except Exception as e:
logger.error(f"流式处理出错: {str(e)}")
await self.send_error(str(e))
# 保存已收集的内容
if 'full_content' in locals() and full_content:
try:
answer_record.content = full_content.strip()
await database_sync_to_async(answer_record.save)()
except Exception as save_error:
logger.error(f"保存部分内容失败: {str(save_error)}")
@database_sync_to_async
def get_or_generate_title(self, conversation_id, question, answer):
"""获取或生成对话标题"""
try:
# 先检查是否已有标题
current_title = ChatHistory.objects.filter(
conversation_id=str(conversation_id)
).exclude(
title__in=["New chat", "新对话", ""]
).values_list('title', flat=True).first()
if current_title:
return current_title
# 如果没有标题,生成新标题
# 这里需要实现标题生成的逻辑
generated_title = "新对话" # 临时使用默认标题
# 更新所有相关记录的标题
ChatHistory.objects.filter(
conversation_id=str(conversation_id)
).update(title=generated_title)
return generated_title
except Exception as e:
logger.error(f"获取或生成标题失败: {str(e)}")
return "新对话"
async def send_json(self, content):
"""发送 JSON 格式的消息"""
await self.send(text_data=json.dumps(content))
async def send_error(self, message):
"""发送错误消息"""
await self.send_json({
'code': 500,
'message': message,
'data': {'is_end': True}
})
class ChatStreamConsumer(AsyncWebsocketConsumer):
async def connect(self):
"""建立WebSocket连接"""
try:
# 从URL参数中获取token
query_string = self.scope.get('query_string', b'').decode()
query_params = parse_qs(query_string)
token_key = query_params.get('token', [''])[0]
if not token_key:
logger.warning("WebSocket连接尝试但没有提供token")
await self.close()
return
# 验证token
self.user = await self.get_user_from_token(token_key)
if not self.user:
logger.warning(f"WebSocket连接尝试但token无效: {token_key}")
await self.close()
return
# 将用户信息存储在scope中
self.scope["user"] = self.user
await self.accept()
logger.info(f"用户 {self.user.username} 流式输出WebSocket连接成功")
except Exception as e:
logger.error(f"WebSocket连接错误: {str(e)}")
await self.close()
@database_sync_to_async
def get_user_from_token(self, token_key):
try:
token = Token.objects.select_related('user').get(key=token_key)
return token.user
except Token.DoesNotExist:
return None
async def disconnect(self, close_code):
"""关闭WebSocket连接"""
logger.info(f"用户 {self.user.username if hasattr(self, 'user') else 'unknown'} WebSocket连接断开代码: {close_code}")
async def receive(self, text_data):
"""接收消息并处理"""
try:
data = json.loads(text_data)
# 检查必填字段
if 'question' not in data:
await self.send_error("缺少必填字段: question")
return
if 'conversation_id' not in data:
await self.send_error("缺少必填字段: conversation_id")
return
# 处理新会话或现有会话
await self.process_chat_request(data)
except Exception as e:
logger.error(f"处理消息时出错: {str(e)}")
logger.error(traceback.format_exc())
await self.send_error(f"处理消息时出错: {str(e)}")
async def process_chat_request(self, data):
"""处理聊天请求"""
try:
conversation_id = data['conversation_id']
question = data['question']
# 获取会话信息和知识库
session_info = await self.get_session_info(data)
if not session_info:
return
knowledge_bases, metadata, dataset_external_id_list = session_info
# 创建问题记录
question_record = await self.create_question_record(
conversation_id,
question,
knowledge_bases,
metadata
)
if not question_record:
return
# 创建AI回答记录
answer_record = await self.create_answer_record(
conversation_id,
question_record,
knowledge_bases,
metadata
)
# 发送初始响应
await self.send_json({
'code': 200,
'message': '开始流式传输',
'data': {
'id': str(answer_record.id),
'conversation_id': str(conversation_id),
'content': '',
'is_end': False
}
})
# 调用外部API获取流式响应
await self.stream_from_external_api(
conversation_id,
question,
dataset_external_id_list,
answer_record,
metadata,
knowledge_bases
)
except Exception as e:
logger.error(f"处理聊天请求时出错: {str(e)}")
logger.error(traceback.format_exc())
await self.send_error(f"处理聊天请求时出错: {str(e)}")
@database_sync_to_async
def get_session_info(self, data):
"""获取会话信息和知识库"""
try:
conversation_id = data['conversation_id']
# 查找该会话ID下的历史记录
existing_records = ChatHistory.objects.filter(
conversation_id=conversation_id
).order_by('created_at')
# 如果有历史记录使用第一条记录的metadata
if existing_records.exists():
first_record = existing_records.first()
metadata = first_record.metadata or {}
# 获取知识库信息
dataset_ids = metadata.get('dataset_id_list', [])
external_id_list = metadata.get('dataset_external_id_list', [])
if not dataset_ids:
logger.error('找不到会话关联的知识库信息')
return None
# 验证知识库是否存在且用户有权限
knowledge_bases = []
for kb_id in dataset_ids:
try:
kb = KnowledgeBase.objects.get(id=kb_id)
if not self.check_knowledge_base_permission(kb, self.scope["user"], 'read'):
logger.error(f'无权访问知识库: {kb.name}')
return None
knowledge_bases.append(kb)
except KnowledgeBase.DoesNotExist:
logger.error(f'知识库不存在: {kb_id}')
return None
if not external_id_list or not knowledge_bases:
logger.error('会话关联的知识库信息不完整')
return None
return knowledge_bases, metadata, external_id_list
else:
# 如果是新会话的第一条记录需要提供知识库ID
dataset_ids = []
if 'dataset_id' in data:
dataset_ids.append(str(data['dataset_id']))
elif 'dataset_id_list' in data and isinstance(data['dataset_id_list'], (list, str)):
if isinstance(data['dataset_id_list'], str):
try:
dataset_list = json.loads(data['dataset_id_list'])
if isinstance(dataset_list, list):
dataset_ids = [str(id) for id in dataset_list]
else:
dataset_ids = [str(data['dataset_id_list'])]
except json.JSONDecodeError:
dataset_ids = [str(data['dataset_id_list'])]
else:
dataset_ids = [str(id) for id in data['dataset_id_list']]
if not dataset_ids:
logger.error('新会话需要提供知识库ID')
return None
# 验证所有知识库并收集external_ids
external_id_list = []
knowledge_bases = []
for kb_id in dataset_ids:
try:
knowledge_base = KnowledgeBase.objects.filter(id=kb_id).first()
if not knowledge_base:
logger.error(f'知识库不存在: {kb_id}')
return None
knowledge_bases.append(knowledge_base)
# 使用统一的权限检查方法
if not self.check_knowledge_base_permission(knowledge_base, self.scope["user"], 'read'):
logger.error(f'无权访问知识库: {knowledge_base.name}')
return None
# 添加知识库的external_id到列表
if knowledge_base.external_id:
external_id_list.append(str(knowledge_base.external_id))
else:
logger.warning(f"知识库 {knowledge_base.id} ({knowledge_base.name}) 没有external_id")
except Exception as e:
logger.error(f"处理知识库ID出错: {str(e)}")
return None
if not external_id_list:
logger.error('没有有效的知识库external_id')
return None
# 创建metadata
metadata = {
'model_id': data.get('model_id', '7a214d0e-e65e-11ef-9f4a-0242ac120006'),
'dataset_id_list': [str(id) for id in dataset_ids],
'dataset_external_id_list': [str(id) for id in external_id_list],
'dataset_names': [kb.name for kb in knowledge_bases]
}
return knowledge_bases, metadata, external_id_list
except Exception as e:
logger.error(f"获取会话信息时出错: {str(e)}")
return None
def check_knowledge_base_permission(self, kb, user, permission_type):
"""检查知识库权限"""
# 实现权限检查逻辑
return True # 临时返回 True需要根据实际情况实现
@database_sync_to_async
def create_question_record(self, conversation_id, question, knowledge_bases, metadata):
"""创建问题记录"""
try:
title = metadata.get('title', 'New chat')
# 创建用户问题记录
return ChatHistory.objects.create(
user=self.scope["user"],
knowledge_base=knowledge_bases[0], # 使用第一个知识库作为主知识库
conversation_id=str(conversation_id),
title=title,
role='user',
content=question,
metadata=metadata
)
except Exception as e:
logger.error(f"创建问题记录时出错: {str(e)}")
return None
@database_sync_to_async
def create_answer_record(self, conversation_id, question_record, knowledge_bases, metadata):
"""创建AI回答记录"""
try:
return ChatHistory.objects.create(
user=self.scope["user"],
knowledge_base=knowledge_bases[0],
conversation_id=str(conversation_id),
title=question_record.title,
parent_id=str(question_record.id),
role='assistant',
content="", # 初始内容为空
metadata=metadata
)
except Exception as e:
logger.error(f"创建回答记录时出错: {str(e)}")
return None
async def stream_from_external_api(self, conversation_id, question, dataset_external_id_list, answer_record, metadata, knowledge_bases):
"""从外部API获取流式响应"""
try:
# 确保所有ID都是字符串
dataset_external_ids = [str(id) if isinstance(id, uuid.UUID) else id for id in dataset_external_id_list]
# 获取标题
title = answer_record.title or 'New chat'
# 异步收集完整内容,用于最后保存
full_content = ""
# 使用aiohttp进行异步HTTP请求
async with aiohttp.ClientSession() as session:
# 第一步: 创建聊天会话
async with session.post(
f"{settings.API_BASE_URL}/api/application/chat/open",
json={
"id": "d5d11efa-ea9a-11ef-9933-0242ac120006",
"model_id": metadata.get('model_id', '7a214d0e-e65e-11ef-9f4a-0242ac120006'),
"dataset_id_list": dataset_external_ids,
"multiple_rounds_dialogue": False,
"dataset_setting": {
"top_n": 10, "similarity": "0.3",
"max_paragraph_char_number": 10000,
"search_mode": "blend",
"no_references_setting": {
"value": "{question}",
"status": "ai_questioning"
}
},
"model_setting": {
"prompt": "**相关文档内容**{data} **回答要求**:如果相关文档内容中没有可用信息,请回答\"没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作\"。请根据相关文档内容回答用户问题。不要输出与用户问题无关的内容。请使用中文回答客户问题。**用户问题**{question}"
},
"problem_optimization": False
}
) as chat_response:
if chat_response.status != 200:
error_msg = f"外部API调用失败: {await chat_response.text()}"
logger.error(error_msg)
await self.send_error(error_msg)
return
chat_data = await chat_response.json()
if chat_data.get('code') != 200 or not chat_data.get('data'):
error_msg = f"外部API返回错误: {chat_data}"
logger.error(error_msg)
await self.send_error(error_msg)
return
chat_id = chat_data['data']
logger.info(f"成功创建聊天会话, chat_id: {chat_id}")
# 第二步: 建立流式连接
message_url = f"{settings.API_BASE_URL}/api/application/chat_message/{chat_id}"
logger.info(f"开始流式请求: {message_url}")
# 创建流式请求
async with session.post(
url=message_url,
json={"message": question, "re_chat": False, "stream": True},
headers={"Content-Type": "application/json"}
) as message_request:
if message_request.status != 200:
error_msg = f"外部API聊天消息调用失败: {message_request.status}, {await message_request.text()}"
logger.error(error_msg)
await self.send_error(error_msg)
return
# 创建一个缓冲区以处理分段的数据
buffer = ""
# 读取并处理每个响应块
logger.info("开始处理流式响应")
async for chunk in message_request.content.iter_any():
chunk_str = chunk.decode('utf-8')
buffer += chunk_str
# 检查是否有完整的数据行
while '\n\n' in buffer:
parts = buffer.split('\n\n', 1)
line = parts[0]
buffer = parts[1]
if line.startswith('data: '):
try:
# 提取JSON数据
json_str = line[6:] # 去掉 "data: " 前缀
data = json.loads(json_str)
# 记录并处理部分响应
if 'content' in data:
content_part = data['content']
full_content += content_part
# 发送部分内容
await self.send_json({
'code': 200,
'message': 'partial',
'data': {
'id': str(answer_record.id),
'conversation_id': str(conversation_id),
'content': content_part,
'is_end': data.get('is_end', False)
}
})
# 处理结束标记
if data.get('is_end', False):
logger.info("收到流式响应结束标记")
# 保存完整内容
await self.update_answer_content(answer_record.id, full_content.strip())
# 处理标题
title = await self.get_or_generate_title(
conversation_id,
question,
full_content.strip()
)
# 发送最终响应
await self.send_json({
'code': 200,
'message': '完成',
'data': {
'id': str(answer_record.id),
'conversation_id': str(conversation_id),
'title': title,
'dataset_id_list': metadata.get('dataset_id_list', []),
'dataset_names': metadata.get('dataset_names', []),
'role': 'assistant',
'content': full_content.strip(),
'created_at': answer_record.created_at.strftime('%Y-%m-%d %H:%M:%S'),
'is_end': True
}
})
return
except json.JSONDecodeError as e:
logger.error(f"JSON解析错误: {e}, 数据: {line}")
continue
except Exception as e:
logger.error(f"流式处理出错: {str(e)}")
logger.error(traceback.format_exc())
await self.send_error(str(e))
# 保存已收集的内容
if 'full_content' in locals() and full_content:
try:
await self.update_answer_content(answer_record.id, full_content.strip())
except Exception as save_error:
logger.error(f"保存部分内容失败: {str(save_error)}")
@database_sync_to_async
def update_answer_content(self, answer_id, content):
"""更新回答内容"""
try:
answer_record = ChatHistory.objects.get(id=answer_id)
answer_record.content = content
answer_record.save()
return True
except Exception as e:
logger.error(f"更新回答内容失败: {str(e)}")
return False
@database_sync_to_async
def get_or_generate_title(self, conversation_id, question, answer):
"""获取或生成对话标题"""
try:
# 先检查是否已有标题
current_title = ChatHistory.objects.filter(
conversation_id=str(conversation_id)
).exclude(
title__in=["New chat", "新对话", ""]
).values_list('title', flat=True).first()
if current_title:
return current_title
# 简单的标题生成逻辑 (可替换为调用DeepSeek API生成标题)
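            # 若要改为调用 DeepSeek 生成标题可参考如下示意假设其提供OpenAI兼容的chat/completions接口
            # 密钥、模型名与提示词均为示例需导入requests并以实际配置为准
            #   resp = requests.post(
            #       "https://api.deepseek.com/chat/completions",
            #       headers={"Authorization": f"Bearer {settings.DEEPSEEK_API_KEY}"},
            #       json={"model": "deepseek-chat", "messages": [{
            #           "role": "user",
            #           "content": f"用不超过20个字概括这段对话的主题{question}"}]},
            #       timeout=10)
            #   generated_title = resp.json()["choices"][0]["message"]["content"].strip()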
generated_title = question[:20] + "..." if len(question) > 20 else question
# 更新所有相关记录的标题
ChatHistory.objects.filter(
conversation_id=str(conversation_id)
).update(title=generated_title)
return generated_title
except Exception as e:
logger.error(f"获取或生成标题失败: {str(e)}")
return "新对话"
async def send_json(self, content):
"""发送JSON格式的消息"""
await self.send(text_data=json.dumps(content))
async def send_error(self, message):
"""发送错误消息"""
await self.send_json({
'code': 500,
'message': message,
'data': {'is_end': True}
})
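
# 路由配置示意(非本文件内容):测试脚本 test_websocket_stream.py 连接的是 ws://.../ws/chat/stream/
# 假设项目在 routing.py 中按 Channels 的常规写法注册消费者,大致如下,具体路径以实际配置为准:
#   from django.urls import re_path
#   from . import consumers
#   websocket_urlpatterns = [
#       re_path(r"^ws/chat/$", consumers.ChatConsumer.as_asgi()),
#       re_path(r"^ws/chat/stream/$", consumers.ChatStreamConsumer.as_asgi()),
#   ]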

View File

@ -1,5 +1,9 @@
import logging
import traceback
import os
import pandas as pd
from django.http import FileResponse, HttpResponse
from django.conf import settings
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
@ -13,7 +17,8 @@ from user_management.gmail_integration import GmailIntegration
from feishu.feishu_ai_chat import (
fetch_table_records, find_duplicate_email_creators,
process_duplicate_emails, auto_chat_session,
check_goal_achieved
check_goal_achieved, export_feishu_creators_to_excel,
export_matching_emails_to_excel
)
logger = logging.getLogger(__name__)
@ -125,11 +130,11 @@ def process_feishu_table(request):
if not duplicate_emails:
return Response(
{"message": "发现重复邮箱"},
{"message": "找到与系统中已有creator匹配的邮箱"},
status=status.HTTP_200_OK
)
# 处理重复邮箱记录
# 处理匹配的邮箱记录
results = process_duplicate_emails(duplicate_emails, goal_template)
# 如果需要自动对话
@ -224,6 +229,16 @@ def run_auto_chat(request):
# 如果是强制发送模式
if force_send:
try:
# 创建Gmail集成实例
gmail_integration = GmailIntegration(request.user)
# 检查Gmail服务是否已正确初始化
if not hasattr(gmail_integration, 'gmail_service') or gmail_integration.gmail_service is None:
return Response(
{"error": "Gmail服务未正确初始化请检查Gmail API配置"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
# 获取知识库映射
mapping = GmailTalentMapping.objects.filter(
user=request.user,
@ -428,6 +443,269 @@ def check_goal_status(request):
except Exception as e:
logger.error(f"检查目标状态时出错: {str(e)}")
logger.error(traceback.format_exc())
return Response(
{"error": str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def export_creators_data(request):
"""
导出匹配的FeishuCreator数据到Excel文件
请求参数:
table_id: 表格ID
view_id: 视图ID
app_token: 飞书应用TOKEN (可选)
access_token: 用户访问令牌 (可选)
app_id: 应用ID (可选)
app_secret: 应用密钥 (可选)
export_type: 导出类型'creators''feishu'默认为'creators'
format: 格式支持'excel''csv'默认为'excel'
"""
try:
# 检查用户权限 - 只允许组长使用
if request.user.role != 'leader':
return Response(
{"error": "只有组长角色的用户可以使用此功能"},
status=status.HTTP_403_FORBIDDEN
)
# 获取参数
table_id = request.data.get("table_id", "tbl3oikG3F8YYtVA") # 默认表格ID
view_id = request.data.get("view_id", "vewSOIsmxc") # 默认视图ID
app_token = request.data.get("app_token", "XYE6bMQUOaZ5y5svj4vcWohGnmg")
access_token = request.data.get("access_token", "u-fK0HvbXVte.G2xzYs5oxV6k1nHu1glvFgG00l0Ma24VD")
app_id = request.data.get("app_id", "cli_a5c97daacb9e500d")
app_secret = request.data.get("app_secret", "fdVeOCLXmuIHZVmSV0VbJh9wd0Kq1o5y")
export_type = request.data.get("export_type", "creators") # 导出类型creators或feishu
export_format = request.data.get("format", "excel") # 导出格式excel或csv
if export_format not in ["excel", "csv"]:
return Response(
{"error": "当前支持的格式有: excel, csv"},
status=status.HTTP_400_BAD_REQUEST
)
logger.info(f"导出飞书数据: table_id={table_id}, view_id={view_id}, type={export_type}, format={export_format}")
# 从飞书表格获取记录
records = fetch_table_records(
app_token,
table_id,
view_id,
access_token,
app_id,
app_secret
)
if not records:
logger.warning("未获取到任何记录可能是表格ID或视图ID不正确或无权限访问")
return Response(
{"message": "未获取到任何记录"},
status=status.HTTP_404_NOT_FOUND
)
# 查找重复邮箱的创作者
duplicate_emails = find_duplicate_email_creators(records)
if not duplicate_emails:
return Response(
{"message": "未找到与系统中已有creator匹配的邮箱"},
status=status.HTTP_404_NOT_FOUND
)
# 创建存储导出文件的目录
export_dir = os.path.join(settings.MEDIA_ROOT, 'exports')
os.makedirs(export_dir, exist_ok=True)
# 根据导出类型和格式选择输出文件名
if export_type == "creators":
file_prefix = "feishu_creators"
else:
file_prefix = "feishu_data"
if export_format == "excel":
file_ext = ".xlsx"
else:
file_ext = ".csv"
output_filename = f"{file_prefix}_{request.user.id}{file_ext}"
output_path = os.path.join(export_dir, output_filename)
# 根据导出类型选择导出函数
if export_type == "creators":
# 导出FeishuCreator数据
if export_format == "excel":
excel_path = export_feishu_creators_to_excel(duplicate_emails, output_path)
if not excel_path:
return Response(
{"error": "导出FeishuCreator数据失败"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
else:
# CSV格式导出直接处理
try:
# 获取所有匹配的邮箱
emails = list(duplicate_emails.keys())
# 从数据库获取FeishuCreator记录
from user_management.models import FeishuCreator
creators = FeishuCreator.objects.filter(email__in=emails)
if not creators.exists():
return Response(
{"error": "没有找到匹配的FeishuCreator记录"},
status=status.HTTP_404_NOT_FOUND
)
# 创建数据列表
data = []
for creator in creators:
# 处理datetime字段移除时区信息
created_at = creator.created_at
if hasattr(created_at, 'tzinfo') and created_at.tzinfo is not None:
created_at = created_at.replace(tzinfo=None)
updated_at = creator.updated_at
if hasattr(updated_at, 'tzinfo') and updated_at.tzinfo is not None:
updated_at = updated_at.replace(tzinfo=None)
row = {
'id': str(creator.id),
'handle': creator.handle,
'email': creator.email,
'phone': creator.phone,
'created_at': created_at,
'updated_at': updated_at,
# 其他需要的字段...
}
data.append(row)
# 创建DataFrame并导出到CSV
df = pd.DataFrame(data)
df.to_csv(output_path, index=False, encoding='utf-8-sig') # 使用BOM标记以支持中文
excel_path = output_path
except Exception as e:
logger.error(f"导出CSV时出错: {str(e)}")
logger.error(traceback.format_exc())
return Response(
{"error": f"导出CSV失败: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
elif export_type == "feishu":
# 导出飞书原始数据
if export_format == "excel":
excel_path = export_matching_emails_to_excel(duplicate_emails, records, output_path)
if not excel_path:
return Response(
{"error": "导出飞书数据失败"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
else:
# CSV格式导出
try:
# 创建数据列表
data = []
for email, email_records in duplicate_emails.items():
for record in email_records:
fields = record.fields
row = {
'Email': email,
'RecordID': record.record_id
}
# 提取所有字段
for field_name, field_value in fields.items():
row[field_name] = extract_field_value(field_value)
data.append(row)
# 创建DataFrame并导出到CSV
df = pd.DataFrame(data)
df.to_csv(output_path, index=False, encoding='utf-8-sig')
excel_path = output_path
except Exception as e:
logger.error(f"导出CSV时出错: {str(e)}")
logger.error(traceback.format_exc())
return Response(
{"error": f"导出CSV失败: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
else:
return Response(
{"error": f"不支持的导出类型: {export_type},可选值为'creators''feishu'"},
status=status.HTTP_400_BAD_REQUEST
)
# 获取服务器域名,考虑各种情况
domain = request.build_absolute_uri('/').rstrip('/')
# 如果是本地开发环境使用127.0.0.1:8000
if 'localhost' in domain or '127.0.0.1' in domain:
# 从请求头获取Host
host = request.META.get('HTTP_HOST', '127.0.0.1:8000')
if ':' not in host:
host = f"{host}:8000" # 添加默认端口
domain = f"http://{host}"
# 构建下载URL
file_url = f"{domain}/api/feishu/download/{output_filename}"
return Response({
"status": "success",
"message": f"成功导出{export_type}数据,格式为{export_format}",
"matched_emails": len(duplicate_emails),
"file_url": file_url,
"file_name": output_filename
}, status=status.HTTP_200_OK)
except Exception as e:
logger.error(f"导出数据时出错: {str(e)}")
logger.error(traceback.format_exc())
return Response(
{"error": str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def download_exported_file(request, filename):
"""
下载已导出的Excel文件
URL参数:
filename: 文件名
"""
try:
# 检查用户权限 - 只允许组长使用
if request.user.role != 'leader':
return Response(
{"error": "只有组长角色的用户可以使用此功能"},
status=status.HTTP_403_FORBIDDEN
)
# 构建文件路径
file_path = os.path.join(settings.MEDIA_ROOT, 'exports', filename)
# 检查文件是否存在
if not os.path.exists(file_path):
return Response(
{"error": f"文件不存在: {filename}"},
status=status.HTTP_404_NOT_FOUND
)
# 返回文件
response = FileResponse(open(file_path, 'rb'))
response['Content-Disposition'] = f'attachment; filename="{filename}"'
return response
except Exception as e:
logger.error(f"下载文件时出错: {str(e)}")
logger.error(traceback.format_exc())
return Response(
{"error": str(e)},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR
        )

View File

@ -0,0 +1,753 @@
import logging
import traceback
from datetime import datetime, timedelta
from django.utils import timezone
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from django.conf import settings
from .models import GmailCredential
from .gmail_integration import GmailIntegration, GmailServiceManager
logger = logging.getLogger(__name__)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def list_gmail_accounts(request):
"""列出用户的所有Gmail账户"""
try:
# 获取用户的所有Gmail凭证
credentials = GmailCredential.objects.filter(
user=request.user
).order_by('-is_default', '-updated_at')
# 转换为简单的数据结构
accounts = []
for cred in credentials:
# 检查账户状态
is_valid = False
try:
# 尝试使用API获取认证状态
gmail_integration = GmailIntegration(
user=request.user,
gmail_credential_id=str(cred.id)
)
is_valid = gmail_integration.authenticate()
except Exception:
pass
# 检查监听状态
watch_expired = True
if cred.watch_expiration:
watch_expired = cred.watch_expiration < timezone.now()
accounts.append({
'id': str(cred.id),
'name': cred.name,
'gmail_email': cred.gmail_email or "未知",
'is_default': cred.is_default,
'is_active': cred.is_active,
'is_valid': is_valid, # 凭证是否有效
'watch_expired': watch_expired,
'watch_expiration': cred.watch_expiration.strftime('%Y-%m-%d %H:%M:%S') if cred.watch_expiration else None,
'created_at': cred.created_at.strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': cred.updated_at.strftime('%Y-%m-%d %H:%M:%S')
})
return Response({
'code': 200,
'message': '获取Gmail账户列表成功',
'data': accounts
})
except Exception as e:
logger.error(f"获取Gmail账户列表失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'获取Gmail账户列表失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def set_default_gmail_account(request):
"""设置默认Gmail账户"""
try:
# 获取账户ID
account_id = request.data.get('account_id')
if not account_id:
return Response({
'code': 400,
'message': '缺少账户ID参数',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 查找账户
try:
credential = GmailCredential.objects.get(
id=account_id,
user=request.user
)
except GmailCredential.DoesNotExist:
return Response({
'code': 404,
'message': '找不到指定的Gmail账户',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
# 设置为默认账户
credential.is_default = True
credential.save() # save方法会自动处理其他账户的默认状态
# 清理服务单例,确保下次使用时获取的是最新状态
GmailServiceManager.clear_instance(request.user)
return Response({
'code': 200,
'message': f'已将{credential.gmail_email or credential.name}设为默认Gmail账户',
'data': {
'id': str(credential.id),
'name': credential.name,
'gmail_email': credential.gmail_email,
'is_default': True
}
})
except Exception as e:
logger.error(f"设置默认Gmail账户失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'设置默认Gmail账户失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def update_gmail_account(request):
"""更新Gmail账户信息"""
try:
# 获取参数
account_id = request.data.get('account_id')
name = request.data.get('name')
is_active = request.data.get('is_active')
if not account_id:
return Response({
'code': 400,
'message': '缺少账户ID参数',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 查找账户
try:
credential = GmailCredential.objects.get(
id=account_id,
user=request.user
)
except GmailCredential.DoesNotExist:
return Response({
'code': 404,
'message': '找不到指定的Gmail账户',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
# 更新信息
if name is not None:
credential.name = name
if is_active is not None:
credential.is_active = is_active
credential.save()
# 清理服务单例
GmailServiceManager.clear_instance(request.user, str(credential.id))
return Response({
'code': 200,
'message': '更新Gmail账户成功',
'data': {
'id': str(credential.id),
'name': credential.name,
'gmail_email': credential.gmail_email,
'is_active': credential.is_active,
'is_default': credential.is_default
}
})
except Exception as e:
logger.error(f"更新Gmail账户失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'更新Gmail账户失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['DELETE'])
@permission_classes([IsAuthenticated])
def delete_gmail_account(request, account_id):
"""删除Gmail账户"""
try:
# 查找账户
try:
credential = GmailCredential.objects.get(
id=account_id,
user=request.user
)
except GmailCredential.DoesNotExist:
return Response({
'code': 404,
'message': '找不到指定的Gmail账户',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
# 记录账户信息
account_info = {
'id': str(credential.id),
'name': credential.name,
'gmail_email': credential.gmail_email
}
# 清理服务单例
GmailServiceManager.clear_instance(request.user, str(credential.id))
# 删除账户
credential.delete()
# 如果删除了默认账户,设置新的默认账户
default_exists = GmailCredential.objects.filter(
user=request.user,
is_default=True
).exists()
if not default_exists:
# 设置最新的账户为默认
latest_credential = GmailCredential.objects.filter(
user=request.user
).order_by('-updated_at').first()
if latest_credential:
latest_credential.is_default = True
latest_credential.save()
return Response({
'code': 200,
'message': f'成功删除Gmail账户: {account_info["name"]}',
'data': account_info
})
except Exception as e:
logger.error(f"删除Gmail账户失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'删除Gmail账户失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def add_gmail_account(request):
"""添加新的Gmail账户"""
try:
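        # 绑定流程示意(端点路径以项目 urls.py 实际配置为准):
        #   1. 调用本接口,上传 client_secret_json 或 client_secret_file
        #      若需要授权,返回 code=202 以及 data.auth_url
        #   2. 用户在浏览器访问 auth_url 完成 Google 授权,得到授权码
        #   3. 将授权码提交给 handle_gmail_auth_code 接口,完成绑定并自动初始化监听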
# 获取可选的代理设置
use_proxy = request.data.get('use_proxy', True)
proxy_url = request.data.get('proxy_url', 'http://127.0.0.1:7890')
# 获取账户名称
name = request.data.get('name', '新Gmail账户')
# 获取客户端密钥支持JSON格式字符串或上传的文件
client_secret_json = None
# 检查是否有上传的JSON文件
if 'client_secret_file' in request.FILES:
import json
file_content = request.FILES['client_secret_file'].read().decode('utf-8')
try:
client_secret_json = json.loads(file_content)
logger.info("成功从上传的JSON文件解析客户端密钥")
except json.JSONDecodeError:
return Response({
'code': 400,
'message': '上传的JSON文件格式无效',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
else:
# 尝试从请求数据中获取JSON字符串
client_secret_json = request.data.get('client_secret_json')
if not client_secret_json:
return Response({
'code': 400,
'message': '缺少客户端密钥参数请提供client_secret_json或上传client_secret_file',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 验证用户是否已达到Gmail账户数量限制
max_accounts_per_user = getattr(settings, 'MAX_GMAIL_ACCOUNTS_PER_USER', 5)
current_accounts_count = GmailCredential.objects.filter(user=request.user, is_active=True).count()
if current_accounts_count >= max_accounts_per_user:
return Response({
'code': 400,
'message': f'您最多只能绑定{max_accounts_per_user}个Gmail账户',
'data': {
'current_count': current_accounts_count,
'max_allowed': max_accounts_per_user
}
}, status=status.HTTP_400_BAD_REQUEST)
# 创建Gmail集成实例
gmail_integration = GmailIntegration(
user=request.user,
client_secret_json=client_secret_json,
use_proxy=use_proxy,
proxy_url=proxy_url
)
# 开始认证流程
try:
gmail_integration.authenticate()
# 认证会抛出包含认证URL的异常
except Exception as e:
error_message = str(e)
# 检查是否包含认证URL正常的OAuth流程
if "Please visit this URL" in error_message:
# 提取认证URL
auth_url = error_message.split("URL: ")[1].split(" ")[0]
# 记录认证会话信息方便handle_gmail_auth_code使用
request.session['gmail_auth_pending'] = {
'name': name,
'use_proxy': use_proxy,
'proxy_url': proxy_url,
# 不存储client_secret_json应该已经保存在OAuth流程中
}
return Response({
'code': 202, # Accepted需要进一步操作
'message': '需要Gmail授权',
'data': {
'auth_url': auth_url,
'name': name
}
})
elif "Token has been expired or revoked" in error_message:
return Response({
'code': 401,
'message': 'OAuth令牌已过期或被撤销请重新授权',
'data': None
}, status=status.HTTP_401_UNAUTHORIZED)
else:
# 其他错误
logger.error(f"Gmail认证过程中出现错误: {error_message}")
return Response({
'code': 500,
'message': f'Gmail认证失败: {error_message}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({
'code': 200,
'message': '添加Gmail账户成功',
'data': None
})
except Exception as e:
logger.error(f"添加Gmail账户失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'添加Gmail账户失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def handle_gmail_auth_code(request):
"""处理Gmail授权回调码"""
try:
# 获取授权码
auth_code = request.data.get('auth_code')
if not auth_code:
return Response({
'code': 400,
'message': '缺少授权码参数',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 获取账户名称和其他参数,优先从会话中获取
auth_pending = request.session.pop('gmail_auth_pending', {})
name = request.data.get('name', auth_pending.get('name', '新Gmail账户'))
use_proxy = request.data.get('use_proxy', auth_pending.get('use_proxy', True))
proxy_url = request.data.get('proxy_url', auth_pending.get('proxy_url', 'http://127.0.0.1:7890'))
# 创建Gmail集成实例
gmail_integration = GmailIntegration(
user=request.user,
use_proxy=use_proxy,
proxy_url=proxy_url
)
# 处理授权码
try:
result = gmail_integration.handle_auth_code(auth_code)
# 设置账户名称
if gmail_integration.gmail_credential:
gmail_integration.gmail_credential.name = name
gmail_integration.gmail_credential.save()
# 初始化监听
try:
watch_result = gmail_integration.setup_watch()
logger.info(f"初始化Gmail监听成功: {watch_result}")
except Exception as watch_error:
logger.error(f"初始化Gmail监听失败: {str(watch_error)}")
# 返回结果
return Response({
'code': 200,
'message': 'Gmail账户授权成功',
'data': {
'id': str(gmail_integration.gmail_credential.id) if gmail_integration.gmail_credential else None,
'name': name,
'gmail_email': gmail_integration.gmail_credential.gmail_email if gmail_integration.gmail_credential else None,
'watch_result': watch_result if 'watch_result' in locals() else None
}
})
except Exception as auth_error:
error_message = str(auth_error)
logger.error(f"处理Gmail授权码失败: {error_message}")
if "invalid_grant" in error_message.lower():
return Response({
'code': 400,
'message': '授权码无效或已过期,请重新授权',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
return Response({
'code': 500,
'message': f'处理Gmail授权码失败: {error_message}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"处理Gmail授权码失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'处理Gmail授权码失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def refresh_gmail_account_watch(request):
"""刷新特定Gmail账户的监听"""
try:
# 获取账户ID
account_id = request.data.get('account_id')
if not account_id:
return Response({
'code': 400,
'message': '缺少账户ID参数',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 查找账户
try:
credential = GmailCredential.objects.get(
id=account_id,
user=request.user,
is_active=True
)
except GmailCredential.DoesNotExist:
return Response({
'code': 404,
'message': '找不到指定的Gmail账户',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
# 获取可选的代理设置
use_proxy = request.data.get('use_proxy', True)
proxy_url = request.data.get('proxy_url', 'http://127.0.0.1:7890')
# 创建Gmail集成实例
gmail_integration = GmailIntegration(
user=request.user,
gmail_credential_id=str(credential.id),
use_proxy=use_proxy,
proxy_url=proxy_url
)
# 认证Gmail
if not gmail_integration.authenticate():
return Response({
'code': 400,
'message': 'Gmail认证失败',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 设置监听
watch_result = gmail_integration.setup_watch()
# 更新监听过期时间
expiration = watch_result.get('expiration')
history_id = watch_result.get('historyId')
if expiration:
# 转换为datetime对象
expiration_time = datetime.fromtimestamp(int(expiration) / 1000)
credential.watch_expiration = expiration_time
if history_id:
credential.last_history_id = history_id
credential.save()
return Response({
'code': 200,
'message': '刷新Gmail监听成功',
'data': {
'account_id': str(credential.id),
'name': credential.name,
'gmail_email': credential.gmail_email,
'expiration': credential.watch_expiration.strftime('%Y-%m-%d %H:%M:%S') if credential.watch_expiration else None,
'history_id': credential.last_history_id
}
})
except Exception as e:
logger.error(f"刷新Gmail监听失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'刷新Gmail监听失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def check_specific_gmail_auth(request):
"""检查特定Gmail账户的认证状态"""
try:
# 获取可选的账户ID参数
gmail_credential_id = request.query_params.get('account_id')
# 如果提供了账户ID检查特定账户
if gmail_credential_id:
credential = GmailCredential.objects.filter(
id=gmail_credential_id,
user=request.user
).first()
else:
# 否则检查默认账户
credential = GmailCredential.objects.filter(
user=request.user,
is_active=True,
is_default=True
).first()
# 如果没有默认账户,检查最新的账户
if not credential:
credential = GmailCredential.objects.filter(
user=request.user,
is_active=True
).order_by('-updated_at').first()
if not credential:
return Response({
'code': 404,
'message': '未找到Gmail认证信息',
'data': {
'authenticated': False,
'needs_setup': True
}
})
# 获取可选的代理设置
use_proxy = request.query_params.get('use_proxy', 'true').lower() == 'true'
proxy_url = request.query_params.get('proxy_url', 'http://127.0.0.1:7890')
# 创建Gmail集成实例
gmail_integration = GmailIntegration(
user=request.user,
gmail_credential_id=str(credential.id),
use_proxy=use_proxy,
proxy_url=proxy_url
)
# 测试认证
auth_valid = gmail_integration.authenticate()
# 检查监听是否过期
watch_expired = True
if credential.watch_expiration:
watch_expired = credential.watch_expiration < timezone.now()
return Response({
'code': 200,
'message': '认证信息获取成功',
'data': {
'authenticated': auth_valid,
'needs_setup': not auth_valid,
'account_id': str(credential.id),
'name': credential.name,
'gmail_email': credential.gmail_email,
'is_default': credential.is_default,
'watch_expired': watch_expired,
'last_history_id': credential.last_history_id,
'watch_expiration': credential.watch_expiration.strftime('%Y-%m-%d %H:%M:%S') if credential.watch_expiration else None
}
})
except Exception as e:
logger.error(f"检查Gmail认证状态失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'检查Gmail认证状态失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def clear_gmail_cache(request):
"""清除Gmail服务缓存解决授权和监听问题"""
try:
# 获取参数
gmail_email = request.data.get('gmail_email')
account_id = request.data.get('account_id')
if not gmail_email and not account_id:
return Response({
'code': 400,
'message': '需要提供gmail_email或account_id参数',
'data': None
}, status=status.HTTP_400_BAD_REQUEST)
# 根据不同参数清除缓存
if account_id:
# 如果提供了account_id清除特定账号的缓存
try:
credential = GmailCredential.objects.get(
id=account_id,
user=request.user
)
gmail_email = credential.gmail_email
# 清除特定账号的缓存
GmailServiceManager.clear_instance(request.user, account_id)
logger.info(f"已清除用户 {request.user.email} 的Gmail账号 {account_id} 缓存")
except GmailCredential.DoesNotExist:
return Response({
'code': 404,
'message': '找不到指定的Gmail账号',
'data': None
}, status=status.HTTP_404_NOT_FOUND)
else:
# 如果只提供了gmail_email清除所有相关缓存
cleared_count = GmailServiceManager.clear_all_instances_by_email(gmail_email)
logger.info(f"已清除Gmail邮箱 {gmail_email}{cleared_count} 个缓存实例")
# 查找与此邮箱关联的所有凭证
credentials = GmailCredential.objects.filter(
gmail_email=gmail_email,
is_active=True
).select_related('user')
# 创建Gmail集成实例测试认证并刷新监听
success_refreshed = []
failed_refreshed = []
for credential in credentials:
try:
# 创建Gmail集成实例
integration = GmailIntegration(
user=credential.user,
gmail_credential_id=str(credential.id)
)
# 测试认证
if integration.authenticate():
# 刷新监听
try:
watch_result = integration.setup_watch()
# 更新监听过期时间
if 'expiration' in watch_result:
# 转换为datetime对象
from datetime import datetime
expiration_time = datetime.fromtimestamp(int(watch_result['expiration']) / 1000)
credential.watch_expiration = expiration_time
credential.save()
success_refreshed.append({
'id': str(credential.id),
'gmail_email': credential.gmail_email,
'user_id': str(credential.user.id),
'user_email': credential.user.email
})
except Exception as watch_error:
failed_refreshed.append({
'id': str(credential.id),
'gmail_email': credential.gmail_email,
'user_id': str(credential.user.id),
'user_email': credential.user.email,
'error': str(watch_error)
})
else:
failed_refreshed.append({
'id': str(credential.id),
'gmail_email': credential.gmail_email,
'user_id': str(credential.user.id),
'user_email': credential.user.email,
'error': '认证失败'
})
except Exception as e:
failed_refreshed.append({
'id': str(credential.id),
'gmail_email': credential.gmail_email,
'user_id': str(credential.user.id) if credential.user else None,
'user_email': credential.user.email if credential.user else None,
'error': str(e)
})
# 处理队列通知
queue_result = None
if success_refreshed:
try:
# 处理与这些成功刷新的凭证相关的队列通知
from .gmail_integration import GmailIntegration
for cred_info in success_refreshed:
user_obj = GmailCredential.objects.get(id=cred_info['id']).user
queue_result = GmailIntegration.process_queued_notifications(user=user_obj)
except Exception as queue_error:
logger.error(f"处理队列通知失败: {str(queue_error)}")
return Response({
'code': 200,
'message': '清除Gmail缓存成功',
'data': {
'gmail_email': gmail_email,
'success_refreshed': success_refreshed,
'failed_refreshed': failed_refreshed,
'queue_processed': queue_result
}
})
except Exception as e:
logger.error(f"清除Gmail缓存失败: {str(e)}")
logger.error(traceback.format_exc())
return Response({
'code': 500,
'message': f'清除Gmail缓存失败: {str(e)}',
'data': None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

File diff suppressed because it is too large Load Diff

View File

@ -1 +1,3 @@
# 管理命令包
# 命令模块初始化

View File

@ -1 +1,3 @@
# Gmail管理命令
# 命令模块初始化

View File

@ -0,0 +1,64 @@
from django.core.management.base import BaseCommand
import logging
import pickle
import json
from user_management.models import GmailCredential
from oauth2client import client
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = '修复Gmail凭证数据格式问题'
def handle(self, *args, **options):
self.stdout.write(self.style.SUCCESS('开始修复Gmail凭证...'))
credentials = GmailCredential.objects.all()
fixed_count = 0
error_count = 0
for cred in credentials:
try:
if not cred.credentials:
self.stdout.write(self.style.WARNING(f'ID {cred.id} 的凭证为空,跳过'))
continue
# 检测当前凭证格式
self.stdout.write(f'处理凭证 ID: {cred.id}, 邮箱: {cred.gmail_email}')
# 1. 尝试作为JSON加载
try:
if isinstance(cred.credentials, str):
# 验证是否为有效JSON
json.loads(cred.credentials)
self.stdout.write(self.style.SUCCESS(f'凭证 {cred.id} 已是有效JSON格式'))
fixed_count += 1
continue
except Exception:
pass
# 2. 尝试从二进制pickle加载并转换为JSON
try:
# 处理可能的pickle格式
if isinstance(cred.credentials, str):
oauth_creds = pickle.loads(cred.credentials.encode('latin1'))
else:
oauth_creds = pickle.loads(cred.credentials)
# 转换为JSON并保存
json_creds = oauth_creds.to_json()
cred.credentials = json_creds
cred.save()
self.stdout.write(self.style.SUCCESS(f'凭证 {cred.id} 已从pickle转换为JSON格式'))
fixed_count += 1
continue
except Exception as e:
self.stdout.write(self.style.ERROR(f'无法处理凭证 {cred.id}: {str(e)}'))
error_count += 1
except Exception as e:
self.stdout.write(self.style.ERROR(f'处理凭证 {cred.id} 时出错: {str(e)}'))
error_count += 1
self.stdout.write(self.style.SUCCESS(f'处理完成! 成功: {fixed_count}, 失败: {error_count}'))

View File

@ -0,0 +1,49 @@
from django.core.management.base import BaseCommand
from user_management.models import Video
from user_management.tasks import publish_scheduled_video
class Command(BaseCommand):
help = '手动发布视频'
def add_arguments(self, parser):
parser.add_argument('video_id', type=int, help='视频ID')
def handle(self, *args, **options):
video_id = options['video_id']
try:
# 获取视频对象
video = Video.objects.get(id=video_id)
except Video.DoesNotExist:
self.stderr.write(self.style.ERROR(f'错误: 未找到ID为{video_id}的视频'))
return
# 检查视频状态是否允许发布
if video.status not in ['draft', 'scheduled']:
self.stderr.write(self.style.ERROR(
f'错误: 当前视频状态为 {video.get_status_display()},无法发布'
))
return
self.stdout.write(f'开始发布视频 "{video.title}" (ID: {video.id})...')
# 执行发布任务
try:
result = publish_scheduled_video(video.id)
if isinstance(result, dict) and result.get('success', False):
self.stdout.write(self.style.SUCCESS(
f'视频发布成功!\n'
f'标题: {video.title}\n'
f'平台: {video.platform_account.get_platform_name_display()}\n'
f'账号: {video.platform_account.account_name}\n'
f'视频链接: {result.get("video_url")}\n'
f'发布时间: {result.get("publish_time")}'
))
else:
error_msg = result.get("error", "未知错误") if isinstance(result, dict) else "任务未返回有效结果"
self.stderr.write(self.style.ERROR(
f'发布失败: {error_msg}'
))
except Exception as e:
self.stderr.write(self.style.ERROR(f'发布过程中出错: {str(e)}'))

View File

@ -0,0 +1,142 @@
import os
import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
from user_management.models import PlatformAccount, Video
from django.conf import settings
class Command(BaseCommand):
help = '测试视频上传和定时发布功能'
def add_arguments(self, parser):
parser.add_argument('video_path', type=str, help='视频文件路径')
parser.add_argument('platform_account_id', type=int, help='平台账号ID')
parser.add_argument('--title', type=str, help='视频标题(可选)')
parser.add_argument('--desc', type=str, help='视频描述(可选)')
parser.add_argument('--schedule', type=str, help='计划发布时间,格式: YYYY-MM-DD HH:MM:SS (可选)')
def handle(self, *args, **options):
video_path = options['video_path']
platform_account_id = options['platform_account_id']
title = options.get('title')
desc = options.get('desc')
schedule_str = options.get('schedule')
# 验证视频文件是否存在
if not os.path.exists(video_path):
self.stderr.write(self.style.ERROR(f'错误: 视频文件不存在: {video_path}'))
return
# 验证平台账号是否存在
try:
platform_account = PlatformAccount.objects.get(id=platform_account_id)
except PlatformAccount.DoesNotExist:
self.stderr.write(self.style.ERROR(f'错误: 未找到ID为{platform_account_id}的平台账号'))
return
# 设置标题(如果未提供,则使用文件名)
if not title:
title = os.path.splitext(os.path.basename(video_path))[0]
# 准备保存视频的目录
media_root = getattr(settings, 'MEDIA_ROOT', os.path.join(settings.BASE_DIR, 'media'))
videos_dir = os.path.join(media_root, 'videos')
account_dir = os.path.join(videos_dir, f"{platform_account.platform_name}_{platform_account.account_name}")
os.makedirs(account_dir, exist_ok=True)  # 递归创建账号视频目录,已存在则跳过
# 生成唯一的文件名
import time
timestamp = int(time.time())
file_name = f"{timestamp}_{os.path.basename(video_path)}"
file_path = os.path.join(account_dir, file_name)
# 复制视频文件使用shutil分块复制避免大视频一次性读入内存
import shutil
shutil.copy2(video_path, file_path)
self.stdout.write(self.style.SUCCESS(f'视频文件已复制到: {file_path}'))
# 创建视频记录
video_data = {
'platform_account': platform_account,
'title': title,
'description': desc,
'local_path': file_path,
'status': 'draft',
}
# 处理计划发布时间
if schedule_str:
try:
from dateutil import parser
scheduled_time = parser.parse(schedule_str)
# 如果时间已过设置为当前时间后5分钟
now = timezone.now()
if scheduled_time <= now:
scheduled_time = now + datetime.timedelta(minutes=5)
self.stdout.write(self.style.WARNING(
f'警告: 计划时间已过已调整为当前时间后5分钟: {scheduled_time}'
))
video_data['scheduled_time'] = scheduled_time
video_data['status'] = 'scheduled'
except Exception as e:
self.stderr.write(self.style.ERROR(f'错误: 解析时间失败: {str(e)}'))
return
# 创建视频对象
video = Video.objects.create(**video_data)
self.stdout.write(self.style.SUCCESS(f'创建视频记录成功ID: {video.id}'))
# 如果是计划发布,创建定时任务
if video.status == 'scheduled':
try:
from django_celery_beat.models import PeriodicTask, CrontabSchedule
import json
scheduled_time = video.scheduled_time
# 创建定时任务
schedule, _ = CrontabSchedule.objects.get_or_create(
minute=scheduled_time.minute,
hour=scheduled_time.hour,
day_of_month=scheduled_time.day,
month_of_year=scheduled_time.month,
)
# 创建周期性任务
task_name = f"Publish_Video_{video.id}_{time.time()}"
PeriodicTask.objects.create(
name=task_name,
task='user_management.tasks.publish_scheduled_video',
crontab=schedule,
args=json.dumps([video.id]),
one_off=True, # 只执行一次
start_time=scheduled_time
)
self.stdout.write(self.style.SUCCESS(
f'创建定时发布任务成功,计划发布时间: {scheduled_time}'
))
except Exception as e:
self.stderr.write(self.style.ERROR(f'创建定时任务失败: {str(e)}'))
self.stdout.write(self.style.SUCCESS('操作完成'))
# 打印执行方式提示
if video.status == 'scheduled':
self.stdout.write(f"\n视频将在 {video.scheduled_time} 自动发布")
self.stdout.write("\n要手动发布,可以使用以下命令:")
else:
self.stdout.write("\n要发布该视频,可以使用以下命令:")
self.stdout.write(f"python manage.py publish_video {video.id}")

View File

@ -0,0 +1,33 @@
# Generated by Django 5.1.5 on 2025-04-23 03:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_management', 'fix_gmail_email_field'),
]
operations = [
migrations.AddField(
model_name='chathistory',
name='title',
field=models.CharField(blank=True, default='New chat', help_text='对话标题', max_length=100, null=True),
),
migrations.AddField(
model_name='gmailcredential',
name='gmail_email',
field=models.EmailField(blank=True, help_text='实际授权的Gmail账号可能与user.email不同', max_length=255, null=True),
),
migrations.AddField(
model_name='gmailcredential',
name='is_default',
field=models.BooleanField(default=False, help_text='是否为默认Gmail账号'),
),
migrations.AddField(
model_name='gmailcredential',
name='name',
field=models.CharField(default='默认Gmail', help_text='此Gmail账号的自定义名称', max_length=100),
),
]

View File

@ -0,0 +1,18 @@
# Generated by Django 5.1.5 on 2025-04-23 08:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_management', '0001_chathistory_title_gmailcredential_gmail_email_and_more'),
]
operations = [
migrations.AddField(
model_name='knowledgebasedocument',
name='uploader_name',
field=models.CharField(default='未知用户', max_length=100, verbose_name='上传者姓名'),
),
]

View File

@ -0,0 +1,82 @@
# Generated by Django 5.1.5 on 2025-04-26 02:47
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_management', '0002_knowledgebasedocument_uploader_name'),
]
operations = [
migrations.CreateModel(
name='OperatorAccount',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100, unique=True, verbose_name='用户名')),
('password', models.CharField(max_length=255, verbose_name='密码')),
('real_name', models.CharField(max_length=50, verbose_name='真实姓名')),
('email', models.EmailField(max_length=254, verbose_name='邮箱')),
('phone', models.CharField(max_length=15, verbose_name='电话')),
('position', models.CharField(choices=[('editor', '编辑'), ('planner', '策划'), ('operator', '运营'), ('admin', '管理员')], max_length=20, verbose_name='工作定位')),
('department', models.CharField(max_length=50, verbose_name='部门')),
('is_active', models.BooleanField(default=True, verbose_name='是否在职')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
],
options={
'verbose_name': '运营账号',
'verbose_name_plural': '运营账号',
},
),
migrations.CreateModel(
name='PlatformAccount',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('platform_name', models.CharField(choices=[('youtube', 'YouTube'), ('tiktok', 'TikTok'), ('twitter', 'Twitter/X'), ('instagram', 'Instagram'), ('facebook', 'Facebook'), ('bilibili', 'Bilibili')], max_length=20, verbose_name='平台名称')),
('account_name', models.CharField(max_length=100, verbose_name='账号名称')),
('account_id', models.CharField(max_length=100, verbose_name='账号ID')),
('status', models.CharField(choices=[('active', '正常'), ('restricted', '限流'), ('suspended', '封禁'), ('inactive', '未激活')], default='active', max_length=20, verbose_name='账号状态')),
('followers_count', models.IntegerField(default=0, verbose_name='粉丝数')),
('account_url', models.URLField(verbose_name='账号链接')),
('description', models.TextField(blank=True, null=True, verbose_name='账号描述')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='最后登录时间')),
('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='platform_accounts', to='user_management.operatoraccount', verbose_name='关联运营')),
],
options={
'verbose_name': '平台账号',
'verbose_name_plural': '平台账号',
'unique_together': {('platform_name', 'account_id')},
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='视频标题')),
('description', models.TextField(blank=True, null=True, verbose_name='视频描述')),
('video_url', models.URLField(blank=True, null=True, verbose_name='视频地址')),
('local_path', models.CharField(blank=True, max_length=255, null=True, verbose_name='本地路径')),
('thumbnail_url', models.URLField(blank=True, null=True, verbose_name='缩略图地址')),
('status', models.CharField(choices=[('draft', '草稿'), ('scheduled', '已排期'), ('published', '已发布'), ('failed', '发布失败'), ('deleted', '已删除')], default='draft', max_length=20, verbose_name='发布状态')),
('views_count', models.IntegerField(default=0, verbose_name='播放次数')),
('likes_count', models.IntegerField(default=0, verbose_name='点赞数')),
('comments_count', models.IntegerField(default=0, verbose_name='评论数')),
('shares_count', models.IntegerField(default=0, verbose_name='分享数')),
('tags', models.CharField(blank=True, max_length=500, null=True, verbose_name='标签')),
('publish_time', models.DateTimeField(blank=True, null=True, verbose_name='发布时间')),
('scheduled_time', models.DateTimeField(blank=True, null=True, verbose_name='计划发布时间')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('platform_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='videos', to='user_management.platformaccount', verbose_name='发布账号')),
],
options={
'verbose_name': '视频',
'verbose_name_plural': '视频',
},
),
]

View File

@ -0,0 +1,24 @@
# Generated by Django 5.1.5 on 2025-04-27 03:14
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_management', '0003_operatoraccount_platformaccount_video'),
]
operations = [
migrations.AddField(
model_name='operatoraccount',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True, verbose_name='UUID'),
),
migrations.AlterField(
model_name='operatoraccount',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]

View File

@ -0,0 +1,113 @@
# Generated by Django 5.1.5 on 2025-04-28 07:12
import django.db.models.deletion
import uuid
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_management', '0004_operatoraccount_uuid_alter_operatoraccount_id'),
]
operations = [
migrations.AlterModelOptions(
name='gmailcredential',
options={'ordering': ['-is_default', '-updated_at'], 'verbose_name': 'Gmail凭证', 'verbose_name_plural': 'Gmail凭证'},
),
migrations.AlterUniqueTogether(
name='gmailcredential',
unique_together={('user', 'gmail_email')},
),
migrations.AddField(
model_name='gmailcredential',
name='credentials_json',
field=models.TextField(blank=True, null=True, verbose_name='凭证JSON'),
),
migrations.AddField(
model_name='gmailcredential',
name='gmail_credential_id',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Gmail凭证ID'),
),
migrations.AddField(
model_name='gmailcredential',
name='needs_reauth',
field=models.BooleanField(default=False, verbose_name='需要重新授权'),
),
migrations.AlterField(
model_name='gmailcredential',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='gmailcredential',
name='gmail_email',
field=models.EmailField(default='your_default_email@example.com', max_length=255, verbose_name='Gmail邮箱'),
),
migrations.AlterField(
model_name='gmailcredential',
name='is_active',
field=models.BooleanField(default=True, verbose_name='是否活跃'),
),
migrations.AlterField(
model_name='gmailcredential',
name='is_default',
field=models.BooleanField(default=False, verbose_name='是否默认'),
),
migrations.AlterField(
model_name='gmailcredential',
name='last_history_id',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='最后历史ID'),
),
migrations.AlterField(
model_name='gmailcredential',
name='name',
field=models.CharField(default='默认Gmail', max_length=100, verbose_name='名称'),
),
migrations.AlterField(
model_name='gmailcredential',
name='token_path',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='令牌路径'),
),
migrations.AlterField(
model_name='gmailcredential',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
),
migrations.AlterField(
model_name='gmailcredential',
name='watch_expiration',
field=models.DateTimeField(blank=True, null=True, verbose_name='监听过期时间'),
),
migrations.AlterModelTable(
name='gmailcredential',
table=None,
),
migrations.CreateModel(
name='GmailNotificationQueue',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('email', models.EmailField(max_length=255, verbose_name='邮箱')),
('history_id', models.CharField(max_length=100, verbose_name='历史ID')),
('notification_data', models.TextField(blank=True, null=True, verbose_name='通知数据')),
('processed', models.BooleanField(default=False, verbose_name='是否已处理')),
('success', models.BooleanField(default=False, verbose_name='处理是否成功')),
('error_message', models.CharField(blank=True, max_length=255, null=True, verbose_name='错误信息')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('processed_at', models.DateTimeField(blank=True, null=True, verbose_name='处理时间')),
('gmail_credential', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notification_queue', to='user_management.gmailcredential')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gmail_notification_queue', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Gmail通知队列',
'verbose_name_plural': 'Gmail通知队列',
'ordering': ['processed', 'created_at'],
},
),
migrations.RemoveField(
model_name='gmailcredential',
name='credentials',
),
]

View File

@ -0,0 +1,18 @@
# Generated by Django 5.1.5 on 2025-04-28 07:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user_management', '0005_alter_gmailcredential_options_and_more'),
]
operations = [
migrations.RenameField(
model_name='gmailcredential',
old_name='credentials_json',
new_name='credentials',
),
]

View File

@ -0,0 +1,23 @@
# Generated manually
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user_management', '0009_gmailcredential_gmail_email_conversationsummary_and_more'),
]
operations = [
# 添加name和is_default字段
migrations.RunSQL(
sql="""
ALTER TABLE gmail_credential ADD COLUMN name VARCHAR(100) DEFAULT '默认Gmail' NOT NULL;
ALTER TABLE gmail_credential ADD COLUMN is_default BOOLEAN DEFAULT FALSE NOT NULL;
""",
reverse_sql="""
ALTER TABLE gmail_credential DROP COLUMN name;
ALTER TABLE gmail_credential DROP COLUMN is_default;
"""
)
]

View File

@ -291,6 +291,8 @@ class ChatHistory(models.Model):
knowledge_base = models.ForeignKey('KnowledgeBase', on_delete=models.CASCADE)
# 用于标识知识库组合的对话
conversation_id = models.CharField(max_length=100, db_index=True)
# 对话标题
title = models.CharField(max_length=100, null=True, blank=True, default='New chat', help_text="对话标题")
parent_id = models.CharField(max_length=100, null=True, blank=True)
role = models.CharField(max_length=20, choices=ROLE_CHOICES)
content = models.TextField()
@ -683,6 +685,7 @@ class KnowledgeBaseDocument(models.Model):
document_id = models.CharField(max_length=100, verbose_name='文档ID')
document_name = models.CharField(max_length=255, verbose_name='文档名称')
external_id = models.CharField(max_length=100, verbose_name='外部文档ID')
uploader_name = models.CharField(max_length=100, default="未知用户", verbose_name='上传者姓名')
status = models.CharField(
max_length=20,
default='active',
@ -710,23 +713,30 @@ class KnowledgeBaseDocument(models.Model):
return f"{self.knowledge_base.name} - {self.document_name}"
class GmailCredential(models.Model):
"""Gmail认证信息"""
"""Gmail账号凭证"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='gmail_credentials')
gmail_email = models.EmailField(max_length=255, null=True, blank=True, help_text="实际授权的Gmail账号可能与user.email不同")
credentials = models.BinaryField() # 序列化的凭证对象
token_path = models.CharField(max_length=255) # token存储路径
last_history_id = models.CharField(max_length=255, null=True, blank=True) # 最后处理的historyId
watch_expiration = models.DateTimeField(null=True, blank=True) # 监听过期时间
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
class Meta:
db_table = 'gmail_credential'
gmail_email = models.EmailField(verbose_name='Gmail邮箱', max_length=255, default='your_default_email@example.com')
name = models.CharField(verbose_name='名称', max_length=100, default='默认Gmail')
credentials = models.TextField(verbose_name='凭证JSON', blank=True, null=True)
token_path = models.CharField(verbose_name='令牌路径', max_length=255, blank=True, null=True)
is_default = models.BooleanField(verbose_name='是否默认', default=False)
last_history_id = models.CharField(verbose_name='最后历史ID', max_length=100, blank=True, null=True)
watch_expiration = models.DateTimeField(verbose_name='监听过期时间', blank=True, null=True)
is_active = models.BooleanField(verbose_name='是否活跃', default=True)
created_at = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
updated_at = models.DateTimeField(verbose_name='更新时间', auto_now=True)
gmail_credential_id = models.CharField(verbose_name='Gmail凭证ID', max_length=255, blank=True, null=True)
needs_reauth = models.BooleanField(verbose_name='需要重新授权', default=False)
def __str__(self):
return f"{self.user.username}的Gmail认证"
return f"{self.name} ({self.gmail_email})"
class Meta:
verbose_name = 'Gmail凭证'
verbose_name_plural = 'Gmail凭证'
unique_together = ('user', 'gmail_email')
ordering = ['-is_default', '-updated_at']
class GmailTalentMapping(models.Model):
"""Gmail达人映射关系模型"""
@ -802,3 +812,135 @@ class ConversationSummary(models.Model):
def __str__(self):
return f"{self.user.username}{self.talent_email}的对话总结"
class OperatorAccount(models.Model):
"""运营账号信息表"""
id = models.AutoField(primary_key=True) # 保留自动递增的ID字段
uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True, verbose_name='UUID')
POSITION_CHOICES = [
('editor', '编辑'),
('planner', '策划'),
('operator', '运营'),
('admin', '管理员'),
]
username = models.CharField(max_length=100, unique=True, verbose_name='用户名')
password = models.CharField(max_length=255, verbose_name='密码')
real_name = models.CharField(max_length=50, verbose_name='真实姓名')
email = models.EmailField(verbose_name='邮箱')
phone = models.CharField(max_length=15, verbose_name='电话')
position = models.CharField(max_length=20, choices=POSITION_CHOICES, verbose_name='工作定位')
department = models.CharField(max_length=50, verbose_name='部门')
is_active = models.BooleanField(default=True, verbose_name='是否在职')
created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间')
class Meta:
verbose_name = '运营账号'
verbose_name_plural = '运营账号'
def __str__(self):
return f"{self.real_name} ({self.username})"
class PlatformAccount(models.Model):
"""平台账号信息表"""
STATUS_CHOICES = [
('active', '正常'),
('restricted', '限流'),
('suspended', '封禁'),
('inactive', '未激活'),
]
PLATFORM_CHOICES = [
('youtube', 'YouTube'),
('tiktok', 'TikTok'),
('twitter', 'Twitter/X'),
('instagram', 'Instagram'),
('facebook', 'Facebook'),
('bilibili', 'Bilibili'),
]
operator = models.ForeignKey(OperatorAccount, on_delete=models.CASCADE, related_name='platform_accounts', verbose_name='关联运营')
platform_name = models.CharField(max_length=20, choices=PLATFORM_CHOICES, verbose_name='平台名称')
account_name = models.CharField(max_length=100, verbose_name='账号名称')
account_id = models.CharField(max_length=100, verbose_name='账号ID')
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='active', verbose_name='账号状态')
followers_count = models.IntegerField(default=0, verbose_name='粉丝数')
account_url = models.URLField(verbose_name='账号链接')
description = models.TextField(blank=True, null=True, verbose_name='账号描述')
created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间')
last_login = models.DateTimeField(blank=True, null=True, verbose_name='最后登录时间')
class Meta:
verbose_name = '平台账号'
verbose_name_plural = '平台账号'
unique_together = ('platform_name', 'account_id')
def __str__(self):
return f"{self.account_name} ({self.platform_name})"
class Video(models.Model):
"""视频信息表"""
STATUS_CHOICES = [
('draft', '草稿'),
('scheduled', '已排期'),
('published', '已发布'),
('failed', '发布失败'),
('deleted', '已删除'),
]
platform_account = models.ForeignKey(PlatformAccount, on_delete=models.CASCADE, related_name='videos', verbose_name='发布账号')
title = models.CharField(max_length=200, verbose_name='视频标题')
description = models.TextField(blank=True, null=True, verbose_name='视频描述')
video_url = models.URLField(blank=True, null=True, verbose_name='视频地址')
local_path = models.CharField(max_length=255, blank=True, null=True, verbose_name='本地路径')
thumbnail_url = models.URLField(blank=True, null=True, verbose_name='缩略图地址')
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='draft', verbose_name='发布状态')
views_count = models.IntegerField(default=0, verbose_name='播放次数')
likes_count = models.IntegerField(default=0, verbose_name='点赞数')
comments_count = models.IntegerField(default=0, verbose_name='评论数')
shares_count = models.IntegerField(default=0, verbose_name='分享数')
tags = models.CharField(max_length=500, blank=True, null=True, verbose_name='标签')
publish_time = models.DateTimeField(blank=True, null=True, verbose_name='发布时间')
scheduled_time = models.DateTimeField(blank=True, null=True, verbose_name='计划发布时间')
created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间')
class Meta:
verbose_name = '视频'
verbose_name_plural = '视频'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if self.status == 'published' and not self.publish_time:
self.publish_time = timezone.now()
super().save(*args, **kwargs)
class GmailNotificationQueue(models.Model):
"""Gmail通知队列存储因认证失败等原因未能处理的通知"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='gmail_notification_queue')
gmail_credential = models.ForeignKey(GmailCredential, on_delete=models.CASCADE, related_name='notification_queue')
email = models.EmailField(verbose_name='邮箱', max_length=255)
history_id = models.CharField(verbose_name='历史ID', max_length=100)
notification_data = models.TextField(verbose_name='通知数据', blank=True, null=True)
processed = models.BooleanField(verbose_name='是否已处理', default=False)
success = models.BooleanField(verbose_name='处理是否成功', default=False)
error_message = models.CharField(verbose_name='错误信息', max_length=255, blank=True, null=True)
created_at = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
processed_at = models.DateTimeField(verbose_name='处理时间', blank=True, null=True)
def __str__(self):
return f"通知 {self.id} - {self.email} - {self.created_at}"
class Meta:
verbose_name = 'Gmail通知队列'
verbose_name_plural = 'Gmail通知队列'
ordering = ['processed', 'created_at']
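The `Video.save()` override above stamps `publish_time` automatically the first time a record is saved with `status='published'`. A quick Django-shell sketch of that behaviour (the account lookup and field values are illustrative and assume at least one `PlatformAccount` exists):

```python
# python manage.py shell
from user_management.models import PlatformAccount, Video

account = PlatformAccount.objects.first()  # assumes one account exists
video = Video.objects.create(
    platform_account=account,
    title="demo clip",
    status="draft",
)
assert video.publish_time is None          # draft: not stamped yet

video.status = "published"
video.save()                               # save() fills publish_time
print(video.publish_time)
```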

View File

@ -3,4 +3,6 @@ from . import consumers
websocket_urlpatterns = [
re_path(r'ws/notifications/$', consumers.NotificationConsumer.as_asgi()),
re_path(r'ws/chat/$', consumers.ChatConsumer.as_asgi()),
re_path(r'ws/chat/stream/$', consumers.ChatStreamConsumer.as_asgi()),
]

143
user_management/tasks.py Normal file
View File

@ -0,0 +1,143 @@
import os
import logging
import requests
from celery import shared_task
from django.utils import timezone
from django.conf import settings
logger = logging.getLogger(__name__)
@shared_task
def publish_scheduled_video(video_id):
"""定时发布视频的任务"""
from .models import Video
try:
# 获取视频记录
video = Video.objects.get(id=video_id)
# 检查视频状态是否为已排期
if video.status != 'scheduled':
logger.warning(f"视频 {video_id} 状态不是'已排期',当前状态: {video.status},跳过发布")
return {"success": False, "error": f"视频状态为 {video.status},不是'已排期',跳过发布"}
# 检查视频文件是否存在
if not video.local_path or not os.path.exists(video.local_path):
logger.error(f"视频 {video_id} 的本地文件不存在: {video.local_path}")
video.status = 'failed'
video.save()
return {"success": False, "error": f"本地视频文件不存在: {video.local_path}"}
# 模拟上传到平台的过程
# 在实际应用中这里需要根据不同平台调用不同的API
platform_account = video.platform_account
platform_name = platform_account.platform_name
# 模拟成功上传并获取视频URL使用独立变量名避免覆盖任务参数video_id
video_url = f"https://example.com/{platform_name}/{video.id}"
platform_video_id = f"VID_{video.id}"
# 在实际应用中这里应该调用各平台的API
logger.info(f"模拟上传视频 {video.id} 到 {platform_name} 平台,平台视频ID: {platform_video_id}")
# 更新视频状态Video模型暂无video_id字段平台返回的视频ID仅记录在日志中
video.status = 'published'
video.publish_time = timezone.now()
video.video_url = video_url
video.save()
logger.info(f"视频 {video.id} 已成功发布到 {platform_name} 平台")
# 记录到知识库
_update_knowledge_base(video)
return {
"success": True,
"video_id": video.id,
"platform": platform_name,
"publish_time": video.publish_time.strftime("%Y-%m-%d %H:%M:%S"),
"video_url": video_url
}
except Video.DoesNotExist:
logger.error(f"未找到ID为 {video_id} 的视频记录")
return {"success": False, "error": f"未找到ID为 {video_id} 的视频记录"}
except Exception as e:
logger.error(f"发布视频 {video_id} 失败: {str(e)}")
# 尝试更新视频状态为失败
try:
video = Video.objects.get(id=video_id)
video.status = 'failed'
video.save()
except Exception:
pass  # 更新失败状态时再次出错则忽略
return {"success": False, "error": str(e)}
def _update_knowledge_base(video):
"""更新知识库中的视频信息"""
from .models import KnowledgeBase, KnowledgeBaseDocument
try:
# 获取关联的平台账号和运营账号
platform_account = video.platform_account
operator = platform_account.operator
# 查找对应的知识库
knowledge_base = KnowledgeBase.objects.filter(
name__contains=operator.real_name,
type='private'
).first()
if not knowledge_base:
logger.warning(f"未找到与运营账号 {operator.real_name} 关联的知识库")
return
# 查找相关的文档
document = KnowledgeBaseDocument.objects.filter(
knowledge_base=knowledge_base,
document_name__contains=video.title,
status='active'
).first()
if not document:
logger.warning(f"未找到与视频 {video.title} 关联的知识库文档")
return
# 在实际应用中这里应该调用外部API更新文档内容
logger.info(f"更新知识库文档 {document.document_id} 的视频发布状态")
# 模拟更新文档内容
# 这里只记录日志实际应用中需要调用外部API
except Exception as e:
logger.error(f"更新知识库失败: {str(e)}")
@shared_task
def check_scheduled_videos():
"""定期检查计划发布的视频,处理未被正确调度的视频"""
from .models import Video
from datetime import timedelta
try:
# 查找所有已经过了计划发布时间但仍处于scheduled状态的视频
now = timezone.now()
threshold = now - timedelta(minutes=30) # 30分钟容差
videos = Video.objects.filter(
status='scheduled',
scheduled_time__lt=threshold
)
for video in videos:
logger.warning(f"发现未按计划发布的视频: {video.id}, 计划发布时间: {video.scheduled_time}")
# 手动触发发布任务
publish_scheduled_video.delay(video.id)
return f"检查了 {videos.count()} 个未按计划发布的视频"
except Exception as e:
logger.error(f"检查未发布视频失败: {str(e)}")
return f"检查未发布视频失败: {str(e)}"

View File

@ -27,7 +27,8 @@ from .views import (
sync_talent_emails,
manage_user_goal,
generate_conversation_summary,
get_recommended_reply
get_recommended_reply,
refresh_all_gmail_watches
)
from .feishu_chat_views import (
process_feishu_table,
@ -35,6 +36,7 @@ from .feishu_chat_views import (
feishu_user_goal,
check_goal_status
)
from . import gmail_account_views
# 创建路由器
router = DefaultRouter()
@ -75,6 +77,20 @@ urlpatterns = [
path('gmail/check-auth/', check_gmail_auth, name='check_gmail_auth'),
path('gmail/import-from-sender/', import_gmail_from_sender, name='import_gmail_from_sender'),
path('gmail/sync-talent/', sync_talent_emails, name='sync_talent_emails'),
path('gmail/refresh-all-watches/', refresh_all_gmail_watches, name='refresh_all_gmail_watches'),
path('gmail/webhook/', gmail_webhook, name='gmail_webhook'),
# 添加新路由
path('gmail/clear-cache/', gmail_account_views.clear_gmail_cache, name='clear_gmail_cache'),
# Gmail账户管理
path('gmail/accounts/', gmail_account_views.list_gmail_accounts, name='list_gmail_accounts'),
path('gmail/accounts/add/', gmail_account_views.add_gmail_account, name='add_gmail_account'),
path('gmail/accounts/auth-code/', gmail_account_views.handle_gmail_auth_code, name='handle_gmail_auth_code'),
path('gmail/accounts/update/', gmail_account_views.update_gmail_account, name='update_gmail_account'),
path('gmail/accounts/delete/<uuid:account_id>/', gmail_account_views.delete_gmail_account, name='delete_gmail_account'),
path('gmail/accounts/set-default/', gmail_account_views.set_default_gmail_account, name='set_default_gmail_account'),
path('gmail/accounts/refresh-watch/', gmail_account_views.refresh_gmail_account_watch, name='refresh_gmail_account_watch'),
path('gmail/accounts/check-auth/', gmail_account_views.check_specific_gmail_auth, name='check_specific_gmail_auth'),
# 新增功能API
path('user-goal/', manage_user_goal, name='manage_user_goal'),

File diff suppressed because it is too large Load Diff

BIN
测试视频.mp4 Normal file

Binary file not shown.