连接 ChatGPT,把 API key 和关键字导入 SQLite 数据库,批量生成文章。
部分代码演示:
import re
import openai
import sqlite3
import requests
from bs4 import BeautifulSoup # 解析页面
from time import sleep # 等待间隔
import random # 随机
import configparser
import os
# Load runtime settings from config.ini (UTF-8) and open the SQLite database.
# NOTE: the original paste used curly quotes, which are not valid Python string
# delimiters; they are restored to ASCII quotes here.
config = configparser.RawConfigParser()
config.read('config.ini', encoding='utf-8')

dbconsole = config.get('SectionName', 'dbconsole')    # path to the SQLite database file
chatkeys = config.get('SectionName', 'chatkeys')      # OpenAI API key(s)
createfile = config.get('SectionName', 'createfile')  # where generated articles are written
# Original code used eval() on the config text, which executes arbitrary code
# from the file; getint() parses the numeric interval safely instead.
frequency = config.getint('SectionName', 'frequency')
cookie = config.get('SectionName', 'cookie')          # Baidu cookie used by baidu_search()
descr = config.get('SectionName', 'descr')

# Module-level connection/cursor shared by the helper functions below.
conn = sqlite3.connect(dbconsole)
cursor = conn.cursor()
def baidu_search(keyword):
    """Scrape Baidu's "related searches" suggestions for *keyword*.

    Sends a desktop-browser GET request to the Baidu results page
    (after a short random delay) and parses the related-search link
    titles out of the returned HTML.

    Args:
        keyword: search term placed in the Baidu query URL.

    Returns:
        list[str]: at most the first three related-search titles; may be
        empty if Baidu returns no suggestions or blocks the request.
    """
    # Impersonate a desktop Chrome browser; Baidu serves a different
    # (or blocked) page to obvious bots.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Connection": "keep-alive",
        "Accept-Encoding": "gzip, deflate",
        "Host": "www.baidu.com",
        # Needs to be refreshed periodically; loaded from config.ini.
        "Cookie": cookie,
    }
    print('开始爬取新标题')
    # Random 1-2 s pause between requests to look less like a crawler.
    wait_seconds = random.uniform(1, 2)
    print('开始等待{}秒'.format(wait_seconds))
    sleep(wait_seconds)
    # Build the Baidu search URL for the keyword.
    url = f"https://www.baidu.com/s?wd={keyword}"
    response = requests.get(url, headers=headers)
    html = response.text
    print('响应码是:{}'.format(response.status_code))
    # Parse the related-search anchor tags out of the result page.
    soup = BeautifulSoup(html, "html.parser")
    related_titles = [
        item.text.strip()
        for item in soup.find_all("a", class_="rs-link_2DE3Q c-line-clamp1 c-color-link")
    ]
    # Only the first three titles are used downstream.
    # (Original comment claimed "前4个" — the slice actually keeps 3.)
    return related_titles[:3]
def getKeyWords():
cursor.execute(“select id,name from keywords where isuse=0 LIMIT 1”)
row = cursor.fetchone()
# print(row[0], row[1])
下面是完整代码,付费后才能完整下载
案例展示:
暂无评论内容