```python
import requests
from bs4 import BeautifulSoup

# Scrape the Three Hundred Tang Poems
def scrape_data():
    url = "https://so.gushiwen.cn/gushi/tangshi.aspx"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        response.encoding = "utf-8"  # set the encoding explicitly so the poem text decodes correctly
        soup = BeautifulSoup(response.text, "html.parser")
        content_divs = soup.find_all('div', class_="contentdiv")
        poems = []
        for div in content_divs:
            title = div.find('p', class_="cont")
            author = div.find('p', class_="source")
            poem = div.find('div', class_="contson")
            if title and author and poem:
                poems.append({
                    "title": title.text.strip(),
                    "author": author.text.strip(),
                    "content": poem.text.strip()
                })
        return poems
    else:
        return None

# Print the scraped results
poems = scrape_data()
if poems:  # scrape_data() returns None on a non-200 response, so guard before iterating
    for poem in poems:
        print(poem["title"])
        print(poem["author"])
        print(poem["content"])
        print()
```
Related questions
- import requests from bs4 import BeautifulSoup import csv import time import random def get_anjuke_rental(page): headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36', 'Referer': 'https://km.anjuke.com/', 'Cookie': 'aQQ_ajkguid=DAD5FF6F-F8C7-3A74-D6D3-DDBD83A22773; ajk-appVersion=; fzq_h=c460b2e3f6fa0423cdfde83f31cc1ca7_1738734512582_d8af942581ce43adab63aaa3da34c3dc_1782055253; id58…
- import requests from bs4 import BeautifulSoup import pandas as pd url = "https://so.gushiwen.cn/gushi/tangshi.aspx" response = requests.get(url) soup = BeautifulSoup(response.text, 'html.parser') # extract the required data poems_data = [] for row in soup.find_all('tr', class_='tlist')[1:]: cols = row.find_all('td') type_ = cols.text.strip() title = cols.a.text.strip() content = cols.text.replace('\n', '').replace('\t', '') author = cols.text.strip() poems_data.append([type_, title, conte…
- import requests from bs4 import BeautifulSoup def get_poems(): url = "https://so.gushiwen.cn/gushi/tangshi.aspx" response = requests.get(url) response.encoding = "utf-8" soup = BeautifulSoup(response.text, "html.parser") # get the full list of poems poem_list = soup.find_all("div", class_="typecont") poems = [] # iterate over the list and extract the relevant fields for poem in poem_list: # get the poem type poem_type = poem.find_previous_sibling("div", class_="bookml").find("span").text # get the poem title…
- Task 1: scrape the Three Hundred Tang Poems — python import requests from bs4 import BeautifulSoup import pandas as pd url = "https://so.gushiwen.cn/gushi/tangshi.aspx" response = requests.get(url) soup = BeautifulSoup(response.text, "html.parser") poems = [] for poem in soup.find_all("div", class_="item"): title = poem.find("h1").text.strip() content = poem.find("p").text.strip() author = poem.find("span", class_="author").text.strip() poem_type = "唐诗" poems.append((…
- def fill_and_submit_form(row): print("Filling and submitting the form...") wait = WebDriverWait(driver, 5) # increase the wait time try: product_name_input = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="wcontentpanel"]/div[3]/div/div[2]/form/div[1]/div[3]/div/div/div[1]/input'))) product_category_dropdown = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="wcontentpanel"]/div[3]/div/div[2]/form/div[1]/div[4]/div/div/div/span/span/div/div[1]'))) time.sleep(2) # wait for it to finish…
- To complete the tasks above, I will provide the code in steps. First, the code for Task 1, which scrapes the Three Hundred Tang Poems from the site and saves them to a txt file. ### Task 1: scrape the Three Hundred Tang Poems ```python import requests import re # define the scraping function def fetch_poems(url): response = requests.get(url) poems_info = re.findall( r'<a href="(.*?)"\s*target="_blank" title="(.*?)">\s*<span\s*style="font-size:16px;">(.*?)</span>\s*</a>', response.text, re.S ) poems = [] for info in poems_info: poem_url = "https://so.gushiwen.cn" + info[0] poem_response = requests…
- Python: how do I get the fetched text to show \xa0, with the backslash printed literally rather than treated as an escape? import html from bs4 import BeautifulSoup html_content = "压缩天然气储运,减压,燃烧都在严格的密封状态下进行,不易发生泄露。 另外其储气瓶经过各种特殊的破坏性试验,安全可靠。" soup = BeautifulSoup(html_content, "html.parser") text = soup.get_text() print(text) — why does this print a space instead of \xa0? I want \xa0 itself in the output, printed directly, not substituted in. (A minimal illustration follows after this list.)
- Write a crawler that performs depth-first crawling, targeting www.baidu.com. The expected output is: Add the seeds url ['http://www.baidu.com'] to the unvisited url list Pop out one url "http://www.baidu.com" from unvisited url list Get 10 new links Visited url count: 1 Visited deepth: 1 10 unvisited links: Pop out one url "http://news.baidu.com" from unvisited url list Get 52 new links Visited url count: 2 Visited deepth: 2 Pop out one url "http://www.hao123.com" from unvisited url list Get 311 new links Visited url count: 3 Visited deepth: 2 Pop out… (A minimal depth-first sketch follows after this list.)
- import openpyxl import smtplib import imaplib import email import json import os import re from bs4 import BeautifulSoup from fastapi import FastAPI, Form from openpyxl.styles import Alignment from email.mime.multipart import MIMEMultipart from email.mime.base import MIMEBase from email.header import decode_header from email import encoders import pandas as pd app = FastAPI() RECEIVER_EMAILS = { 0: "yundongshijie001@protonmail.com", 1: "xiaobudian001@protonmail.com" } email_address = "…
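On the \xa0 question above: BeautifulSoup decodes an &nbsp; entity to the single character U+00A0, and print() renders that character as a blank, so nothing is lost — the four-character escape \xa0 is just how Python's repr() displays it. A minimal illustration, assuming the original HTML contained an &nbsp; entity (the preview above already shows it rendered as a space):

```python
from bs4 import BeautifulSoup

# The &nbsp; in this sample string is an assumption about the original HTML.
html_content = "安全可靠。&nbsp;另外其储气瓶经过各种特殊的破坏性试验。"
text = BeautifulSoup(html_content, "html.parser").get_text()

print(text)        # the U+00A0 character prints as a blank, not as "\xa0"
print(repr(text))  # repr() shows the escape directly: '...。\xa0另外...'
print(text.replace("\xa0", "\\xa0"))  # or substitute the literal two-character sequence
```

repr() is the direct way to see the escape without modifying the string; the replace() variant actually changes the text, which the question explicitly wanted to avoid.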
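And on the depth-first crawler question, a minimal sketch under stated assumptions: the dfs_crawl name, the depth limit of 2, the timeout, and the http-only link filter are all choices of mine, and the link counts in the logs will depend on whatever the live pages return.

```python
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

def dfs_crawl(seed, max_depth=2):
    """Depth-first crawl: a LIFO stack means the newest links are visited first."""
    visited = set()
    stack = [(seed, 1)]
    print(f"Add the seeds url ['{seed}'] to the unvisited url list")
    while stack:
        url, depth = stack.pop()  # pop from the end -> depth-first order
        if url in visited or depth > max_depth:
            continue
        print(f'Pop out one url "{url}" from unvisited url list')
        try:
            html = requests.get(url, timeout=5).text
        except requests.RequestException:
            continue
        visited.add(url)
        # collect absolute http(s) links from the page
        links = {urljoin(url, a["href"])
                 for a in BeautifulSoup(html, "html.parser").find_all("a", href=True)}
        links = {u for u in links if u.startswith("http")}
        print(f"Get {len(links)} new links")
        print(f"Visited url count: {len(visited)}")
        print(f"Visited deepth: {depth}")  # "deepth" [sic] mirrors the question's expected output
        for link in links:
            if link not in visited:
                stack.append((link, depth + 1))

dfs_crawl("http://www.baidu.com")
```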