Added some comments
This commit is contained in:
parent
929639a394
commit
49d1102a45
|
@ -1,7 +1,7 @@
|
|||
* Import
|
||||
#+begin_src python
|
||||
import json
|
||||
import requests
|
||||
import json # to parse data
|
||||
import requests # to get data
|
||||
import pandas as pd
|
||||
from mastodon import Mastodon # to get the user id
|
||||
from datetime import date # to get the current date
|
||||
|
@ -10,6 +10,7 @@ from bs4 import BeautifulSoup # to more easily read the html output
|
|||
#+end_src
|
||||
* Get/refresh data
|
||||
- I used [[https://jrashford.com/2023/02/13/how-to-scrape-mastodon-timelines-using-python-and-pandas/][this]] setup.
|
||||
- Only has to be refreshed (run) every now and then
|
||||
#+begin_src python
|
||||
# Get access token
|
||||
personal_access_token=subprocess.check_output(["pass", 'mastodon/access_token']).strip().decode('utf-8')
|
||||
|
@ -51,7 +52,7 @@ df.to_csv(file_name_save, index=False)
|
|||
#+end_src
|
||||
|
||||
* Use/search data
|
||||
Use existing data multiple times.
|
||||
- You don't have to load all data for every search.
|
||||
#+begin_src python
|
||||
df=pd.read_csv(file_name_save)
|
||||
|
||||
|
|
Loading…
Reference in a new issue