Switching from Twitter API V1.1 to Twitter API V2 to send Tweets from a Profile to Google Sheets
The model I was using to collect Tweets from API V1.1 is this:
function Twitter_get_tweets()
{
  var string_Screen_name = "stakehighroller";
  var string_Consumer_key = "AAAAAAAAAAAAAAAAAAAAAAAAAA";
  var string_Consumer_secret = "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB";
  var spreadsheet_Tweets = SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Tweets");
  spreadsheet_Tweets.getRange(3, 1, 2600, 20).clearContent();

  // Request an app-only bearer token (OAuth 2.0 client credentials grant)
  var tokenUrl = "https://api.twitter.com/oauth2/token";
  var tokenCredential = Utilities.base64EncodeWebSafe(string_Consumer_key + ":" + string_Consumer_secret);
  var tokenOptions = {
    headers : {
      Authorization: "Basic " + tokenCredential,
      "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"
    },
    method: "post",
    payload: "grant_type=client_credentials"
  };
  var responseToken = UrlFetchApp.fetch(tokenUrl, tokenOptions);
  var parsedToken = JSON.parse(responseToken);
  var token = parsedToken.access_token;

  var apiUrl = "";
  var responseApi = "";
  var apiOptions = {
    headers : {
      Authorization: 'Bearer ' + token
    },
    "method" : "get"
  };
  var array_Column_a = [];
  var array_Text = [];
  var array_Expanded_url = [];
  var string_Max_id = 0;
  var int_Line_counter = 1;
  var int_Break_loop = 0;

  // Page through the timeline, 200 tweets per request, using max_id for pagination
  do
  {
    if (int_Line_counter == 1)
    {
      apiUrl = 'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name='+ string_Screen_name + '&count=200&include_rts=1';
    }
    else
    {
      apiUrl = 'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name='+ string_Screen_name + '&count=200&include_rts=1&max_id=' + string_Max_id;
    }
    responseApi = UrlFetchApp.fetch(apiUrl, apiOptions);
    if (responseApi.getResponseCode() == 200)
    {
      var obj_data = JSON.parse(responseApi.getContentText());
      for (var int_i = 0; int_i < obj_data.length; int_i++)
      {
        array_Column_a.push([int_Line_counter]);
        array_Text.push([obj_data[int_i].text]);
        if (obj_data[int_i].entities.urls[0] != undefined && obj_data[int_i].entities != undefined)
        {
          array_Expanded_url.push([obj_data[int_i].entities.urls[0].expanded_url]);
        }
        else
        {
          array_Expanded_url.push([""]);
        }
        int_Line_counter++;
      }
      if (obj_data[(obj_data.length-1)] != undefined && int_i < parseInt(obj_data[0].user.statuses_count))
      {
        string_Max_id = obj_data[(obj_data.length-1)].id;
      }
      else
      {
        int_Break_loop = 1;
      }
    }
  } while (int_Break_loop != 1 && int_Line_counter < 1000)

  // Write the collected columns to the sheet
  if (array_Column_a.length > 0)
  {
    spreadsheet_Tweets.getRange("A3:A"+(array_Column_a.length + 2)).setValues(array_Column_a);
    spreadsheet_Tweets.getRange("C3:C"+(array_Text.length + 2)).setValues(array_Text);
    spreadsheet_Tweets.getRange("D3:D"+(array_Expanded_url.length + 2)).setValues(array_Expanded_url);
  }
  else
  {
    Browser.msgBox("0 Tweets found");
  }
}
It worked very well. When I adapted the request model to API V2 the data is still retrieved perfectly; I know this because when I tried to send the output of var obj_data = JSON.parse(responseApi.getContentText()); to a single cell, it told me the number of characters exceeded the cell limit.
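A Google Sheets cell holds at most 50,000 characters, so writing the whole parsed response into one cell will always hit that limit. To just confirm the response arrived, logging its size is enough; a minimal sketch, assuming responseApi already holds the result of UrlFetchApp.fetch():

// Sketch: inspect the size of the v2 response without writing it to a cell
var raw = responseApi.getContentText();
Logger.log("Response length: " + raw.length);   // total characters in the payload
Logger.log(raw.substring(0, 500));              // first 500 characters, well under the 50,000-character cell limit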
The current model is as follows:
function TwitterTest2()
{
  var string_Screen_name = "1310800524619386880";
  var string_Consumer_key = "AAAAAAAAAAAAAAAAAAAAAAA";
  var string_Consumer_secret = "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB";
  var spreadsheet_Tweets = SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Tweets");
  spreadsheet_Tweets.getRange(3, 1, 2600, 20).clearContent();

  // Request an app-only bearer token (OAuth 2.0 client credentials grant)
  var tokenUrl = "https://api.twitter.com/oauth2/token";
  var tokenCredential = Utilities.base64EncodeWebSafe(string_Consumer_key + ":" + string_Consumer_secret);
  var tokenOptions = {
    headers : {
      Authorization: "Basic " + tokenCredential,
      "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"
    },
    method: "post",
    payload: "grant_type=client_credentials"
  };
  var responseToken = UrlFetchApp.fetch(tokenUrl, tokenOptions);
  var parsedToken = JSON.parse(responseToken);
  var token = parsedToken.access_token;

  var apiUrl = "";
  var responseApi = "";
  var apiOptions = {
    headers : {
      Authorization: 'Bearer ' + token
    },
    "method" : "get"
  };
  var array_Column_a = [];
  var array_Text = [];
  var array_Expanded_url = [];
  var string_Max_id = 0;
  var int_Line_counter = 1;
  var int_Break_loop = 0;

  // Page through the v2 timeline, 100 tweets per request
  do
  {
    if (int_Line_counter == 1)
    {
      apiUrl = 'https://api.twitter.com/2/users/' + string_Screen_name + '/tweets?expansions=attachments.poll_ids,attachments.media_keys,author_id,entities.mentions.username,geo.place_id,in_reply_to_user_id,referenced_tweets.id,referenced_tweets.id.author_id&tweet.fields=attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld&user.fields=created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld&place.fields=contained_within,country,country_code,full_name,geo,id,name,place_type&poll.fields=duration_minutes,end_datetime,id,options,voting_status&media.fields=duration_ms,height,media_key,preview_image_url,type,url,width,public_metrics,non_public_metrics,organic_metrics,promoted_metrics&max_results=100';
    }
    else
    {
      apiUrl = 'https://api.twitter.com/2/users/' + string_Screen_name + '/tweets?expansions=attachments.poll_ids,attachments.media_keys,author_id,entities.mentions.username,geo.place_id,in_reply_to_user_id,referenced_tweets.id,referenced_tweets.id.author_id&tweet.fields=attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld&user.fields=created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld&place.fields=contained_within,country,country_code,full_name,geo,id,name,place_type&poll.fields=duration_minutes,end_datetime,id,options,voting_status&media.fields=duration_ms,height,media_key,preview_image_url,type,url,width,public_metrics,non_public_metrics,organic_metrics,promoted_metrics&max_results=100&until_id=' + string_Max_id;
    }
    responseApi = UrlFetchApp.fetch(apiUrl, apiOptions);
    if (responseApi.getResponseCode() == 200)
    {
      var obj_data = JSON.parse(responseApi.getContentText());
      for (var int_i = 0; int_i < obj_data.length; int_i++)
      {
        array_Column_a.push([int_Line_counter]);
        array_Text.push([obj_data.data[int_i].text]);
        if (obj_data.data[int_i].entities.urls[0] != undefined && obj_data.data[int_i].entities != undefined)
        {
          array_Expanded_url.push([obj_data.data[int_i].entities.urls[0].expanded_url]);
        }
        else
        {
          array_Expanded_url.push([""]);
        }
        int_Line_counter++;
      }
      if (obj_data[(obj_data.length-1)] != undefined && int_i < parseInt(obj_data.includes.users[0].public_metrics.tweet_count))
      {
        string_Max_id = obj_data[(obj_data.length-1)].id;
      }
      else
      {
        int_Break_loop = 1;
      }
    }
  } while (int_Break_loop != 1 && int_Line_counter < 1000)

  // Write the collected columns to the sheet
  if (array_Column_a.length > 0)
  {
    spreadsheet_Tweets.getRange("A3:A"+(array_Column_a.length + 2)).setValues(array_Column_a);
    spreadsheet_Tweets.getRange("C3:C"+(array_Text.length + 2)).setValues(array_Text);
    spreadsheet_Tweets.getRange("D3:D"+(array_Expanded_url.length + 2)).setValues(array_Expanded_url);
  }
  else
  {
    Browser.msgBox("0 Tweets found");
  }
}
However, when it comes to splitting the collected values into one spreadsheet row per tweet, nothing is written to the spreadsheet. This block is never processed:
for (var int_i = 0; int_i < obj_data.length; int_i++)
{
  array_Column_a.push([int_Line_counter]);
  array_Text.push([obj_data.data[int_i].text]);
  if (obj_data.data[int_i].entities.urls[0] != undefined && obj_data.data[int_i].entities != undefined)
  {
    array_Expanded_url.push([obj_data.data[int_i].entities.urls[0].expanded_url]);
  }
  else
  {
    array_Expanded_url.push([""]);
  }
  int_Line_counter++;
}
It does not even try to process anything inside for (var int_i = 0; int_i < obj_data.length; int_i++); it skips straight past it and raises the 0 Tweets found warning from:
else
{
  Browser.msgBox("0 Tweets found");
}
I tried using
for (var int_i = 0; int_i < obj_data.length; int_i++)
{
  array_Column_a.push([int_Line_counter]);
  array_Text.push([obj_data.data[int_i].text]);
  SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Tweets").getRange("A3").setValue("Test");
  return;
to find out whether the inside of the for loop was being processed. It is not: no value was placed in the cell, so I conclude the body of the loop never runs.
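A less intrusive way to check whether the loop body runs is to log from inside it instead of writing a test value to the sheet and returning; a minimal sketch, assuming the same obj_data and counters as above:

// Sketch: log each iteration so it shows up in the Apps Script execution log
for (var int_i = 0; int_i < obj_data.length; int_i++)
{
  Logger.log("iteration " + int_i + ": " + obj_data.data[int_i].text);   // visible only if the body executes
}
Logger.log("loop bound was: " + obj_data.length);   // shows what the for condition is actually comparing against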
What do I need to do so that this does not happen, so the for actually loops and can deliver the list of tweets? In theory the way the data is collected is correct, because in tests it returns the exact values.
Link:
API V2:
https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-tweets
I stress that with API V1.1 the data was delivered perfectly.
V1.1 test result:
The structure of the response is different, so you do need to make a few modifications for v2. Update your loop conditions.
Modifications:
- In v2, obj_data contains an extra meta object, so you need to take the length of obj_data.data, not just obj_data:
// from: obj_data.length
for (var int_i = 0; int_i < obj_data.length; int_i++) {
// to: obj_data.data.length
for (var int_i = 0; int_i < obj_data.data.length; int_i++) {
- After that, you should get a length of 100.
- The next problem is that some entries have no urls under entities. So before accessing entities.urls[0], you first need to check whether entities.urls exists. Those rows will simply skip the url, but at least it will not throw an error:
// from:
if (obj_data.data[int_i].entities.urls[0] != undefined && obj_data.data[int_i].entities != undefined)
// to: added obj_data.data[int_i].entities.urls to check if it exists
if (obj_data.data[int_i].entities.urls && obj_data.data[int_i].entities.urls[0] != undefined && obj_data.data[int_i].entities != undefined)
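The same guard can also be written so that entities is checked before urls, which avoids an error when a tweet has no entities at all. An optional sketch, not required for the fix above:

// Sketch: check entities first, then urls, then take the first entry
var obj_entities = obj_data.data[int_i].entities;
if (obj_entities && obj_entities.urls && obj_entities.urls.length > 0)
{
  array_Expanded_url.push([obj_entities.urls[0].expanded_url]);
}
else
{
  array_Expanded_url.push([""]);
}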
Resulting snippet:
if (responseApi.getResponseCode() == 200) {
  var obj_data = JSON.parse(responseApi.getContentText());
  for (var int_i = 0; int_i < obj_data.data.length; int_i++) {
    array_Column_a.push([int_Line_counter]);
    array_Text.push([obj_data.data[int_i].text]);
    if (obj_data.data[int_i].entities.urls && obj_data.data[int_i].entities.urls[0] != undefined && obj_data.data[int_i].entities != undefined)
      array_Expanded_url.push([obj_data.data[int_i].entities.urls[0].expanded_url]);
    else
      array_Expanded_url.push([""]);
    int_Line_counter++;
  }
...
Output:
Entry 79, with no entities.urls
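One more thing worth noting: the pagination block after the loop still indexes obj_data as if it were an array (obj_data[obj_data.length - 1]), which is undefined for a v2 response object, so string_Max_id never gets a real value. The usual v2 approach is to read meta.next_token from the response and send it back as pagination_token on the next request. A sketch under that assumption; baseApiUrl stands for the long /2/users/:id/tweets URL built above and is not part of the original code:

// Sketch: v2-style pagination with meta.next_token instead of max_id/until_id
var string_Next_token = "";
do
{
  apiUrl = baseApiUrl + (string_Next_token ? "&pagination_token=" + string_Next_token : "");
  responseApi = UrlFetchApp.fetch(apiUrl, apiOptions);
  var obj_data = JSON.parse(responseApi.getContentText());
  // ... push rows from obj_data.data here, as in the resulting snippet above ...
  string_Next_token = (obj_data.meta && obj_data.meta.next_token) ? obj_data.meta.next_token : "";
} while (string_Next_token != "" && int_Line_counter < 1000);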