Include multiple PHP files in one line for a Linux cronjob - PHP

I have a lot of dynamic images generated by PHP files. When I visit these files, the content (image) is updated.
I use these command lines to visit the PHP files from the Linux cronjob:
*/10 * * * * curl -q https://website.com/folder/A1-imageproduct1.php
*/10 * * * * curl -q https://website.com/folder/A1-imageproduct2.php
*/10 * * * * curl -q https://website.com/folder/A1-imageproduct3.php
I would like to use a single command line, since there is a very large number of files.
I tried to create a PHP file with this code:
<?php
foreach (glob("A1-*.php") as $name) {
    include($name);
}
?>
but it only includes the first file; the rest are apparently ignored, both when visiting the file from the Linux cronjob and from my web browser.

If you have to make the calls through cURL, you should do something like this:
<?php
$llamar = new class {
    public function curl($file) {
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, "https://website.com/folder/" . $file);
        curl_setopt($ch, CURLOPT_HEADER, 0);
        curl_exec($ch);
        curl_close($ch);
    }
};

foreach (glob("A1-*.php") as $name) {
    $llamar->curl($name);
}
?>
If that doesn't pick up all the files, it would be better to use this class. Change "RUTE" to the path of your directory:
$llamar = new class {
    public function curl($file) {
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, "https://website.com/folder/" . $file);
        curl_setopt($ch, CURLOPT_HEADER, 0);
        curl_exec($ch);
        curl_close($ch);
    }
};

$archivo = new class {
    public function leer($carpeta)
    {
        $archivos = array();
        if (is_dir($carpeta)) {
            if ($dir = opendir($carpeta)) {
                while (($archivo = readdir($dir)) !== false) {
                    if ($archivo != '.' && $archivo != '..' && $archivo != '.htaccess') {
                        if (file_exists($carpeta . '/' . $archivo)) {
                            if (!is_dir($carpeta . '/' . $archivo)) {
                                $archivos[] = $archivo;
                            }
                        }
                    }
                }
                closedir($dir);
            }
        }
        return $archivos;
    }
}; // note the semicolon: an anonymous class assignment is a statement

$archivos = $archivo->leer("RUTE");
for ($i = 0; $i < count($archivos); $i++) {
    $llamar->curl($archivos[$i]);
}

I think your path is incorrect; add the full path. Try this:
$cwd = getcwd();
foreach (glob("$cwd/folder/A1-*.php") as $name) {
    include($name);
}

Related

Using cURL and PHP for CACTI in Windows

I was recently tasked to monitor external webpage response/loading time via CACTI. I found some PHP scripts (pageload-agent.php and class.pageload.php) that were working using cURL. All was working fine until they requested it be transferred from Linux to a Windows 2012 R2 server. I'm having a very hard time modifying the scripts to work on Windows. PHP and cURL are already installed, and both are working as tested. Here are the scripts, taken from askaboutphp.
class.pageload.php
<?php
class PageLoad {
    var $siteURL = "";
    var $pageInfo = "";

    /*
     * sets the URLs to check for loadtime into an array $siteURLs
     */
    function setURL($url) {
        if (!empty($url)) {
            $this->siteURL = $url;
            return true;
        }
        return false;
    }

    /*
     * extract the header information of the url
     */
    function doPageLoad() {
        $u = $this->siteURL;
        if (function_exists('curl_init') && !empty($u)) {
            $ch = curl_init($u);
            curl_setopt($ch, CURLOPT_HEADER, true);
            curl_setopt($ch, CURLOPT_ENCODING, "gzip");
            curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
            curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
            curl_setopt($ch, CURLOPT_NOBODY, false);
            curl_setopt($ch, CURLOPT_FRESH_CONNECT, false);
            curl_setopt($ch, CURLOPT_USERAGENT, "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)");
            $pageBody = curl_exec($ch);
            $this->pageInfo = curl_getinfo($ch);
            curl_close($ch);
            return true;
        }
        return false;
    }

    /*
     * compile the page load statistics only
     */
    function getPageLoadStats() {
        $info = $this->pageInfo;
        // stats from info
        $s['dest_url'] = $info['url'];
        $s['content_type'] = $info['content_type'];
        $s['http_code'] = $info['http_code'];
        $s['total_time'] = $info['total_time'];
        $s['size_download'] = $info['size_download'];
        $s['speed_download'] = $info['speed_download'];
        $s['redirect_count'] = $info['redirect_count'];
        $s['namelookup_time'] = $info['namelookup_time'];
        $s['connect_time'] = $info['connect_time'];
        $s['pretransfer_time'] = $info['pretransfer_time'];
        $s['starttransfer_time'] = $info['starttransfer_time'];
        return $s;
    }
}
?>
pageload-agent.php
#! /usr/bin/php -q
<?php
// include the class
include_once 'class.pageload.php';

// read in an argument - must make sure there's an argument to use
if ($argc == 2) {
    // read in the arg.
    $url_argv = $argv[1];
    if (!eregi('^http://', $url_argv)) {
        $url_argv = "http://$url_argv";
    }
    // check that the arg is not empty
    if ($url_argv != "") {
        // initiate the results array
        $results = array();
        // initiate the class
        $lt = new PageLoad();
        // set the page to check the loadtime
        $lt->setURL($url_argv);
        // load the page
        if ($lt->doPageLoad()) {
            // load the page stats into the results array
            $results = $lt->getPageLoadStats();
        } else {
            // do nothing
            print "";
        }
        // print out the results
        if (is_array($results)) {
            // expecting only one record as we only passed in 1 page.
            $output = $results;
            print "dns:" . $output['namelookup_time'];
            print " con:" . $output['connect_time'];
            print " pre:" . $output['pretransfer_time'];
            print " str:" . $output['starttransfer_time'];
            print " ttl:" . $output['total_time'];
            print " sze:" . $output['size_download'];
            print " spd:" . $output['speed_download'];
        } else {
            // do nothing
            print "";
        }
    }
} else {
    // do nothing
    print "";
}
?>
Thank you. Any type of assistance is greatly appreciated.
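One possible culprit: eregi() was deprecated in PHP 5.3 and removed in PHP 7, so if the Windows server runs a newer PHP, the argument check in pageload-agent.php fails before cURL is ever reached. A drop-in preg_match() equivalent of that check (a sketch, not from the original thread):
if (!preg_match('#^http://#i', $url_argv)) {
    $url_argv = "http://$url_argv";
}
The #! /usr/bin/php -q shebang is also Unix-specific; on Windows the agent would be invoked as php pageload-agent.php <url>.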

PHP thread can't unlink files?

When creating a new PHP child thread, it becomes unable to delete files using unlink(). Is there a good reason for that limitation, or am I forgetting something?
I get a warning:
Warning: unlink(downloads/1e6f6fa1c0552a1af9058f10216b40e8): No such
file or directory
although the file is created in the destination folder, and when I run the same command outside the thread function it deletes the file as it should.
<?php
// multithreading class
class download extends Thread {
    public $i;
    public $res;

    public function __construct($s) {
        $this->i = $s;
    }

    public function run() {
        try {
            $url = "http://my.link.com/{$this->i}";
            set_time_limit(0);
            $id = md5(uniqid());
            $tempName = md5($id . time());
            $tmp = "downloads/{$tempName}";
            $fp = fopen(dirname(__FILE__) . '/' . $tmp, 'w+');
            $ch = curl_init($url);
            curl_setopt($ch, CURLOPT_TIMEOUT, 50);
            curl_setopt($ch, CURLOPT_FILE, $fp);
            curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
            curl_exec($ch);
            curl_close($ch);
            fclose($fp);
            require('scanner.php');
            $results = scanfiles($tmp);
            unlink($tmp);
            $this->res = $results;
        } catch (Exception $e) {
            $this->res = '0';
        }
    }
}
?>
Into fopen() you're passing an absolute path (dirname(__FILE__) . "/{$tmp}"), while into unlink() a relative one; just use the absolute path everywhere and it should work. By the way, you can use just __DIR__ instead of dirname(__FILE__) since PHP 5.3.
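A minimal sketch of that fix inside run(), building the absolute path once and reusing it (same file layout as the question):
$tempName = md5($id . time());
$tmp = __DIR__ . "/downloads/{$tempName}"; // absolute path, built once

$fp = fopen($tmp, 'w+');
$ch = curl_init($url);
curl_setopt($ch, CURLOPT_TIMEOUT, 50);
curl_setopt($ch, CURLOPT_FILE, $fp);
curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
curl_exec($ch);
curl_close($ch);
fclose($fp);

require('scanner.php');
$results = scanfiles($tmp); // scanner gets the same absolute path
unlink($tmp);               // now resolves to the file that was actually written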

Search Files Nothing Found

I am trying to search (filter) for files in a Dropbox folder, but no files are being found when there are files that match the filter. I am not using the PHP library provided by Dropbox.
Here is an extract of the code:
class Dropbox {
    private $headers = array();
    private $authQueryString = "";
    public $SubFolders = array();
    public $Files = array();

    function __construct() {
        $this->headers = array('Authorization: OAuth oauth_version="1.0", oauth_signature_method="PLAINTEXT", oauth_consumer_key="'.DROPBOX_APP_KEY.'", oauth_token="'.DROPBOX_OAUTH_ACCESS_TOKEN.'", oauth_signature="'.DROPBOX_APP_SECRET.'&'.DROPBOX_OAUTH_ACCESS_SECRET.'"');
        $this->authQueryString = "oauth_consumer_key=".DROPBOX_APP_KEY."&oauth_token=".DROPBOX_OAUTH_ACCESS_TOKEN."&oauth_signature_method=PLAINTEXT&oauth_signature=".DROPBOX_APP_SECRET."%26".DROPBOX_OAUTH_ACCESS_SECRET."&oauth_version=1.0";
    }

    public function GetFolder($folder, $fileFilter = "") {
        // Add the required folder to the end of the base path for the folder call
        if ($fileFilter == "")
            $subPath = "metadata/sandbox";
        else
            $subPath = "search/sandbox";
        if (strlen($folder) > 1) {
            $subPath .= (substr($folder, 0, 1) != "/" ? "/" : "") . $folder;
        }
        // Set up the post parameters for the call
        $params = null;
        if ($fileFilter != "") {
            $params = array(
                "query" => $fileFilter
            );
        }
        // Clear the sub folders and files logged
        $this->SubFolders = array();
        $this->Files = array();
        // Make the call
        $content = $this->doCall($subPath, $params);
        // Log the files and folders
        for ($i = 0; $i < sizeof($content->contents); $i++) {
            $f = $content->contents[$i];
            if ($f->is_dir == "1") {
                array_push($this->SubFolders, $f->path);
            } else {
                array_push($this->Files, $f->path);
            }
        }
        // Return the content
        return $content;
    }

    private function doCall($urlSubPath, $params = null, $filePathName = null, $useAPIContentPath = false) {
        // Create the full URL for the call
        $url = "https://api".($useAPIContentPath ? "-content" : "").".dropbox.com/1/".$urlSubPath;
        // Initialise the curl call
        $ch = curl_init();
        // Set up the curl call
        curl_setopt($ch, CURLOPT_HTTPHEADER, $this->headers);
        curl_setopt($ch, CURLOPT_URL, $url);
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        if ($params != null)
            curl_setopt($ch, CURLOPT_POSTFIELDS, $params);
        $fh = null;
        if ($filePathName != null) {
            $fh = fopen($filePathName, "rb");
            curl_setopt($ch, CURLOPT_BINARYTRANSFER, true);
            curl_setopt($ch, CURLOPT_INFILE, $fh);
            curl_setopt($ch, CURLOPT_INFILESIZE, filesize($filePathName));
        }
        // Execute and get the response
        $api_response = curl_exec($ch);
        if ($fh != null)
            fclose($fh);
        // Process the response into an object
        $json_response = json_decode($api_response);
        // Has there been an error?
        if (isset($json_response->error)) {
            throw new Exception($json_response->error);
        }
        // Send the response back
        return $json_response;
    }
}
I then call the GetFolder method of Dropbox as such:
$dbx = new Dropbox();
$filter = "MyFilter";
$dbx->GetFolder("MyFolder", $filter);
print "Num files: ".sizeof($dbx->Files);
As I am passing $filter into GetFolder, it uses the search/sandbox path and creates a parameter array ($params) with the required query parameter in it.
The process works fine if I don't provide the $fileFilter parameter to GetFolder and all files in the folder are returned (uses the metadata/sandbox path).
Other methods of the Dropbox class (omitted from the extract for brevity) use the $params feature, and they work fine.
I have been using the Dropbox API reference for guidance (https://www.dropbox.com/developers/core/docs#search)
At first glance, it looks like you're making a GET request to /search but passing parameters via CURLOPT_POSTFIELDS. Try using a POST or encoding the search query as a query string parameter.
EDIT
Below is some code that works for me (usage: php search.php <term>). Note that I'm using OAuth 2 instead of OAuth 1, so my Authorization header looks different from yours.
<?php
$access_token = '<REDACTED>';
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, 'https://api.dropbox.com/1/search/auto');
curl_setopt($ch, CURLOPT_HTTPHEADER, array('Authorization:Bearer ' . $access_token));
curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
curl_setopt($ch, CURLOPT_POST, 1);
curl_setopt($ch, CURLOPT_POSTFIELDS, array('query' => $argv[1]));
$api_response = curl_exec($ch);
echo "Matching files:\n\t" . join("\n\t",
array_map(function ($file) {
return $file['path'];
}, json_decode($api_response, true)))."\n";
?>
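The query-string alternative mentioned above would look roughly like this (a sketch against the same /1/search/auto endpoint, reusing the OAuth 2 bearer token from the snippet above):
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, 'https://api.dropbox.com/1/search/auto?' . http_build_query(array('query' => $argv[1])));
curl_setopt($ch, CURLOPT_HTTPHEADER, array('Authorization:Bearer ' . $access_token));
curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
$api_response = curl_exec($ch);
curl_close($ch);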

Setting up automatic Git deployment of a PHP project

What I want to do is switch from FTP deployment to Git. That is, I want to automatically keep my Bitbucket private repo and my shared webhosting in sync. I googled and found the following script to deploy to my webserver (based on this article).
// Set these depending on your BB credentials
$username = 'username';
$password = 'password';

// Grab the data from BB's POST service and decode
$json = stripslashes($_POST['payload']);
$data = json_decode($json);

// Set some parameters to fetch the correct files
$uri = $data->repository->absolute_url;
$node = $data->commits[0]->node;
$files = $data->commits[0]->files;

// Foreach through the files and curl them over
foreach ($files as $file) {
    if ($file->type == "removed") {
        unlink($file->file);
    } else {
        $url = "https://api.bitbucket.org/1.0/repositories"
            . $uri . "raw/" . $node . "/" . $file->file;
        $path = $file->file;
        $dirname = dirname($path);
        if (!is_dir($dirname)) {
            mkdir($dirname, 0775, true);
        }
        $fp = fopen($path, 'w');
        $ch = curl_init($url);
        curl_setopt($ch, CURLOPT_USERPWD, "$username:$password");
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
        curl_setopt($ch, CURLOPT_FILE, $fp);
        $data = curl_exec($ch);
        curl_close($ch);
        fclose($fp);
    }
}
The problem is, this works on simple changesets of 5-10 file changes. But when I push the whole project for the first time (for example, with 600-700 files and folders) into my Bitbucket private repo, this script doesn't work (it just doesn't, with no error in errors.log).
What am I missing?
By the way, can I do something like this:
As we know, Bitbucket can send POST information to an exact URL (given by the user) directly after a commit has been made. So when deploy.php receives the POST, we could get the entire commit as a zip or tar, clean our current files, and unzip the new commit onto the webserver.
Is that possible? If yes, then how? Is there any other good way?
Update
I found the code below for automatically deploying a PHP project. The problem is that the URL https://bitbucket.org/$username/$reponame/get/tip.zip doesn't work on a Bitbucket private Git repo, probably because of authentication (I haven't tested this on a public repo). What I need is to get the last commit's zip file and unzip it inside my project.
<?php
// your Bitbucket username
$username = "edifreak";
// your Bitbucket repo name
$reponame = "canvas-game-demo";
// extract to
$dest = "./"; // leave ./ for relative destination

////////////////////////////////////////////////////////
// Let's get stuff done!

// set higher script timeout (for large repo's or slow servers)
set_time_limit(380);

// download the repo zip file
$repofile = file_get_contents("https://bitbucket.org/$username/$reponame/get/tip.zip");
file_put_contents('tip.zip', $repofile);
unset($repofile);

// unzip
$zip = new ZipArchive;
$res = $zip->open('tip.zip');
if ($res === TRUE) {
    $zip->extractTo('./');
    $zip->close();
} else {
    die('ZIP not supported on this server!');
}

// delete unnecessary .hg files
#unlink("$username-$reponame-tip/.hgignore");
#unlink("$username-$reponame-tip/.hg_archival.txt");

// function to delete all files in a directory recursively
function rmdir_recursively($dir) {
    if (is_dir($dir)) {
        $objects = scandir($dir);
        foreach ($objects as $object) {
            if ($object != "." && $object != "..") {
                if (filetype($dir . "/" . $object) == "dir") {
                    rmdir_recursively($dir . "/" . $object);
                } else {
                    unlink($dir . "/" . $object);
                }
            }
        }
        reset($objects);
        rmdir($dir);
    }
}

// function to recursively copy the files
function copy_recursively($src, $dest) {
    if (is_dir($src)) {
        if ($dest != "./") rmdir_recursively($dest);
        #mkdir($dest);
        $files = scandir($src);
        foreach ($files as $file) {
            if ($file != "." && $file != "..") copy_recursively("$src/$file", "$dest/$file");
        }
    } else if (file_exists($src)) {
        copy($src, $dest);
    }
    rmdir_recursively($src);
}

// start copying the files from extracted repo and delete the old directory recursively
copy_recursively("$username-$reponame-tip", $dest);

// delete the repo zip file
unlink("tip.zip");

// Yep, we're done :)
echo "We're done!";
?>
This solution does not provide authentication:
// download the repo zip file
$repofile = file_get_contents("https://bitbucket.org/$username/$reponame/get/tip.zip");
file_put_contents('tip.zip', $repofile);
unset($repofile);
But cURL allows it, so a zip archive can be downloaded from a private repository in the same way as in the first script:
$node = ''; // a node from repo, like c366e96f16...
$fp = fopen($path, 'w');
$ch = curl_init("https://bitbucket.org/$username/$reponame/get/$node.zip");
curl_setopt($ch, CURLOPT_USERPWD, "$username:$password");
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
curl_setopt($ch, CURLOPT_FILE, $fp);
$data = curl_exec($ch);
curl_close($ch);
fclose($fp);
I have tested it on my Bitbucket account, and it works very well.
If you need to get the last changeset node, use the Bitbucket API to GET a list of changesets:
$username = 'login';
$password = 'pass';
$owner = $username; // if user is owner
$repo = 'repo name';
$response = "";
$callback = function ($url, $chunk) use (&$response) {
    $response .= $chunk;
    return strlen($chunk);
};
$ch = curl_init("https://api.bitbucket.org/1.0/repositories/$owner/$repo/changesets?limit=1");
curl_setopt($ch, CURLOPT_USERPWD, "$username:$password");
curl_setopt($ch, CURLOPT_HEADER, 0);
curl_setopt($ch, CURLOPT_HTTPHEADER, array('User-Agent:Mozilla/5.0'));
curl_setopt($ch, CURLOPT_WRITEFUNCTION, $callback);
curl_exec($ch);
curl_close($ch);
$changesets = json_decode($response, true);
$node = $changesets['changesets'][0]['node'];
$raw_node = $changesets['changesets'][0]['raw_node'];
print($node . PHP_EOL);
print($raw_node . PHP_EOL);
I recently discovered Capistrano, which is a great tool. It was initially developed for Ruby, but it's also great in combination with PHP: http://www.davegardner.me.uk/blog/2012/02/13/php-deployment-with-capistrano/
Based on your update, replace your PHP file's contents with the code below:
<?php
// Set these depending on your BB credentials
$username = '';
$password = '';

// your Bitbucket repo name
$reponame = "";

// extract to
$dest = "./"; // leave ./ for relative destination

// Grab the data from BB's POST service and decode
$json = stripslashes($_POST['payload']);
$data = json_decode($json);

// set higher script timeout (for large repo's or slow servers)
set_time_limit(5000);

// Set some parameters to fetch the correct files
$uri = $data->repository->absolute_url;
$node = $data->commits[0]->node;
$files = $data->commits[0]->files;

// download the repo zip file
$fp = fopen("tip.zip", 'w');
$ch = curl_init("https://bitbucket.org/$username/$reponame/get/$node.zip");
curl_setopt($ch, CURLOPT_USERPWD, "$username:$password");
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
curl_setopt($ch, CURLOPT_FILE, $fp);
$data = curl_exec($ch);
curl_close($ch);
fclose($fp);

// unzip
$zip = new ZipArchive;
$res = $zip->open('tip.zip');
if ($res === TRUE) {
    $zip->extractTo('./');
    $zip->close();
} else {
    die('ZIP not supported on this server!');
}

// function to delete all files in a directory recursively
function rmdir_recursively($dir) {
    if (is_dir($dir)) {
        $objects = scandir($dir);
        foreach ($objects as $object) {
            if ($object != "." && $object != "..") {
                if (filetype($dir . "/" . $object) == "dir") {
                    rmdir_recursively($dir . "/" . $object);
                } else {
                    unlink($dir . "/" . $object);
                }
            }
        }
        reset($objects);
        rmdir($dir);
    }
}

// function to recursively copy the files
function copy_recursively($src, $dest) {
    if (is_dir($src)) {
        if ($dest != "./") {
            rmdir_recursively($dest);
        }
        #mkdir($dest);
        $files = scandir($src);
        foreach ($files as $file) {
            if ($file != "." && $file != "..") {
                copy_recursively("$src/$file", "$dest/$file");
            }
        }
    } else if (file_exists($src)) {
        copy($src, $dest);
    }
    rmdir_recursively($src);
}

// start copying the files from extracted repo and delete the old directory recursively
copy_recursively("$username-$reponame-$node", $dest);

// delete the repo zip file
unlink("tip.zip");
?>
Update
Here are repositories of this script (modified by me) on:
GitHub
Bitbucket

Get direct links to videos from Vimeo in PHP

I want a direct link to videos from Vimeo with a PHP script.
I managed to find them manually, but my PHP script does not work.
Here is the idea:
For example I took this video: http://vimeo.com/22439234
When you visit the page, Vimeo generates a signature associated with the current timestamp and this video. This information is stored in a JavaScript variable, around line 520, just after:
window.addEvent ('domready', function () {
Then when you click Play, the HTML5 player reads this variable and sends an HTTP request:
http://player.vimeo.com/play_redirect?clip_id=37111719&sig={SIGNATURE}&time={TIMESTAMP}&quality=sd&codecs=H264,VP8,VP6&type=moogaloop_local&embed_location=
But it also works with:
http://player.vimeo.com/play_redirect?clip_id=37111719&sig={SIGNATURE}&time={TIMESTAMP}&quality=sd
If this URL is not opened from the same IP address that opened http://vimeo.com/22439234, it returns HTTP code 200 with an error message.
If this URL is opened from the correct IP address, the "Location" header redirects to the link to the video file:
http://av.vimeo.com/XXX/XX/XXXX.mp4?aksessionid=XXXX&token=XXXXX_XXXXXXXXX
When I build this link (http://player.vimeo.com/play_redirect?...) manually ("right click" > "source code" > "line 520"), it works.
But with PHP and regex it returns HTTP code 200 with an error message.
Why?
From my observations, Vimeo does not check the headers of the HTTP request to http://player.vimeo.com/play_redirect?...
GET or HEAD, with cookies, without cookies, referrer, etc.: nothing changes.
With PHP, I use the functions file_get_contents() and get_headers().
<?php
function getVimeo($id) {
    $content = file_get_contents('http://vimeo.com/' . $id);
    if (preg_match('#document\.getElementById\(\'player_(.+)\n#i', $content, $scriptBlock) == 0)
        return 1;
    preg_match('#"timestamp":([0-9]+)#i', $scriptBlock[1], $matches);
    $timestamp = $matches[1];
    preg_match('#"signature":"([a-z0-9]+)"#i', $scriptBlock[1], $matches);
    $signature = $matches[1];
    $url = 'http://player.vimeo.com/play_redirect?clip_id=' . $id . '&sig=' . $signature . '&time=' . $timestamp . '&quality=sd';
    print_r(get_headers($url, 1));
}
The algorithm looks like this:
1. Input data: vimeoUrl.
2. content = getRemoteContent(vimeoUrl).
3. Parse content to find and extract the value of the data-config-url attribute.
4. Navigate to data-config-url and load the content as a JSON object:
$video = json_decode($this->getRemoteContent($video->getAttribute('data-config-url')));
5. Return $video->request->files->h264->sd->url; this returns a direct link for the SD-quality video.
Here is my simple class, which is working at the moment.
class VideoController
{
    /**
     * @var array Vimeo video quality priority
     */
    public $vimeoQualityPrioritet = array('sd', 'hd', 'mobile');

    /**
     * @var string Vimeo video codec priority
     */
    public $vimeoVideoCodec = 'h264';

    /**
     * Get direct URL to Vimeo video file
     *
     * @param string $url to video on Vimeo
     * @return string file URL
     */
    public function getVimeoDirectUrl($url)
    {
        $result = '';
        $videoInfo = $this->getVimeoVideoInfo($url);
        if ($videoInfo && $videoObject = $this->getVimeoQualityVideo($videoInfo->request->files)) {
            $result = $videoObject->url;
        }
        return $result;
    }

    /**
     * Get Vimeo video info
     *
     * @param string $url to video on Vimeo
     * @return \stdClass|null result
     */
    public function getVimeoVideoInfo($url)
    {
        $videoInfo = null;
        $page = $this->getRemoteContent($url);
        $dom = new \DOMDocument("1.0", "utf-8");
        libxml_use_internal_errors(true);
        $dom->loadHTML('<?xml version="1.0" encoding="UTF-8"?>' . "\n" . $page);
        $xPath = new \DOMXpath($dom);
        $video = $xPath->query('//div[@data-config-url]');
        if ($video) {
            $videoObj = json_decode($this->getRemoteContent($video->item(0)->getAttribute('data-config-url')));
            if (!property_exists($videoObj, 'message')) {
                $videoInfo = $videoObj;
            }
        }
        return $videoInfo;
    }

    /**
     * Get Vimeo video object
     *
     * @param stdClass $files object of Vimeo files
     * @return stdClass Video file object
     */
    public function getVimeoQualityVideo($files)
    {
        $video = null;
        if (!property_exists($files, $this->vimeoVideoCodec) && count($files->codecs)) {
            $this->vimeoVideoCodec = array_shift($files->codecs);
        }
        $codecFiles = $files->{$this->vimeoVideoCodec};
        foreach ($this->vimeoQualityPrioritet as $quality) {
            if (property_exists($codecFiles, $quality)) {
                $video = $codecFiles->{$quality};
                break;
            }
        }
        if (!$video) {
            foreach (get_object_vars($codecFiles) as $file) {
                $video = $file;
                break;
            }
        }
        return $video;
    }

    /**
     * Get remote content by URL
     *
     * @param string $url remote page URL
     * @return string result content
     */
    public function getRemoteContent($url)
    {
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_CONNECTTIMEOUT, 10);
        curl_setopt($ch, CURLOPT_TIMEOUT, 20);
        curl_setopt($ch, CURLOPT_HEADER, false);
        curl_setopt($ch, CURLOPT_URL, $url);
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);
        curl_setopt($ch, CURLOPT_MAXREDIRS, 10);
        curl_setopt($ch, CURLOPT_USERAGENT, 'spider');
        $content = curl_exec($ch);
        curl_close($ch);
        return $content;
    }
}
Usage:
$video = new VideoController;
var_dump($video->getVimeoDirectUrl('http://vimeo.com/90747156'));
Try adding a valid user agent to the headers of each request. For this you must use cURL or HttpRequest instead of file_get_contents().
After such manipulations I got a working link for downloading the video file.
Here is my code:
function getVimeo($id) {
    // get the page with the player
    $queryResult = httpQuery('http://vimeo.com/' . $id);
    $content = $queryResult['content'];
    if (preg_match('#document\.getElementById\(\'player_(.+)\n#i', $content, $scriptBlock) == 0)
        return 1;
    preg_match('#"timestamp":([0-9]+)#i', $scriptBlock[1], $matches);
    $timestamp = $matches[1];
    preg_match('#"signature":"([a-z0-9]+)"#i', $scriptBlock[1], $matches);
    $signature = $matches[1];
    $url = 'http://player.vimeo.com/play_redirect?clip_id=' . $id . '&sig=' . $signature . '&time=' . $timestamp . '&quality=sd';
    // make the request to get the video URL
    #print_r(get_headers($url, 1));
    $finalQuery = httpQuery($url);
    return $finalQuery['redirect_url'];
}

// make queries via cURL
function httpQuery($url) {
    $options = array(
        CURLOPT_USERAGENT => 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Ubuntu/12.04 Chromium/18.0.1025.168 Chrome/18.0.1025.168 Safari/535.19',
        CURLOPT_RETURNTRANSFER => true,
    );
    $ch = curl_init($url);
    curl_setopt_array($ch, $options);
    $content = curl_exec($ch);
    $info = curl_getinfo($ch);
    curl_close($ch);
    $result = $info;
    $result['content'] = $content;
    return $result;
}

echo getVimeo(22439234);
