[prev in list] [next in list] [prev in thread] [next in thread]
List: gtkmm
Subject: ThreadPool scalability
From: "=?ISO-8859-1?Q?Germ=E1n_Diago?=" <germandiago () gmail ! com>
Date: 2008-06-08 21:44:50
Message-ID: b798010f0806081444w66040940qa4bedc7a049a3dc9 () mail ! gmail ! com
[Download RAW message or body]
I'm using this code to make thumbnails of images. When I execute the
code with 1 thread it runs in 7.6 seconds more or less.
If the code is run with 2 threads, the code runs in 7.1 seconds. But
the code should be roughly twice as fast. Can anyone help here,
please?
The program is attached. When I monitor with gnome-system-monitor, in
the case of 1 thread, it gets 100% cpu for one core (Core 2 duo e 6400
cpu).
But if I use 2 threads, they don't get 100% cpu at any time. Is there
a way to change priority of thread pools? Thank you very much. The
code is attached.
["escalado_imagenes2.cpp" (text/x-c++src)]
#include <glibmm.h>
#include <gtkmm.h>

#include <cstdlib>
#include <iostream>
#include <set>
#include <string>
#include <vector>
// Protects `finalizar` and pairs with `condfinalizar` for the
// producer/worker completion handshake.
Glib::StaticMutex mutexfinalizar = GLIBMM_STATIC_MUTEX_INIT;
// Signalled by the worker that processes the last image; allocated in
// inicializa_todo(), freed in finaliza_todo().
Glib::Cond * condfinalizar = (Glib::Cond *)0;
// Protects `numimagenesprocesadas`.
Glib::StaticMutex mutexnumimagenesprocesadas = GLIBMM_STATIC_MUTEX_INIT;
// Serializes Gtk::Image construction in the workers (image loading is
// treated as not thread-safe here).
Glib::StaticMutex muteximagen = GLIBMM_STATIC_MUTEX_INIT;
// Count of thumbnails finished so far (guarded by mutexnumimagenesprocesadas).
int numimagenesprocesadas = 0;
// Set to true once all images are done (guarded by mutexfinalizar).
bool finalizar = false;
// Global setup: bring up the GLib threading system and create the
// condition variable used for the completion handshake.  Must run
// before any worker threads are started.
void inicializa_todo()
{
    Glib::thread_init();
    condfinalizar = new Glib::Cond();
}
void finaliza_todo()
{
delete condfinalizar;
}
// Worker task: load `nomimagen`, scale it to anchura x altura and save the
// thumbnail as "<basename-without-extension>50x50.jpeg" in the current
// working directory.  When the counter reaches `numimagenes`, signals the
// producer blocked in the condition wait.
void calcular_y_guardar_miniatura(const std::string & nomimagen, int anchura, int \
altura, int numimagenes) {
// Gtk::Image construction is serialized; the loaders are treated as not
// thread-safe by this program.
muteximagen.lock();
Gtk::Image img(nomimagen);
muteximagen.unlock();
Glib::RefPtr<Gdk::Pixbuf> buf = img.get_pixbuf();
// Use the requested thumbnail size: the anchura/altura parameters were
// previously ignored in favour of a hard-coded 150x150, contradicting
// the "50x50" output file name.
Glib::RefPtr<Gdk::Pixbuf> bufminiatura = buf->scale_simple(anchura, altura, \
Gdk::INTERP_BILINEAR);
// Build the output name from the basename without its extension.  Note that
// substr's second argument is a LENGTH, not an end position, so it must be
// the distance from the start offset to the last '.'.
const std::size_t barra = nomimagen.rfind("/");
const std::size_t inicio = (barra == std::string::npos) ? 0 : barra + 1;
const std::size_t punto = nomimagen.rfind(".");
const std::size_t largo = (punto == std::string::npos || punto < inicio)
    ? std::string::npos : punto - inicio;
// Thumbnails are written to the program's working directory, wherever the
// originals live.
bufminiatura->save(nomimagen.substr(inicio, largo) + "50x50.jpeg", "jpeg");
// Capture the incremented counter while still holding the mutex; comparing
// the global outside the lock (as the original did) was a data race.
mutexnumimagenesprocesadas.lock();
const int procesadas = ++numimagenesprocesadas;
mutexnumimagenesprocesadas.unlock();
// Last image done: wake the producer so it can return.
if (procesadas == numimagenes)
{
mutexfinalizar.lock();
finalizar = true;
condfinalizar->signal();
mutexfinalizar.unlock();
}
}
// Producer: scan up to `numimagenes` entries of `directorio`, queue one
// thumbnail task per *.jpg / *.JPG file found, then block until the workers
// report that every queued image has been processed.
void enviar_imagenes(const std::string & directorio, int numimagenes, \
Glib::ThreadPool & pool) {
Glib::Dir directory(directorio);
// First pass: collect matching file names.  The original read a single
// directory entry before the loop and examined that same name on every
// iteration; read_name() must be called once per entry.
std::vector<std::string> imagenes;
for (int i = 0; i < numimagenes; ++i)
{
const std::string curr = directory.read_name();
if (curr.empty())
break; // directory exhausted
const std::size_t pos = curr.rfind(".");
if (pos == std::string::npos)
continue;
const std::string ext = curr.substr(pos);
if (ext == ".jpg" || ext == ".JPG")
imagenes.push_back(curr);
}
// Queue one task per image, binding the number of images actually queued
// (not the caller's upper bound): otherwise, with fewer than `numimagenes`
// JPEGs present, the completion counter could never reach its target and
// the wait below would block forever.
const int total = static_cast<int>(imagenes.size());
for (std::vector<std::string>::const_iterator it = imagenes.begin();
     it != imagenes.end(); ++it)
{
std::cout << *it << std::endl;
pool.push(sigc::bind(&calcular_y_guardar_miniatura, directorio + "/" + *it,
50, 50, total));
}
std::cout << "Máximo número de hilos: " << pool.get_num_threads() << std::endl;
std::cout << "Hilos no siendo utilizados: " << pool.get_num_unused_threads() << \
std::endl;
// Nothing queued means nobody will ever signal: do not wait.
if (total == 0)
return;
mutexfinalizar.lock();
while (!finalizar)
condfinalizar->wait(mutexfinalizar);
mutexfinalizar.unlock();
}
// Entry point.  argv[1] is the number of worker threads for the pool.
// Times the thumbnailing of a fixed directory of images and prints the
// elapsed seconds for the chosen thread count.
int main(int argc, char * argv[])
{
if (argc != 2) {
std::cerr << "Se necesita un argumento con el número de hilos empleados\n";
std::exit(-1);
}
const int numhilos = std::atoi(argv[1]);
// atoi returns 0 on non-numeric input; reject non-positive thread counts
// up front instead of configuring a useless pool.
if (numhilos <= 0) {
std::cerr << "El número de hilos debe ser un entero positivo\n";
std::exit(-1);
}
Gtk::Main m(argc, argv);
inicializa_todo();
Glib::ThreadPool piscina_hilos;
Glib::Timer timer;
piscina_hilos.set_max_threads(numhilos);
std::cout << "Comenzando el cálculo de las imágenes miniaturizadas" << std::endl;
timer.start();
enviar_imagenes("/home/german/fotos", 50,
piscina_hilos);
// Glib::Timer::elapsed() returns double; keep full precision instead of
// truncating through float.
const double segundos = timer.elapsed();
std::cout << "Acabó el cálculo de las imágenes miniaturizadas" << std::endl;
std::cout << "Tiempo de cálculo para " << numhilos << " hilos: " <<
segundos << std::endl;
finaliza_todo();
}
_______________________________________________
gtkmm-list mailing list
gtkmm-list@gnome.org
http://mail.gnome.org/mailman/listinfo/gtkmm-list
[prev in list] [next in list] [prev in thread] [next in thread]
Configure |
About |
News |
Add a list |
Sponsored by KoreLogic