/**
* Authors.....: Jens Steube <jens.steube@gmail.com>
* Gabriele Gristina <matrix@hashcat.net>
+ * magnum <john.magnum@hushmail.com>
*
* License.....: MIT
*/
const uint VERSION_BIN = 210;
const uint RESTORE_MIN = 210;
+double TARGET_MS_PROFILE[3] = { 8, 16, 96 };
+
#define INCR_RULES 10000
#define INCR_SALTS 100000
#define INCR_MASKS 1000
#define MARKOV_DISABLE 0
#define MARKOV_CLASSIC 0
#define BENCHMARK 0
-#define BENCHMARK_MODE 1
+#define BENCHMARK_REPEATS 100
#define RESTORE 0
#define RESTORE_TIMER 60
#define RESTORE_DISABLE 0
#define HL_MODE_FILE 4
#define HL_MODE_ARG 5
+#define HLFMTS_CNT 11
#define HLFMT_HASHCAT 0
#define HLFMT_PWDUMP 1
#define HLFMT_PASSWD 2
#define HLFMT_NETNTLM2 8
#define HLFMT_NSLDAP 9
#define HLFMT_NSLDAPS 10
-#define HLFMTS_CNT 11
+
+#define HLFMT_TEXT_HASHCAT "native hashcat"
+#define HLFMT_TEXT_PWDUMP "pwdump"
+#define HLFMT_TEXT_PASSWD "passwd"
+#define HLFMT_TEXT_SHADOW "shadow"
+#define HLFMT_TEXT_DCC "DCC"
+#define HLFMT_TEXT_DCC2 "DCC 2"
+#define HLFMT_TEXT_NETNTLM1 "NetNTLMv1"
+#define HLFMT_TEXT_NETNTLM2 "NetNTLMv2"
+#define HLFMT_TEXT_NSLDAP "nsldap"
+#define HLFMT_TEXT_NSLDAPS "nsldaps"
#define ATTACK_MODE_STRAIGHT 0
#define ATTACK_MODE_COMBI 1
#define MAX_DICTSTAT 10000
-#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 130
+#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 136
#define global_free(attr) \
{ \
5600,
7300,
7500,
+ 13100,
8300,
11100,
11200,
101,
111,
1711,
- 3000, // broken in osx
+ 3000,
1000,
1100,
2100,
12800,
- 1500, // broken in osx
+ 1500,
12400,
500,
3200,
8700,
9100,
133,
+ 13500,
11600,
12500,
13000,
+ 13200,
+ 13300,
6211,
6221,
6231,
10410,
10500,
10600,
- 10700, // broken in osx
+ 10700,
9000,
5200,
6800,
6600,
8200,
11300,
- 12700
+ 12700,
+ 13400,
+ 125
};
/**
" -h, --help Print help",
" --quiet Suppress output",
"",
- "* Benchmark:",
- "",
- " -b, --benchmark Run benchmark",
- " --benchmark-mode=NUM Benchmark-mode, see references below",
- "",
"* Misc:",
"",
" --hex-charset Assume charset is given in hex",
" --remove Enable remove of hash once it is cracked",
" --remove-timer=NUM Update input hash file each NUM seconds",
" --potfile-disable Do not write potfile",
+ " --potfile-path Specific path to potfile",
" --debug-mode=NUM Defines the debug mode (hybrid only by using rules), see references below",
" --debug-file=FILE Output file for debugging rules (see also --debug-mode)",
" --induction-dir=FOLDER Specify induction directory to use, default is $session.induct",
"",
"* Resources:",
"",
+ " -b, --benchmark Run benchmark",
+ " --benchmark-repeats=NUM Repeat the kernel on the device NUM times to increase benchmark accuracy",
" -c, --segment-size=NUM Size in MB to cache from the wordfile",
" --bitmap-min=NUM Minimum number of bits allowed for bitmaps",
" --bitmap-max=NUM Maximum number of bits allowed for bitmaps",
" --opencl-platforms=STR OpenCL platforms to use, separate with comma",
" -d, --opencl-devices=STR OpenCL devices to use, separate with comma",
" --opencl-device-types=STR OpenCL device-types to use, separate with comma, see references below",
- " --opencl-vector-width=NUM OpenCL vector-width (either 1, 2, 4 or 8), overrides value from device query",
+ " --opencl-vector-width=NUM OpenCL vector-width (either 1, 2, 4, 8 or 16), overrides value from device query",
" -w, --workload-profile=NUM Enable a specific workload profile, see references below",
- " -n, --kernel-accel=NUM Workload tuning: 1, 8, 40, 80, 160",
- " -u, --kernel-loops=NUM Workload fine-tuning: 8 - 1024",
+ " -n, --kernel-accel=NUM Workload tuning, increase the outer-loop step size",
+ " -u, --kernel-loops=NUM Workload tuning, increase the inner-loop step size",
" --gpu-temp-disable Disable temperature and fanspeed readings and triggers",
#ifdef HAVE_HWMON
" --gpu-temp-abort=NUM Abort session if GPU temperature reaches NUM degrees celsius",
"",
"* Workload Profile:",
"",
- " 1 = Reduced performance profile (low latency desktop)",
- " 2 = Default performance profile",
- " 3 = Tuned performance profile (high latency desktop)",
- "",
- "* Benchmark Settings:",
- "",
- " 0 = Manual Tuning",
- " 1 = Performance Tuning, default",
+ " 1 = Interactive performance profile, kernel execution runtime to 8ms, lower latency desktop, lower speed",
+ " 2 = Default performance profile, kernel execution runtime to 16ms, economic setting",
+ " 3 = Headless performance profile, kernel execution runtime to 96ms, higher latency desktop, higher speed",
"",
"* OpenCL device-types:",
"",
" 11100 = PostgreSQL Challenge-Response Authentication (MD5)",
" 11200 = MySQL Challenge-Response Authentication (SHA1)",
" 11400 = SIP digest authentication (MD5)",
+ " 13100 = Kerberos 5 TGS-REP etype 23",
"",
"[[ Forums, CMS, E-Commerce, Frameworks, Middleware, Wiki, Management ]]",
"",
" 8500 = RACF",
" 7200 = GRUB 2",
" 9900 = Radmin2",
+ " 125 = ArubaOS",
"",
"[[ Enterprise Application Software (EAS) ]]",
"",
" 8700 = Lotus Notes/Domino 6",
" 9100 = Lotus Notes/Domino 8",
" 133 = PeopleSoft",
+ " 13500 = PeopleSoft Token",
"",
"[[ Archives ]]",
"",
" 11600 = 7-Zip",
" 12500 = RAR3-hp",
" 13000 = RAR5",
+ " 13200 = AxCrypt",
+ " 13300 = AxCrypt in memory SHA1",
"",
"[[ Full-Disk encryptions (FDE) ]]",
"",
" 8200 = 1Password, cloudkeychain",
" 11300 = Bitcoin/Litecoin wallet.dat",
" 12700 = Blockchain, My Wallet",
+ " 13400 = Keepass 1 (AES/Twofish) and Keepass 2 (AES)",
"",
NULL
};
* oclHashcat specific functions
*/
+// Average the most recent last_num_entries kernel execution times (ms)
+// stored in the device's exec_ms ring buffer. Slots holding 0 are treated
+// as "no sample yet" and skipped. Returns 0 when no valid samples exist,
+// which also avoids a division by zero below.
+static double get_avg_exec_time (hc_device_param_t *device_param, const int last_num_entries)
+{
+ // step back last_num_entries slots from the current write position ...
+ int exec_pos = (int) device_param->exec_pos - last_num_entries;
+
+ // ... wrapping around the ring buffer if we went negative
+ if (exec_pos < 0) exec_pos += EXEC_CACHE;
+
+ double exec_ms_sum = 0;
+
+ int exec_ms_cnt = 0;
+
+ for (int i = 0; i < last_num_entries; i++)
+ {
+ double exec_ms = device_param->exec_ms[(exec_pos + i) % EXEC_CACHE];
+
+ if (exec_ms)
+ {
+ exec_ms_sum += exec_ms;
+
+ exec_ms_cnt++;
+ }
+ }
+
+ if (exec_ms_cnt == 0) return 0;
+
+ return exec_ms_sum / exec_ms_cnt;
+}
+
void status_display_automat ()
{
FILE *out = stdout;
if (device_param->skipped) continue;
- u64 speed_cnt = 0;
- float speed_ms = 0;
+ u64 speed_cnt = 0;
+ double speed_ms = 0;
for (int i = 0; i < SPEED_CACHE; i++)
{
- float rec_ms;
-
- hc_timer_get (device_param->speed_rec[i], rec_ms);
-
- if (rec_ms > SPEED_MAXAGE) continue;
-
speed_cnt += device_param->speed_cnt[i];
speed_ms += device_param->speed_ms[i];
}
if (device_param->skipped) continue;
- double exec_ms_total = 0;
-
- int exec_ms_cnt = 0;
-
- for (int i = 0; i < EXEC_CACHE; i++)
- {
- double exec_ms = device_param->exec_ms[i];
-
- if (exec_ms)
- {
- exec_ms_total += exec_ms;
+ double exec_ms_avg = get_avg_exec_time (device_param, EXEC_CACHE);
- exec_ms_cnt++;
- }
- }
-
- exec_ms_total /= exec_ms_cnt;
-
- fprintf (out, "%f\t", exec_ms_total);
+ fprintf (out, "%f\t", exec_ms_avg);
}
/**
* counter
*/
- uint salts_left = data.salts_cnt - data.salts_done;
-
- if (salts_left == 0) salts_left = 1;
-
- u64 progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * data.salts_cnt;
u64 all_done = 0;
u64 all_rejected = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
- if (salts_left > 1)
- {
- // otherwise the final cracked status shows 0/XXX progress
-
- if (data.salts_shown[salt_pos] == 1) continue;
- }
-
all_done += data.words_progress_done[salt_pos];
all_rejected += data.words_progress_rejected[salt_pos];
all_restored += data.words_progress_restored[salt_pos];
if (data.skip)
{
- progress_skip = MIN (data.skip, data.words_base) * salts_left;
+ progress_skip = MIN (data.skip, data.words_base) * data.salts_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_skip *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_skip *= data.combs_cnt;
if (data.limit)
{
- progress_end = MIN (data.limit, data.words_base) * salts_left;
+ progress_end = MIN (data.limit, data.words_base) * data.salts_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_end *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_end *= data.combs_cnt;
{
wpa_t *wpa = (wpa_t *) data.esalts_buf;
- uint pke[25] = { 0 };
-
- char *pke_ptr = (char *) pke;
-
- for (uint i = 0; i < 25; i++)
- {
- pke[i] = byte_swap_32 (wpa->pke[i]);
- }
-
- char mac1[6] = { 0 };
- char mac2[6] = { 0 };
-
- memcpy (mac1, pke_ptr + 23, 6);
- memcpy (mac2, pke_ptr + 29, 6);
-
log_info ("Hash.Target....: %s (%02x:%02x:%02x:%02x:%02x:%02x <-> %02x:%02x:%02x:%02x:%02x:%02x)",
(char *) data.salts_buf[0].salt_buf,
- mac1[0] & 0xff,
- mac1[1] & 0xff,
- mac1[2] & 0xff,
- mac1[3] & 0xff,
- mac1[4] & 0xff,
- mac1[5] & 0xff,
- mac2[0] & 0xff,
- mac2[1] & 0xff,
- mac2[2] & 0xff,
- mac2[3] & 0xff,
- mac2[4] & 0xff,
- mac2[5] & 0xff);
+ wpa->orig_mac1[0],
+ wpa->orig_mac1[1],
+ wpa->orig_mac1[2],
+ wpa->orig_mac1[3],
+ wpa->orig_mac1[4],
+ wpa->orig_mac1[5],
+ wpa->orig_mac2[0],
+ wpa->orig_mac2[1],
+ wpa->orig_mac2[2],
+ wpa->orig_mac2[3],
+ wpa->orig_mac2[4],
+ wpa->orig_mac2[5]);
}
else if (data.hash_mode == 5200)
{
}
else
{
- char out_buf[4096] = { 0 };
+ char out_buf[HCBUFSIZ] = { 0 };
ascii_digest (out_buf, 0, 0);
{
if (data.hash_mode == 3000)
{
- char out_buf1[4096] = { 0 };
- char out_buf2[4096] = { 0 };
+ char out_buf1[32] = { 0 };
+ char out_buf2[32] = { 0 };
ascii_digest (out_buf1, 0, 0);
ascii_digest (out_buf2, 0, 1);
* speed new
*/
- u64 speed_cnt[DEVICES_MAX] = { 0 };
- float speed_ms[DEVICES_MAX] = { 0 };
+ u64 speed_cnt[DEVICES_MAX] = { 0 };
+ double speed_ms[DEVICES_MAX] = { 0 };
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
if (device_param->skipped) continue;
- // we need to clear values (set to 0) because in case the device does
- // not get new candidates it idles around but speed display would
- // show it as working.
- // if we instantly set it to 0 after reading it happens that the
- // speed can be shown as zero if the users refreshes too fast.
- // therefore, we add a timestamp when a stat was recorded and if its
- // too old we will not use it
-
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (int i = 0; i < SPEED_CACHE; i++)
{
- float rec_ms;
-
- hc_timer_get (device_param->speed_rec[i], rec_ms);
-
- if (rec_ms > SPEED_MAXAGE) continue;
-
speed_cnt[device_id] += device_param->speed_cnt[i];
speed_ms[device_id] += device_param->speed_ms[i];
}
if (device_param->skipped) continue;
- double exec_ms_total = 0;
-
- int exec_ms_cnt = 0;
-
- for (int i = 0; i < EXEC_CACHE; i++)
- {
- double exec_ms = device_param->exec_ms[i];
-
- if (exec_ms)
- {
- exec_ms_total += exec_ms;
+ double exec_ms_avg = get_avg_exec_time (device_param, EXEC_CACHE);
- exec_ms_cnt++;
- }
- }
-
- exec_ms_total /= exec_ms_cnt;
-
- exec_all_ms[device_id] = exec_ms_total;
+ exec_all_ms[device_id] = exec_ms_avg;
}
/**
* timers
*/
- float ms_running = 0;
+ double ms_running = 0;
hc_timer_get (data.timer_running, ms_running);
- float ms_paused = data.ms_paused;
+ double ms_paused = data.ms_paused;
if (data.devices_status == STATUS_PAUSED)
{
- float ms_paused_tmp = 0;
+ double ms_paused_tmp = 0;
hc_timer_get (data.timer_paused, ms_paused_tmp);
* counters
*/
- uint salts_left = data.salts_cnt - data.salts_done;
-
- if (salts_left == 0) salts_left = 1;
-
- u64 progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * data.salts_cnt;
u64 all_done = 0;
u64 all_rejected = 0;
u64 all_restored = 0;
+ u64 progress_noneed = 0;
+
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
- if (salts_left > 1)
- {
- // otherwise the final cracked status shows 0/XXX progress
-
- if (data.salts_shown[salt_pos] == 1) continue;
- }
-
all_done += data.words_progress_done[salt_pos];
all_rejected += data.words_progress_rejected[salt_pos];
all_restored += data.words_progress_restored[salt_pos];
+
+ // Important for ETA only
+
+ if (data.salts_shown[salt_pos] == 1)
+ {
+ const u64 all = data.words_progress_done[salt_pos]
+ + data.words_progress_rejected[salt_pos]
+ + data.words_progress_restored[salt_pos];
+
+ const u64 left = data.words_cnt - all;
+
+ progress_noneed += left;
+ }
}
u64 progress_cur = all_restored + all_done + all_rejected;
if (data.skip)
{
- progress_skip = MIN (data.skip, data.words_base) * salts_left;
+ progress_skip = MIN (data.skip, data.words_base) * data.salts_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_skip *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_skip *= data.combs_cnt;
if (data.limit)
{
- progress_end = MIN (data.limit, data.words_base) * salts_left;
+ progress_end = MIN (data.limit, data.words_base) * data.salts_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_end *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_end *= data.combs_cnt;
u64 progress_cur_relative_skip = progress_cur - progress_skip;
u64 progress_end_relative_skip = progress_end - progress_skip;
- float speed_ms_real = ms_running - ms_paused;
- u64 speed_plains_real = all_done;
-
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
if (data.devices_status != STATUS_CRACKED)
{
- u64 words_per_ms = 0;
-
- if (speed_plains_real && speed_ms_real)
- {
- words_per_ms = speed_plains_real / speed_ms_real;
- }
-
#ifdef WIN
__time64_t sec_etc = 0;
#else
time_t sec_etc = 0;
#endif
- if (words_per_ms)
+ if (hashes_all_ms)
{
u64 progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
- u64 ms_left = progress_left_relative_skip / words_per_ms;
+ u64 ms_left = (progress_left_relative_skip - progress_noneed) / hashes_all_ms;
sec_etc = ms_left / 1000;
}
if (sec_etc == 0)
{
- log_info ("Time.Estimated.: 0 secs");
+ //log_info ("Time.Estimated.: 0 secs");
}
else if ((u64) sec_etc > ETC_MAX)
{
}
}
- float cpt_avg_min = (float) data.cpt_total / ((speed_ms_real / 1000) / 60);
- float cpt_avg_hour = (float) data.cpt_total / ((speed_ms_real / 1000) / 3600);
- float cpt_avg_day = (float) data.cpt_total / ((speed_ms_real / 1000) / 86400);
+ double ms_real = ms_running - ms_paused;
+
+ float cpt_avg_min = (float) data.cpt_total / ((ms_real / 1000) / 60);
+ float cpt_avg_hour = (float) data.cpt_total / ((ms_real / 1000) / 3600);
+ float cpt_avg_day = (float) data.cpt_total / ((ms_real / 1000) / 86400);
if ((data.cpt_start + 86400) < now)
{
static void status_benchmark ()
{
- if (data.devices_status == STATUS_INIT) return;
+ if (data.devices_status == STATUS_INIT) return;
if (data.devices_status == STATUS_STARTING) return;
if (data.words_cnt == 0) return;
- u64 speed_cnt[DEVICES_MAX] = { 0 };
- float speed_ms[DEVICES_MAX] = { 0 };
+ u64 speed_cnt[DEVICES_MAX] = { 0 };
+ double speed_ms[DEVICES_MAX] = { 0 };
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
if (device_param->skipped) continue;
- speed_cnt[device_id] = 0;
- speed_ms[device_id] = 0;
-
- for (int i = 0; i < SPEED_CACHE; i++)
- {
- speed_cnt[device_id] += device_param->speed_cnt[i];
- speed_ms[device_id] += device_param->speed_ms[i];
- }
-
- speed_cnt[device_id] /= SPEED_CACHE;
- speed_ms[device_id] /= SPEED_CACHE;
+ speed_cnt[device_id] = device_param->speed_cnt[0];
+ speed_ms[device_id] = device_param->speed_ms[0];
}
float hashes_all_ms = 0;
if (device_param->skipped) continue;
- double exec_ms_total = 0;
-
- int exec_ms_cnt = 0;
-
- for (int i = 0; i < EXEC_CACHE; i++)
- {
- double exec_ms = device_param->exec_ms[i];
-
- if (exec_ms)
- {
- exec_ms_total += exec_ms;
-
- exec_ms_cnt++;
- }
- }
-
- exec_ms_total /= exec_ms_cnt;
+ double exec_ms_avg = get_avg_exec_time (device_param, EXEC_CACHE);
- exec_all_ms[device_id] = exec_ms_total;
+ exec_all_ms[device_id] = exec_ms_avg;
}
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
snprintf (source_file, 255, "%s/OpenCL/m%05d.cl", shared_dir, (int) kern_type);
}
-static void generate_cached_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *profile_dir, char *device_name_chksum, char *cached_file)
+static void generate_cached_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *profile_dir, const char *device_name_chksum, char *cached_file)
{
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
}
}
-static void generate_cached_kernel_mp_filename (const uint opti_type, const uint opts_type, char *profile_dir, char *device_name_chksum, char *cached_file)
+static void generate_cached_kernel_mp_filename (const uint opti_type, const uint opts_type, char *profile_dir, const char *device_name_chksum, char *cached_file)
{
if ((opti_type & OPTI_TYPE_BRUTE_FORCE) && (opts_type & OPTS_TYPE_PT_GENERATE_BE))
{
snprintf (source_file, 255, "%s/OpenCL/amp_a%d.cl", shared_dir, attack_kern);
}
-static void generate_cached_kernel_amp_filename (const uint attack_kern, char *profile_dir, char *device_name_chksum, char *cached_file)
+static void generate_cached_kernel_amp_filename (const uint attack_kern, char *profile_dir, const char *device_name_chksum, char *cached_file)
{
snprintf (cached_file, 255, "%s/kernels/amp_a%d.%s.kernel", profile_dir, attack_kern, device_name_chksum);
}
// hash
- char out_buf[4096] = { 0 };
+ char out_buf[HCBUFSIZ] = { 0 };
ascii_digest (out_buf, salt_pos, digest_pos);
for (int i = 0, j = gidm; i < 16; i++, j++)
{
- plain_buf[i] = pw.h.hi1[0][j];
+ plain_buf[i] = pw.i[j];
}
plain_len = pw.pw_len;
for (int i = 0, j = gidm; i < 16; i++, j++)
{
- plain_buf[i] = pw.h.hi1[0][j];
+ plain_buf[i] = pw.i[j];
}
plain_len = pw.pw_len;
for (int i = 0, j = gidm; i < 16; i++, j++)
{
- plain_buf[i] = pw.h.hi1[0][j];
+ plain_buf[i] = pw.i[j];
}
plain_len = pw.pw_len;
for (int i = 0, j = gidm; i < 16; i++, j++)
{
- plain_buf[i] = pw.h.hi1[0][j];
+ plain_buf[i] = pw.i[j];
}
plain_len = pw.pw_len;
if (data.hash_mode != 2500)
{
- char out_buf[4096] = { 0 };
+ char out_buf[HCBUFSIZ] = { 0 };
if (data.username == 1)
{
unlink (old_hashfile);
}
-static float find_kernel_blocks_div (const u64 total_left, const uint kernel_blocks_all)
+static float find_kernel_power_div (const u64 total_left, const uint kernel_power_all)
{
- // function called only in case kernel_blocks_all > words_left)
+ // function called only in case kernel_power_all > words_left
- float kernel_blocks_div = (float) (total_left) / kernel_blocks_all;
+ float kernel_power_div = (float) (total_left) / kernel_power_all;
- kernel_blocks_div += kernel_blocks_div / 100;
+ kernel_power_div += kernel_power_div / 100;
- u32 kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
+ u32 kernel_power_new = (u32) (kernel_power_all * kernel_power_div);
- while (kernel_blocks_new < total_left)
+ while (kernel_power_new < total_left)
{
- kernel_blocks_div += kernel_blocks_div / 100;
+ kernel_power_div += kernel_power_div / 100;
- kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
+ kernel_power_new = (u32) (kernel_power_all * kernel_power_div);
}
if (data.quiet == 0)
fflush (stdout);
}
- if ((kernel_blocks_all * kernel_blocks_div) < 8) return 1;
+ if ((kernel_power_all * kernel_power_div) < 8) return 1;
- return kernel_blocks_div;
+ return kernel_power_div;
}
static void run_kernel (const uint kern_run, hc_device_param_t *device_param, const uint num, const uint event_update)
hc_clSetKernelArg (data.ocl, kernel, 30, sizeof (cl_uint), device_param->kernel_params[30]);
hc_clSetKernelArg (data.ocl, kernel, 31, sizeof (cl_uint), device_param->kernel_params[31]);
- cl_event event;
+ hc_timer_t timer;
+
+ hc_timer_set (&timer);
if ((data.opts_type & OPTS_TYPE_PT_BITSLICE) && (data.attack_mode == ATTACK_MODE_BF))
{
const size_t global_work_size[3] = { num_elements, 32, 1 };
const size_t local_work_size[3] = { kernel_threads / 32, 32, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, &event);
+ hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, NULL);
}
else
{
size_t workgroup_size = 0;
+
hc_clGetKernelWorkGroupInfo (data.ocl, kernel, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &workgroup_size, NULL);
+
+ if (kern_run == KERN_RUN_2)
+ {
+ if (data.opti_type & OPTI_TYPE_SLOW_HASH_SIMD)
+ {
+ num_elements = CEIL ((float) num_elements / device_param->vector_width);
+ }
+ }
+
if (kernel_threads > workgroup_size) kernel_threads = workgroup_size;
+ while (num_elements % kernel_threads) num_elements++;
+
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, &event);
+ hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
}
hc_clFlush (data.ocl, device_param->command_queue);
- hc_clWaitForEvents (data.ocl, 1, &event);
+ hc_clFinish (data.ocl, device_param->command_queue);
if (event_update)
{
- cl_ulong time_start;
- cl_ulong time_end;
+ double exec_time;
- hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_START, sizeof (time_start), &time_start, NULL);
- hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_END, sizeof (time_end), &time_end, NULL);
-
- const double exec_time = (time_end - time_start) / 1000000.0;
+ hc_timer_get (timer, exec_time);
uint exec_pos = device_param->exec_pos;
device_param->exec_pos = exec_pos;
}
-
- hc_clReleaseEvent (data.ocl, event);
-
- hc_clFinish (data.ocl, device_param->command_queue);
}
static void run_kernel_mp (const uint kern_run, hc_device_param_t *device_param, const uint num)
hc_clFinish (data.ocl, device_param->command_queue);
}
-static void run_kernel_tb (hc_device_param_t *device_param, const uint num)
-{
- uint num_elements = num;
-
- uint kernel_threads = device_param->kernel_threads;
-
- while (num_elements % kernel_threads) num_elements++;
-
- cl_kernel kernel = device_param->kernel_tb;
-
- size_t workgroup_size = 0;
- hc_clGetKernelWorkGroupInfo (data.ocl, kernel, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &workgroup_size, NULL);
- if (kernel_threads > workgroup_size) kernel_threads = workgroup_size;
-
- const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { kernel_threads, 1, 1 };
-
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
-
- hc_clFlush (data.ocl, device_param->command_queue);
-
- hc_clFinish (data.ocl, device_param->command_queue);
-}
-
static void run_kernel_tm (hc_device_param_t *device_param)
{
const uint num_elements = 1024; // fixed
}
}
+// Dispatch the kernel sequence for one batch of candidates.
+// Fast hashes (ATTACK_EXEC_INSIDE_KERNEL) run a single kernel selected by
+// password length; slow hashes run the amplifier, then the init/loop/comp
+// split (KERN_RUN_1 / KERN_RUN_2 / KERN_RUN_3) with optional hook kernels.
+static void choose_kernel (hc_device_param_t *device_param, const uint attack_exec, const uint attack_mode, const uint opts_type, const salt_t *salt_buf, const uint highest_pw_len, const uint pws_cnt)
+{
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
+ {
+ if (attack_mode == ATTACK_MODE_BF)
+ {
+ if (opts_type & OPTS_TYPE_PT_BITSLICE)
+ {
+ // bitslice brute-force: regenerate the transposed bf buffer
+ // (d_tm_c) on-device and copy it over d_bfs_c before the run
+ const uint size_tm = 32 * sizeof (bs_word_t);
+
+ run_kernel_bzero (device_param, device_param->d_tm_c, size_tm);
+
+ run_kernel_tm (device_param);
+
+ hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_tm_c, device_param->d_bfs_c, 0, 0, size_tm, 0, NULL, NULL);
+ }
+ }
+
+ // pick the kernel variant by the longest password in the batch
+ if (highest_pw_len < 16)
+ {
+ run_kernel (KERN_RUN_1, device_param, pws_cnt, true);
+ }
+ else if (highest_pw_len < 32)
+ {
+ run_kernel (KERN_RUN_2, device_param, pws_cnt, true);
+ }
+ else
+ {
+ run_kernel (KERN_RUN_3, device_param, pws_cnt, true);
+ }
+ }
+ else
+ {
+ // slow hash: amplify candidates, then init kernel
+ run_kernel_amp (device_param, pws_cnt);
+
+ run_kernel (KERN_RUN_1, device_param, pws_cnt, false);
+
+ if (opts_type & OPTS_TYPE_HOOK12)
+ {
+ run_kernel (KERN_RUN_12, device_param, pws_cnt, false);
+ }
+
+ uint iter = salt_buf->salt_iter;
+
+ uint loop_step = device_param->kernel_loops;
+
+ // run the loop kernel in loop_step sized chunks so the host can
+ // observe status changes and update speed between chunks
+ for (uint loop_pos = 0; loop_pos < iter; loop_pos += loop_step)
+ {
+ uint loop_left = iter - loop_pos;
+
+ loop_left = MIN (loop_left, loop_step);
+
+ device_param->kernel_params_buf32[25] = loop_pos;
+ device_param->kernel_params_buf32[26] = loop_left;
+
+ run_kernel (KERN_RUN_2, device_param, pws_cnt, true);
+
+ if (data.devices_status == STATUS_CRACKED) break;
+ if (data.devices_status == STATUS_ABORTED) break;
+ if (data.devices_status == STATUS_QUIT) break;
+
+ /**
+ * speed
+ */
+
+ // record partial progress proportional to completed iterations
+ const float iter_part = (float) (loop_pos + loop_left) / iter;
+
+ const u64 perf_sum_all = pws_cnt * iter_part;
+
+ double speed_ms;
+
+ hc_timer_get (device_param->timer_speed, speed_ms);
+
+ // NOTE(review): speed_pos is read but not advanced here — presumably
+ // the ring position is incremented by the caller; confirm
+ const u32 speed_pos = device_param->speed_pos;
+
+ device_param->speed_cnt[speed_pos] = perf_sum_all;
+
+ device_param->speed_ms[speed_pos] = speed_ms;
+ }
+
+ if (opts_type & OPTS_TYPE_HOOK23)
+ {
+ // round-trip the hook buffer through the host between loop and comp
+ run_kernel (KERN_RUN_23, device_param, pws_cnt, false);
+
+ hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+
+ // do something with data
+
+ hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+ }
+
+ run_kernel (KERN_RUN_3, device_param, pws_cnt, false);
+ }
+}
+
static int run_rule_engine (const int rule_len, const char *rule_buf)
{
if (rule_len == 0)
}
else if (data.attack_kern == ATTACK_KERN_COMBI)
{
+ if (data.attack_mode == ATTACK_MODE_HYBRID2)
+ {
+ if (data.opts_type & OPTS_TYPE_PT_ADD01)
+ {
+ for (u32 i = 0; i < pws_cnt; i++)
+ {
+ const u32 pw_len = device_param->pws_buf[i].pw_len;
+
+ u8 *ptr = (u8 *) device_param->pws_buf[i].i;
+
+ ptr[pw_len] = 0x01;
+ }
+ }
+ else if (data.opts_type & OPTS_TYPE_PT_ADD80)
+ {
+ for (u32 i = 0; i < pws_cnt; i++)
+ {
+ const u32 pw_len = device_param->pws_buf[i].pw_len;
+
+ u8 *ptr = (u8 *) device_param->pws_buf[i].i;
+
+ ptr[pw_len] = 0x80;
+ }
+ }
+ }
+
hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, pws_cnt * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
}
else if (data.attack_kern == ATTACK_KERN_BF)
}
}
-static void run_cracker (hc_device_param_t *device_param, const uint pw_cnt, const uint pws_cnt)
+// Benchmark one (kernel_accel, kernel_loops) candidate for the autotuner:
+// fill the pw buffer with synthetic words, run the relevant kernel once
+// uncached plus `repeat` timed runs, and return the average exec time in ms
+// (taken from the exec_ms ring via get_avg_exec_time). The fake words are
+// zeroed out again before returning.
+static double try_run (hc_device_param_t *device_param, const u32 kernel_accel, const u32 kernel_loops, const int repeat)
+{
+ const u32 kernel_power = device_param->device_processors * device_param->kernel_threads * kernel_accel;
+
+ device_param->kernel_params_buf32[26] = kernel_loops;
+ device_param->kernel_params_buf32[27] = kernel_loops;
+
+ // init some fake words
+
+ for (u32 i = 0; i < kernel_power; i++)
+ {
+ device_param->pws_buf[i].i[0] = i;
+ device_param->pws_buf[i].i[1] = 0x01234567;
+ device_param->pws_buf[i].pw_len = 4 + (i & 3);
+ }
+
+ hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, kernel_power * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+
+ if (data.attack_exec == ATTACK_EXEC_OUTSIDE_KERNEL)
+ {
+ run_kernel_amp (device_param, kernel_power);
+ }
+
+ // caching run
+
+ // first run is untimed (event_update == false) so compilation/cache
+ // warm-up does not skew the measurement
+ if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
+ {
+ run_kernel (KERN_RUN_1, device_param, kernel_power, false);
+ }
+ else
+ {
+ run_kernel (KERN_RUN_2, device_param, kernel_power, false);
+ }
+
+ // now user repeats
+
+ for (int i = 0; i < repeat; i++)
+ {
+ if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
+ {
+ run_kernel (KERN_RUN_1, device_param, kernel_power, true);
+ }
+ else
+ {
+ run_kernel (KERN_RUN_2, device_param, kernel_power, true);
+ }
+ }
+
+ const double exec_ms_prev = get_avg_exec_time (device_param, repeat);
+
+ // reset fake words
+
+ memset (device_param->pws_buf, 0, kernel_power * sizeof (pw_t));
+
+ hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, kernel_power * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+ hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_amp_buf, CL_TRUE, 0, kernel_power * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+
+ return exec_ms_prev;
+}
+
+// Auto-tune kernel_accel and kernel_loops for one device so that a single
+// kernel invocation stays near the per-profile target runtime
+// (TARGET_MS_PROFILE[workload_profile - 1]). Strategy: probe power-of-two
+// candidates within the device's min/max bounds via try_run(), keep the
+// most efficient (work per ms) values, then rebalance and finally derive
+// kernel_power = processors * threads * accel.
+static void autotune (hc_device_param_t *device_param)
{
- const uint kernel_loops = device_param->kernel_loops;
+ const double target_ms = TARGET_MS_PROFILE[data.workload_profile - 1];
+
+ const u32 kernel_accel_min = device_param->kernel_accel_min;
+ const u32 kernel_accel_max = device_param->kernel_accel_max;
+
+ const u32 kernel_loops_min = device_param->kernel_loops_min;
+ const u32 kernel_loops_max = device_param->kernel_loops_max;
+
+ u32 kernel_accel = kernel_accel_min;
+ u32 kernel_loops = kernel_loops_min;
+
+ // steps
+
+ // candidate lists: powers of two 1..2^(STEPS_CNT+1), plus the exact
+ // min/max bounds, sorted ascending
+ #define STEPS_CNT 10
+
+ #define STEPS_ACCEL_CNT (STEPS_CNT + 2)
+ #define STEPS_LOOPS_CNT (STEPS_CNT + 2)
+
+ u32 steps_accel[STEPS_ACCEL_CNT];
+ u32 steps_loops[STEPS_LOOPS_CNT];
+
+ for (int i = 0; i < STEPS_ACCEL_CNT; i++)
+ {
+ steps_accel[i] = 1 << i;
+ }
+
+ for (int i = 0; i < STEPS_LOOPS_CNT; i++)
+ {
+ steps_loops[i] = 1 << i;
+ }
+
+ steps_accel[STEPS_CNT + 0] = kernel_accel_min;
+ steps_accel[STEPS_CNT + 1] = kernel_accel_max;
+
+ steps_loops[STEPS_CNT + 0] = kernel_loops_min;
+ steps_loops[STEPS_CNT + 1] = kernel_loops_max;
+
+ qsort (steps_accel, STEPS_ACCEL_CNT, sizeof (u32), sort_by_u32);
+ qsort (steps_loops, STEPS_LOOPS_CNT, sizeof (u32), sort_by_u32);
+
+ // find out highest kernel-loops that stays below target_ms, we can use it later for multiplication as this is a linear function
+
+ u32 kernel_loops_tmp;
+
+ for (kernel_loops_tmp = kernel_loops_max; kernel_loops_tmp > kernel_loops_min; kernel_loops_tmp >>= 1)
+ {
+ const double exec_ms = try_run (device_param, kernel_accel_min, kernel_loops_tmp, 1);
+
+ if (exec_ms < target_ms) break;
+ }
+
+ // kernel-accel
+
+ // pick the accel value with the best throughput (accel per ms) that
+ // still finishes under target_ms
+ if (kernel_accel_min < kernel_accel_max)
+ {
+ double e_best = 0;
+
+ for (int i = 0; i < STEPS_ACCEL_CNT; i++)
+ {
+ const u32 kernel_accel_try = steps_accel[i];
+
+ if (kernel_accel_try < kernel_accel_min) continue;
+ if (kernel_accel_try > kernel_accel_max) break;
+
+ const double exec_ms = try_run (device_param, kernel_accel_try, kernel_loops_tmp, 1);
+
+ if (exec_ms > target_ms) break;
+
+ const double e = kernel_accel_try / exec_ms;
+
+ if (e > e_best)
+ {
+ kernel_accel = kernel_accel_try;
+
+ e_best = e;
+ }
+ }
+ }
+
+ // kernel-loops final
+
+ // same efficiency search for loops, using the accel chosen above
+ if (kernel_loops_min < kernel_loops_max)
+ {
+ double e_best = 0;
+
+ for (int i = 0; i < STEPS_LOOPS_CNT; i++)
+ {
+ const u32 kernel_loops_try = steps_loops[i];
+
+ if (kernel_loops_try < kernel_loops_min) continue;
+ if (kernel_loops_try > kernel_loops_max) break;
+
+ const double exec_ms = try_run (device_param, kernel_accel, kernel_loops_try, 1);
+
+ if (exec_ms > target_ms) break;
+
+ const double e = kernel_loops_try / exec_ms;
+
+ if (e > e_best)
+ {
+ kernel_loops = kernel_loops_try;
+
+ e_best = e;
+ }
+ }
+ }
+
+ // final balance
+
+ u32 kernel_accel_best = kernel_accel;
+ u32 kernel_loops_best = kernel_loops;
+
+ // NOTE(review): exec_best is a u32 sentinel (-1 wraps to UINT_MAX) that
+ // later receives a double exec_ms — sub-millisecond times truncate to 0,
+ // and exec_best == 0 makes the exec_left division below divide by zero;
+ // confirm whether measured times are always >= 1 ms here
+ u32 exec_best = -1;
+
+ if ((kernel_accel_min < kernel_accel_max) || (kernel_loops_min < kernel_loops_max))
+ {
+ const double exec_ms = try_run (device_param, kernel_accel_best, kernel_loops_best, 1);
+
+ exec_best = exec_ms;
+ }
+
+ // reset
+
+ // try trading accel for loops (halve accel, double loops) twice
+ if (kernel_accel_min < kernel_accel_max)
+ {
+ u32 kernel_accel_try = kernel_accel;
+ u32 kernel_loops_try = kernel_loops;
+
+ for (int i = 0; i < 2; i++)
+ {
+ kernel_accel_try >>= 1;
+ kernel_loops_try <<= 1;
+
+ if (kernel_accel_try < kernel_accel_min) break;
+ if (kernel_loops_try > kernel_loops_max) break;
+
+ const double exec_ms = try_run (device_param, kernel_accel_try, kernel_loops_try, 1);
+
+ if (exec_ms < exec_best)
+ {
+ kernel_accel_best = kernel_accel_try;
+ kernel_loops_best = kernel_loops_try;
+
+ exec_best = exec_ms;
+ }
+ }
+ }
+
+ // reset
+
+ // and the opposite trade (double accel, halve loops) twice
+ if (kernel_loops_min < kernel_loops_max)
+ {
+ u32 kernel_accel_try = kernel_accel;
+ u32 kernel_loops_try = kernel_loops;
+
+ for (int i = 0; i < 2; i++)
+ {
+ kernel_accel_try <<= 1;
+ kernel_loops_try >>= 1;
+
+ if (kernel_accel_try > kernel_accel_max) break;
+ if (kernel_loops_try < kernel_loops_min) break;
+
+ const double exec_ms = try_run (device_param, kernel_accel_try, kernel_loops_try, 1);
+
+ if (exec_ms < exec_best)
+ {
+ kernel_accel_best = kernel_accel_try;
+ kernel_loops_best = kernel_loops_try;
+
+ exec_best = exec_ms;
+ }
+ }
+ }
+
+ // because of the balance we may have some free space left!
+ // at this point, allow a small variance to overdrive the limit
+
+ const int exec_left = (target_ms * 1.2) / exec_best;
+
+ const int accel_left = kernel_accel_max / kernel_accel_best;
+
+ const int exec_accel_min = MIN (exec_left, accel_left);
+
+ if (exec_accel_min)
+ {
+ kernel_accel_best *= exec_accel_min;
+ }
+
+ // reset timer
+
+ // discard timings gathered during tuning so status display starts clean
+ device_param->exec_pos = 0;
+
+ memset (device_param->exec_ms, 0, EXEC_CACHE * sizeof (double));
- //only useful in debug
- //if (data.quiet == 0)
- // log_info ("Workload.Dev#%u : loops %u, accel %u", device_param->device_id + 1, device_param->kernel_loops, device_param->kernel_accel);
+ // store
+
+ kernel_accel = kernel_accel_best;
+ kernel_loops = kernel_loops_best;
+
+ device_param->kernel_accel = kernel_accel;
+ device_param->kernel_loops = kernel_loops;
+
+ const u32 kernel_power = device_param->device_processors * device_param->kernel_threads * device_param->kernel_accel;
+
+ device_param->kernel_power = kernel_power;
+
+ #ifdef DEBUG
+
+ if (data.quiet == 0)
+ {
+ clear_prompt ();
+
+ log_info ("Device #%u: autotuned kernel-accel to %u\n"
+ "Device #%u: autotuned kernel-loops to %u\n",
+ device_param->device_id + 1,
+ kernel_accel,
+ device_param->device_id + 1,
+ kernel_loops);
+
+ fprintf (stdout, "%s", PROMPT);
+ fflush (stdout);
+ }
+
+ #endif
+}
+
+static void run_cracker (hc_device_param_t *device_param, const uint pws_cnt)
+{
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
// init speed timer
+ device_param->kernel_params_mp_l_buf32[5];
}
- // bitslice optimization stuff
-
- if (data.attack_mode == ATTACK_MODE_BF)
- {
- if (data.opts_type & OPTS_TYPE_PT_BITSLICE)
- {
- run_kernel_tb (device_param, pws_cnt);
- }
- }
-
// iteration type
uint innerloop_step = 0;
uint innerloop_cnt = 0;
- if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL) innerloop_step = kernel_loops;
+ if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL) innerloop_step = device_param->kernel_loops;
else innerloop_step = 1;
if (data.attack_kern == ATTACK_KERN_STRAIGHT) innerloop_cnt = data.kernel_rules_cnt;
if (data.devices_status == STATUS_QUIT) break;
if (data.devices_status == STATUS_BYPASS) break;
- if (data.salts_shown[salt_pos] == 1) continue;
-
salt_t *salt_buf = &data.salts_buf[salt_pos];
device_param->kernel_params_buf32[24] = salt_pos;
device_param->kernel_params_buf32[27] = innerloop_left;
- if (innerloop_left == 0) continue;
+ // TODO: this case should be unreachable; investigate and remove this guard
+ if (innerloop_left == 0)
+ {
+ puts ("bug, how should this happen????\n");
+
+ continue;
+ }
+
+ if (data.salts_shown[salt_pos] == 1)
+ {
+ data.words_progress_done[salt_pos] += (u64) pws_cnt * (u64) innerloop_left;
+
+ continue;
+ }
// initialize amplifiers
if (data.attack_mode == ATTACK_MODE_COMBI)
{
- char line_buf[BUFSIZ] = { 0 };
-
uint i = 0;
while (i < innerloop_left)
if (rule_len_out < 0)
{
- data.words_progress_rejected[salt_pos] += pw_cnt;
+ data.words_progress_rejected[salt_pos] += pws_cnt;
continue;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_combs, device_param->d_combs_c, 0, 0, innerloop_left * sizeof (comb_t), 0, NULL, NULL);
- }
-
- if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
- {
- if (data.attack_mode == ATTACK_MODE_BF)
- {
- if (data.opts_type & OPTS_TYPE_PT_BITSLICE)
- {
- const uint size_tm = 32 * sizeof (bs_word_t);
-
- run_kernel_bzero (device_param, device_param->d_tm_c, size_tm);
-
- run_kernel_tm (device_param);
-
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_tm_c, device_param->d_bfs_c, 0, 0, size_tm, 0, NULL, NULL);
- }
- }
-
- if (highest_pw_len < 16)
- {
- run_kernel (KERN_RUN_1, device_param, pws_cnt, true);
- }
- else if (highest_pw_len < 32)
- {
- run_kernel (KERN_RUN_2, device_param, pws_cnt, true);
- }
- else
- {
- run_kernel (KERN_RUN_3, device_param, pws_cnt, true);
- }
- }
- else
- {
- run_kernel_amp (device_param, pws_cnt);
-
- run_kernel (KERN_RUN_1, device_param, pws_cnt, false);
-
- if (data.opts_type & OPTS_TYPE_HOOK12)
- {
- run_kernel (KERN_RUN_12, device_param, pws_cnt, false);
- }
+ }
- uint iter = salt_buf->salt_iter;
+ choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt);
- for (uint loop_pos = 0; loop_pos < iter; loop_pos += kernel_loops)
- {
- uint loop_left = iter - loop_pos;
+ if (data.benchmark == 1)
+ {
+ double exec_ms_avg_prev = get_avg_exec_time (device_param, EXEC_CACHE);
- loop_left = MIN (loop_left, kernel_loops);
+ // a few caching rounds
- device_param->kernel_params_buf32[25] = loop_pos;
- device_param->kernel_params_buf32[26] = loop_left;
+ for (u32 i = 0; i < 2; i++)
+ {
+ hc_timer_set (&device_param->timer_speed);
- run_kernel (KERN_RUN_2, device_param, pws_cnt, true);
+ choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt);
- if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
+ double exec_ms_avg = get_avg_exec_time (device_param, EXEC_CACHE);
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
- if (data.devices_status == STATUS_QUIT) break;
+ exec_ms_avg_prev = exec_ms_avg;
}
- if (data.opts_type & OPTS_TYPE_HOOK23)
+ // benchmark_repeats is now an upper bound: stop early once the average execution time stabilizes
+
+ for (u32 i = 2; i < data.benchmark_repeats; i++)
{
- run_kernel (KERN_RUN_23, device_param, pws_cnt, false);
+ hc_timer_set (&device_param->timer_speed);
- hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+ choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt);
- // do something with data
+ double exec_ms_avg = get_avg_exec_time (device_param, EXEC_CACHE);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
- }
+ if ((exec_ms_avg_prev / exec_ms_avg) < 1.001) break;
- run_kernel (KERN_RUN_3, device_param, pws_cnt, false);
+ exec_ms_avg_prev = exec_ms_avg;
+ }
}
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
* progress
*/
- u64 perf_sum_all = (u64) pw_cnt * (u64) innerloop_left;
+ u64 perf_sum_all = (u64) pws_cnt * (u64) innerloop_left;
hc_thread_mutex_lock (mux_counter);
* speed
*/
- float speed_ms;
+ double speed_ms;
hc_timer_get (device_param->timer_speed, speed_ms);
hc_thread_mutex_lock (mux_display);
+ // current speed
+
device_param->speed_cnt[speed_pos] = perf_sum_all;
device_param->speed_ms[speed_pos] = speed_ms;
- device_param->speed_rec[speed_pos] = device_param->timer_speed;
-
hc_thread_mutex_unlock (mux_display);
speed_pos++;
{
speed_pos = 0;
}
+
+ /**
+ * benchmark
+ */
+
+ if (data.benchmark == 1) break;
}
}
device_param->speed_pos = speed_pos;
+
+ myfree (line_buf);
}
static void load_segment (wl_data_t *wl_data, FILE *fd)
if (feof (fd))
{
- fprintf (stderr, "bug!!\n");
+ fprintf (stderr, "BUG feof()!!\n");
return;
}
return (cnt);
}
-static void pw_transpose_to_hi1 (const pw_t *p1, pw_t *p2)
-{
- memcpy (p2->h.hi1, p1->h.hi1, 64 * sizeof (uint));
-}
-
-static uint pw_add_to_hc1 (hc_device_param_t *device_param, const u8 *pw_buf, const uint pw_len)
-{
- if (data.devices_status == STATUS_BYPASS) return 0;
-
- pw_cache_t *pw_cache = device_param->pw_caches + pw_len;
-
- uint cache_cnt = pw_cache->cnt;
-
- u8 *pw_hc1 = pw_cache->pw_buf.h.hc1[cache_cnt];
-
- memcpy (pw_hc1, pw_buf, pw_len);
-
- memset (pw_hc1 + pw_len, 0, 256 - pw_len);
-
- uint pws_cnt = device_param->pws_cnt;
-
- cache_cnt++;
-
- pw_t *pw = device_param->pws_buf + pws_cnt;
-
- device_param->pw_transpose (&pw_cache->pw_buf, pw);
-
- pw->pw_len = pw_len;
-
- pws_cnt++;
-
- device_param->pws_cnt = pws_cnt;
- device_param->pw_cnt = pws_cnt * 1;
-
- cache_cnt = 0;
-
- pw_cache->cnt = cache_cnt;
-
- return pws_cnt;
-}
-
static void *thread_monitor (void *p)
{
uint runtime_check = 0;
fseek (fp, out_info[j].seek, SEEK_SET);
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
+
while (!feof (fp))
{
- char line_buf[BUFSIZ] = { 0 };
-
- char *ptr = fgets (line_buf, BUFSIZ - 1, fp);
+ char *ptr = fgets (line_buf, HCBUFSIZ - 1, fp);
if (ptr == NULL) break;
wpa_t *wpas = (wpa_t *) data.esalts_buf;
wpa_t *wpa = &wpas[salt_pos];
- uint pke[25] = { 0 };
-
- char *pke_ptr = (char *) pke;
-
- for (uint i = 0; i < 25; i++)
- {
- pke[i] = byte_swap_32 (wpa->pke[i]);
- }
-
- u8 mac1[6] = { 0 };
- u8 mac2[6] = { 0 };
-
- memcpy (mac1, pke_ptr + 23, 6);
- memcpy (mac2, pke_ptr + 29, 6);
-
// compare hex string(s) vs binary MAC address(es)
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
+ if (wpa->orig_mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
cracked = 0;
+
break;
}
}
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
+ if (wpa->orig_mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
cracked = 0;
+
break;
}
}
if (data.devices_status == STATUS_CRACKED) break;
}
+ myfree (line_buf);
+
out_info[j].seek = ftell (fp);
//hc_thread_mutex_unlock (mux_display);
return (p);
}
-static uint get_work (hc_device_param_t *device_param, const u64 max)
+// Append one candidate password to the device's host-side pws_buf.
+// Copies pw_buf into the next free pw_t slot, zero-pads the unused tail
+// of pw->i, records the length, and increments pws_cnt.
+static void pw_add (hc_device_param_t *device_param, const u8 *pw_buf, const int pw_len)
+{
+ if (device_param->pws_cnt < device_param->kernel_power)
+ {
+ pw_t *pw = (pw_t *) device_param->pws_buf + device_param->pws_cnt;
+
+ u8 *ptr = (u8 *) pw->i;
+
+ memcpy (ptr, pw_buf, pw_len);
+
+ // zero the remainder of the slot so stale data never reaches the kernel
+ memset (ptr + pw_len, 0, sizeof (pw->i) - pw_len);
+
+ pw->pw_len = pw_len;
+
+ device_param->pws_cnt++;
+ }
+ else
+ {
+ // NOTE(review): on overflow the word is logged and silently dropped;
+ // confirm every caller flushes before pws_cnt can reach kernel_power
+ fprintf (stderr, "BUG pw_add()!!\n");
+
+ return;
+ }
+}
+
+static uint get_work (hc_device_param_t *device_param, const u64 max, const bool allow_div)
{
hc_thread_mutex_lock (mux_dispatcher);
const u64 words_left = words_base - words_cur;
- if (data.kernel_blocks_all > words_left)
+ if (allow_div)
{
- if (data.kernel_blocks_div == 0)
+ if (data.kernel_power_all > words_left)
{
- data.kernel_blocks_div = find_kernel_blocks_div (words_left, data.kernel_blocks_all);
+ if (data.kernel_power_div == 0)
+ {
+ data.kernel_power_div = find_kernel_power_div (words_left, data.kernel_power_all);
+ }
}
- }
- if (data.kernel_blocks_div)
- {
- if (device_param->kernel_blocks == device_param->kernel_blocks_user)
+ if (data.kernel_power_div)
{
- const u32 kernel_blocks_new = (float) device_param->kernel_blocks * data.kernel_blocks_div;
- const u32 kernel_power_new = kernel_blocks_new;
-
- if (kernel_blocks_new < device_param->kernel_blocks)
+ if (device_param->kernel_power == device_param->kernel_power_user)
{
- device_param->kernel_blocks = kernel_blocks_new;
- device_param->kernel_power = kernel_power_new;
+ const u32 kernel_power_new = (float) device_param->kernel_power * data.kernel_power_div;
+
+ if (kernel_power_new < device_param->kernel_power)
+ {
+ device_param->kernel_power = kernel_power_new;
+ }
}
}
}
- const uint kernel_blocks = device_param->kernel_blocks;
+ const uint kernel_power = device_param->kernel_power;
- uint work = MIN (words_left, kernel_blocks);
+ uint work = MIN (words_left, kernel_power);
work = MIN (work, max);
if (device_param->skipped) return NULL;
+ autotune (device_param);
+
+ char *buf = (char *) mymalloc (HCBUFSIZ);
+
const uint attack_kern = data.attack_kern;
- const uint kernel_blocks = device_param->kernel_blocks;
+ const uint kernel_power = device_param->kernel_power;
while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
uint words_cur = 0;
- while (words_cur < kernel_blocks)
+ while (words_cur < kernel_power)
{
- char buf[BUFSIZ] = { 0 };
-
- char *line_buf = fgets (buf, sizeof (buf), stdin);
+ char *line_buf = fgets (buf, HCBUFSIZ - 1, stdin);
if (line_buf == NULL) break;
}
}
- device_param->pw_add (device_param, (u8 *) line_buf, line_len);
+ pw_add (device_param, (u8 *) line_buf, line_len);
words_cur++;
if (data.devices_status == STATUS_QUIT) break;
if (data.devices_status == STATUS_BYPASS) break;
- // we need 2 flushing because we have two independant caches and it can occur
- // that one buffer is already at threshold plus for that length also exists
- // more data in the 2nd buffer so it would overflow
+ // flush
- // flush session 1
+ const uint pws_cnt = device_param->pws_cnt;
+ if (pws_cnt)
{
- for (int pw_len = 0; pw_len < PW_MAX1; pw_len++)
- {
- pw_cache_t *pw_cache = &device_param->pw_caches[pw_len];
-
- const uint pw_cache_cnt = pw_cache->cnt;
-
- if (pw_cache_cnt == 0) continue;
-
- pw_cache->cnt = 0;
-
- uint pws_cnt = device_param->pws_cnt;
-
- pw_t *pw = device_param->pws_buf + pws_cnt;
-
- device_param->pw_transpose (&pw_cache->pw_buf, pw);
-
- pw->pw_len = pw_len;
-
- uint pw_cnt = device_param->pw_cnt;
-
- pw_cnt += pw_cache_cnt;
-
- device_param->pw_cnt = pw_cnt;
-
- pws_cnt++;
-
- device_param->pws_cnt = pws_cnt;
-
- if (pws_cnt == device_param->kernel_power_user) break;
- }
-
- const uint pw_cnt = device_param->pw_cnt;
- const uint pws_cnt = device_param->pws_cnt;
-
- if (pws_cnt)
- {
- run_copy (device_param, pws_cnt);
+ run_copy (device_param, pws_cnt);
- run_cracker (device_param, pw_cnt, pws_cnt);
-
- device_param->pw_cnt = 0;
- device_param->pws_cnt = 0;
- }
- }
+ run_cracker (device_param, pws_cnt);
- // flush session 2
+ device_param->pws_cnt = 0;
- {
- for (int pw_len = 0; pw_len < PW_MAX1; pw_len++)
+ if (attack_kern == ATTACK_KERN_STRAIGHT)
{
- pw_cache_t *pw_cache = &device_param->pw_caches[pw_len];
-
- const uint pw_cache_cnt = pw_cache->cnt;
-
- if (pw_cache_cnt == 0) continue;
-
- pw_cache->cnt = 0;
-
- uint pws_cnt = device_param->pws_cnt;
-
- pw_t *pw = device_param->pws_buf + pws_cnt;
-
- device_param->pw_transpose (&pw_cache->pw_buf, pw);
-
- pw->pw_len = pw_len;
-
- uint pw_cnt = device_param->pw_cnt;
-
- pw_cnt += pw_cache_cnt;
-
- device_param->pw_cnt = pw_cnt;
-
- pws_cnt++;
-
- device_param->pws_cnt = pws_cnt;
+ run_kernel_bzero (device_param, device_param->d_rules_c, device_param->size_rules_c);
}
-
- const uint pw_cnt = device_param->pw_cnt;
- const uint pws_cnt = device_param->pws_cnt;
-
- if (pws_cnt)
+ else if (attack_kern == ATTACK_KERN_COMBI)
{
- run_copy (device_param, pws_cnt);
-
- run_cracker (device_param, pw_cnt, pws_cnt);
-
- device_param->pw_cnt = 0;
- device_param->pws_cnt = 0;
+ run_kernel_bzero (device_param, device_param->d_combs_c, device_param->size_combs);
}
}
}
+ device_param->kernel_accel = 0;
+ device_param->kernel_loops = 0;
+
+ myfree (buf);
+
return NULL;
}
if (device_param->skipped) return NULL;
+ autotune (device_param);
+
const uint attack_mode = data.attack_mode;
const uint attack_kern = data.attack_kern;
{
while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
- const uint work = get_work (device_param, -1);
+ const uint work = get_work (device_param, -1, true);
if (work == 0) break;
const u64 words_off = device_param->words_off;
const u64 words_fin = words_off + work;
- const uint pw_cnt = work;
const uint pws_cnt = work;
- device_param->pw_cnt = pw_cnt;
device_param->pws_cnt = pws_cnt;
if (pws_cnt)
{
run_copy (device_param, pws_cnt);
- run_cracker (device_param, pw_cnt, pws_cnt);
+ run_cracker (device_param, pws_cnt);
- device_param->pw_cnt = 0;
device_param->pws_cnt = 0;
+
+ run_kernel_bzero (device_param, device_param->d_bfs_c, device_param->size_bfs);
}
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
if (data.devices_status == STATUS_QUIT) break;
if (data.devices_status == STATUS_BYPASS) break;
+ if (data.benchmark == 1) break;
+
device_param->words_done = words_fin;
}
}
u64 words_off = 0;
u64 words_fin = 0;
+ bool allow_div = true;
+
u64 max = -1;
while (max)
{
- const uint work = get_work (device_param, max);
+ const uint work = get_work (device_param, max, allow_div);
+
+ allow_div = false;
if (work == 0) break;
}
}
- device_param->pw_add (device_param, (u8 *) line_buf, line_len);
+ pw_add (device_param, (u8 *) line_buf, line_len);
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
if (data.devices_status == STATUS_QUIT) break;
if (data.devices_status == STATUS_BYPASS) break;
- // we need 2 flushing because we have two independant caches and it can occur
- // that one buffer is already at threshold plus for that length also exists
- // more data in the 2nd buffer so it would overflow
-
//
- // flush session 1
+ // flush
//
- {
- for (int pw_len = 0; pw_len < PW_MAX1; pw_len++)
- {
- pw_cache_t *pw_cache = &device_param->pw_caches[pw_len];
-
- const uint pw_cache_cnt = pw_cache->cnt;
-
- if (pw_cache_cnt == 0) continue;
-
- pw_cache->cnt = 0;
-
- uint pws_cnt = device_param->pws_cnt;
-
- pw_t *pw = device_param->pws_buf + pws_cnt;
-
- device_param->pw_transpose (&pw_cache->pw_buf, pw);
-
- pw->pw_len = pw_len;
-
- uint pw_cnt = device_param->pw_cnt;
-
- pw_cnt += pw_cache_cnt;
-
- device_param->pw_cnt = pw_cnt;
-
- pws_cnt++;
-
- device_param->pws_cnt = pws_cnt;
-
- if (pws_cnt == device_param->kernel_power_user) break;
- }
-
- const uint pw_cnt = device_param->pw_cnt;
- const uint pws_cnt = device_param->pws_cnt;
-
- if (pws_cnt)
- {
- run_copy (device_param, pws_cnt);
-
- run_cracker (device_param, pw_cnt, pws_cnt);
-
- device_param->pw_cnt = 0;
- device_param->pws_cnt = 0;
- }
-
- if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
-
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
- if (data.devices_status == STATUS_QUIT) break;
- if (data.devices_status == STATUS_BYPASS) break;
- }
-
- //
- // flush session 2
- //
+ const uint pws_cnt = device_param->pws_cnt;
+ if (pws_cnt)
{
- for (int pw_len = 0; pw_len < PW_MAX1; pw_len++)
- {
- pw_cache_t *pw_cache = &device_param->pw_caches[pw_len];
-
- const uint pw_cache_cnt = pw_cache->cnt;
-
- if (pw_cache_cnt == 0) continue;
-
- pw_cache->cnt = 0;
-
- uint pws_cnt = device_param->pws_cnt;
-
- pw_t *pw = device_param->pws_buf + pws_cnt;
-
- device_param->pw_transpose (&pw_cache->pw_buf, pw);
-
- pw->pw_len = pw_len;
-
- uint pw_cnt = device_param->pw_cnt;
-
- pw_cnt += pw_cache_cnt;
+ run_copy (device_param, pws_cnt);
- device_param->pw_cnt = pw_cnt;
+ run_cracker (device_param, pws_cnt);
- pws_cnt++;
+ device_param->pws_cnt = 0;
- device_param->pws_cnt = pws_cnt;
+ if (attack_kern == ATTACK_KERN_STRAIGHT)
+ {
+ run_kernel_bzero (device_param, device_param->d_rules_c, device_param->size_rules_c);
}
-
- const uint pw_cnt = device_param->pw_cnt;
- const uint pws_cnt = device_param->pws_cnt;
-
- if (pws_cnt)
+ else if (attack_kern == ATTACK_KERN_COMBI)
{
- run_copy (device_param, pws_cnt);
-
- run_cracker (device_param, pw_cnt, pws_cnt);
-
- device_param->pw_cnt = 0;
- device_param->pws_cnt = 0;
+ run_kernel_bzero (device_param, device_param->d_combs_c, device_param->size_combs);
}
+ }
- if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
+ if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
- if (data.devices_status == STATUS_QUIT) break;
- if (data.devices_status == STATUS_BYPASS) break;
- }
+ if (data.devices_status == STATUS_CRACKED) break;
+ if (data.devices_status == STATUS_ABORTED) break;
+ if (data.devices_status == STATUS_QUIT) break;
+ if (data.devices_status == STATUS_BYPASS) break;
if (words_fin == 0) break;
fclose (fd);
}
+ device_param->kernel_accel = 0;
+ device_param->kernel_loops = 0;
+
return NULL;
}
exit (-1);
}
- const uint kernel_loops = device_param->kernel_loops;
-
salt_t *salt_buf = &data.salts_buf[salt_pos];
device_param->kernel_params_buf32[24] = salt_pos;
{
run_kernel (KERN_RUN_1, device_param, 1, false);
+ uint loop_step = 16;
+
const uint iter = salt_buf->salt_iter;
- for (uint loop_pos = 0; loop_pos < iter; loop_pos += kernel_loops)
+ for (uint loop_pos = 0; loop_pos < iter; loop_pos += loop_step)
{
uint loop_left = iter - loop_pos;
- loop_left = MIN (loop_left, kernel_loops);
+ loop_left = MIN (loop_left, loop_step);
device_param->kernel_params_buf32[25] = loop_pos;
device_param->kernel_params_buf32[26] = loop_left;
// hlfmt hashcat
-static void hlfmt_hash_hashcat (char line_buf[BUFSIZ], int line_len, char **hashbuf_pos, int *hashbuf_len)
+static void hlfmt_hash_hashcat (char *line_buf, int line_len, char **hashbuf_pos, int *hashbuf_len)
{
if (data.username == 0)
{
}
}
-static void hlfmt_user_hashcat (char line_buf[BUFSIZ], int line_len, char **userbuf_pos, int *userbuf_len)
+static void hlfmt_user_hashcat (char *line_buf, int line_len, char **userbuf_pos, int *userbuf_len)
{
char *pos = NULL;
int len = 0;
// hlfmt pwdump
-static int hlfmt_detect_pwdump (char line_buf[BUFSIZ], int line_len)
+static int hlfmt_detect_pwdump (char *line_buf, int line_len)
{
int sep_cnt = 0;
return 0;
}
-static void hlfmt_hash_pwdump (char line_buf[BUFSIZ], int line_len, char **hashbuf_pos, int *hashbuf_len)
+static void hlfmt_hash_pwdump (char *line_buf, int line_len, char **hashbuf_pos, int *hashbuf_len)
{
char *pos = NULL;
int len = 0;
*hashbuf_len = len;
}
-static void hlfmt_user_pwdump (char line_buf[BUFSIZ], int line_len, char **userbuf_pos, int *userbuf_len)
+static void hlfmt_user_pwdump (char *line_buf, int line_len, char **userbuf_pos, int *userbuf_len)
{
char *pos = NULL;
int len = 0;
// hlfmt passwd
-static int hlfmt_detect_passwd (char line_buf[BUFSIZ], int line_len)
+static int hlfmt_detect_passwd (char *line_buf, int line_len)
{
int sep_cnt = 0;
return 0;
}
-static void hlfmt_hash_passwd (char line_buf[BUFSIZ], int line_len, char **hashbuf_pos, int *hashbuf_len)
+static void hlfmt_hash_passwd (char *line_buf, int line_len, char **hashbuf_pos, int *hashbuf_len)
{
char *pos = NULL;
int len = 0;
*hashbuf_len = len;
}
-static void hlfmt_user_passwd (char line_buf[BUFSIZ], int line_len, char **userbuf_pos, int *userbuf_len)
+static void hlfmt_user_passwd (char *line_buf, int line_len, char **userbuf_pos, int *userbuf_len)
{
char *pos = NULL;
int len = 0;
// hlfmt shadow
-static int hlfmt_detect_shadow (char line_buf[BUFSIZ], int line_len)
+static int hlfmt_detect_shadow (char *line_buf, int line_len)
{
int sep_cnt = 0;
return 0;
}
-static void hlfmt_hash_shadow (char line_buf[BUFSIZ], int line_len, char **hashbuf_pos, int *hashbuf_len)
+static void hlfmt_hash_shadow (char *line_buf, int line_len, char **hashbuf_pos, int *hashbuf_len)
{
hlfmt_hash_passwd (line_buf, line_len, hashbuf_pos, hashbuf_len);
}
-static void hlfmt_user_shadow (char line_buf[BUFSIZ], int line_len, char **userbuf_pos, int *userbuf_len)
+static void hlfmt_user_shadow (char *line_buf, int line_len, char **userbuf_pos, int *userbuf_len)
{
hlfmt_user_passwd (line_buf, line_len, userbuf_pos, userbuf_len);
}
// hlfmt main
-static void hlfmt_hash (uint hashfile_format, char line_buf[BUFSIZ], int line_len, char **hashbuf_pos, int *hashbuf_len)
+static void hlfmt_hash (uint hashfile_format, char *line_buf, int line_len, char **hashbuf_pos, int *hashbuf_len)
{
switch (hashfile_format)
{
}
}
-static void hlfmt_user (uint hashfile_format, char line_buf[BUFSIZ], int line_len, char **userbuf_pos, int *userbuf_len)
+static void hlfmt_user (uint hashfile_format, char *line_buf, int line_len, char **userbuf_pos, int *userbuf_len)
{
switch (hashfile_format)
{
}
}
+// Map a HLFMT_* hashlist-format id to its human-readable name
+// (the HLFMT_TEXT_* strings); unknown ids yield "Unknown".
+char *strhlfmt (const uint hashfile_format)
+{
+  switch (hashfile_format)
+  {
+    // no break needed: return already leaves the switch
+    case HLFMT_HASHCAT:  return ((char *) HLFMT_TEXT_HASHCAT);
+    case HLFMT_PWDUMP:   return ((char *) HLFMT_TEXT_PWDUMP);
+    case HLFMT_PASSWD:   return ((char *) HLFMT_TEXT_PASSWD);
+    case HLFMT_SHADOW:   return ((char *) HLFMT_TEXT_SHADOW);
+    case HLFMT_DCC:      return ((char *) HLFMT_TEXT_DCC);
+    case HLFMT_DCC2:     return ((char *) HLFMT_TEXT_DCC2);
+    case HLFMT_NETNTLM1: return ((char *) HLFMT_TEXT_NETNTLM1);
+    case HLFMT_NETNTLM2: return ((char *) HLFMT_TEXT_NETNTLM2);
+    case HLFMT_NSLDAP:   return ((char *) HLFMT_TEXT_NSLDAP);
+    case HLFMT_NSLDAPS:  return ((char *) HLFMT_TEXT_NSLDAPS);
+  }
+
+  return ((char *) "Unknown");
+}
+
static uint hlfmt_detect (FILE *fp, uint max_check)
{
// Exception: those formats are wrongly detected as HLFMT_SHADOW, prevent it
uint num_check = 0;
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
+
while (!feof (fp))
{
- char line_buf[BUFSIZ] = { 0 };
-
int line_len = fgetl (fp, line_buf);
if (line_len == 0) continue;
num_check++;
}
+ myfree (line_buf);
+
uint hashlist_format = HLFMT_HASHCAT;
for (int i = 1; i < HLFMTS_CNT; i++)
if (getenv ("GPU_USE_SYNC_OBJECTS") == NULL)
putenv ((char *) "GPU_USE_SYNC_OBJECTS=1");
+ if (getenv ("CUDA_CACHE_DISABLE") == NULL)
+ putenv ((char *) "CUDA_CACHE_DISABLE=1");
+
+ if (getenv ("POCL_KERNEL_CACHE") == NULL)
+ putenv ((char *) "POCL_KERNEL_CACHE=0");
+
/**
* Real init
*/
uint version = VERSION;
uint quiet = QUIET;
uint benchmark = BENCHMARK;
- uint benchmark_mode = BENCHMARK_MODE;
+ uint benchmark_repeats = BENCHMARK_REPEATS;
uint show = SHOW;
uint left = LEFT;
uint username = USERNAME;
u64 limit = LIMIT;
uint keyspace = KEYSPACE;
uint potfile_disable = POTFILE_DISABLE;
+ char *potfile_path = NULL;
uint debug_mode = DEBUG_MODE;
char *debug_file = NULL;
char *induction_dir = NULL;
#define IDX_LIMIT 'l'
#define IDX_KEYSPACE 0xff35
#define IDX_POTFILE_DISABLE 0xff06
+ #define IDX_POTFILE_PATH 0xffe0
#define IDX_DEBUG_MODE 0xff43
#define IDX_DEBUG_FILE 0xff44
#define IDX_INDUCTION_DIR 0xff46
#define IDX_FORCE 0xff08
#define IDX_RUNTIME 0xff09
#define IDX_BENCHMARK 'b'
- #define IDX_BENCHMARK_MODE 0xff32
+ #define IDX_BENCHMARK_REPEATS 0xff78
#define IDX_HASH_MODE 'm'
#define IDX_ATTACK_MODE 'a'
#define IDX_RP_FILE 'r'
{"limit", required_argument, 0, IDX_LIMIT},
{"keyspace", no_argument, 0, IDX_KEYSPACE},
{"potfile-disable", no_argument, 0, IDX_POTFILE_DISABLE},
+ {"potfile-path", required_argument, 0, IDX_POTFILE_PATH},
{"debug-mode", required_argument, 0, IDX_DEBUG_MODE},
{"debug-file", required_argument, 0, IDX_DEBUG_FILE},
{"induction-dir", required_argument, 0, IDX_INDUCTION_DIR},
{"outfile-check-dir", required_argument, 0, IDX_OUTFILE_CHECK_DIR},
{"force", no_argument, 0, IDX_FORCE},
{"benchmark", no_argument, 0, IDX_BENCHMARK},
- {"benchmark-mode", required_argument, 0, IDX_BENCHMARK_MODE},
+ {"benchmark-repeats", required_argument, 0, IDX_BENCHMARK_REPEATS},
{"restore", no_argument, 0, IDX_RESTORE},
{"restore-disable", no_argument, 0, IDX_RESTORE_DISABLE},
{"status", no_argument, 0, IDX_STATUS},
#endif
}
- uint hash_mode_chgd = 0;
- uint runtime_chgd = 0;
- uint kernel_loops_chgd = 0;
- uint kernel_accel_chgd = 0;
- uint attack_mode_chgd = 0;
- uint outfile_format_chgd = 0;
- uint rp_gen_seed_chgd = 0;
- uint remove_timer_chgd = 0;
- uint increment_min_chgd = 0;
- uint increment_max_chgd = 0;
+ uint hash_mode_chgd = 0;
+ uint runtime_chgd = 0;
+ uint kernel_loops_chgd = 0;
+ uint kernel_accel_chgd = 0;
+ uint attack_mode_chgd = 0;
+ uint outfile_format_chgd = 0;
+ uint rp_gen_seed_chgd = 0;
+ uint remove_timer_chgd = 0;
+ uint increment_min_chgd = 0;
+ uint increment_max_chgd = 0;
+ uint workload_profile_chgd = 0;
+ uint opencl_vector_width_chgd = 0;
+
#if defined(HAVE_HWMON) && defined(HAVE_ADL)
- uint gpu_temp_retain_chgd = 0;
- uint gpu_temp_abort_chgd = 0;
+ uint gpu_temp_retain_chgd = 0;
+ uint gpu_temp_abort_chgd = 0;
#endif
optind = 1;
case IDX_REMOVE_TIMER: remove_timer = atoi (optarg);
remove_timer_chgd = 1; break;
case IDX_POTFILE_DISABLE: potfile_disable = 1; break;
+ case IDX_POTFILE_PATH: potfile_path = optarg; break;
case IDX_DEBUG_MODE: debug_mode = atoi (optarg); break;
case IDX_DEBUG_FILE: debug_file = optarg; break;
case IDX_INDUCTION_DIR: induction_dir = optarg; break;
case IDX_LIMIT: limit = atoll (optarg); break;
case IDX_KEYSPACE: keyspace = 1; break;
case IDX_BENCHMARK: benchmark = 1; break;
- case IDX_BENCHMARK_MODE: benchmark_mode = atoi (optarg); break;
+ case IDX_BENCHMARK_REPEATS: benchmark_repeats = atoi (optarg); break;
case IDX_RESTORE: break;
case IDX_RESTORE_DISABLE: restore_disable = 1; break;
case IDX_STATUS: status = 1; break;
case IDX_OPENCL_DEVICE_TYPES:
opencl_device_types = optarg; break;
case IDX_OPENCL_VECTOR_WIDTH:
- opencl_vector_width = atoi (optarg); break;
- case IDX_WORKLOAD_PROFILE: workload_profile = atoi (optarg); break;
- case IDX_KERNEL_ACCEL: kernel_accel = atoi (optarg);
- kernel_accel_chgd = 1; break;
- case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
- kernel_loops_chgd = 1; break;
+ opencl_vector_width = atoi (optarg);
+ opencl_vector_width_chgd = 1; break;
+ case IDX_WORKLOAD_PROFILE: workload_profile = atoi (optarg);
+ workload_profile_chgd = 1; break;
+ case IDX_KERNEL_ACCEL: kernel_accel = atoi (optarg);
+ kernel_accel_chgd = 1; break;
+ case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
+ kernel_loops_chgd = 1; break;
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
#ifdef HAVE_HWMON
case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg);
return (-1);
}
- if (hash_mode_chgd && hash_mode > 13000) // just added to remove compiler warnings for hash_mode_chgd
+ if (hash_mode_chgd && hash_mode > 13500) // just added to remove compiler warnings for hash_mode_chgd
{
log_error ("ERROR: Invalid hash-type specified");
if (kernel_accel_chgd == 1)
{
- if (workload_profile != WORKLOAD_PROFILE)
- {
- log_error ("ERROR: kernel-accel parameter can only be set when workload-profile %i is used", WORKLOAD_PROFILE);
-
- return (-1);
- }
-
if (kernel_accel < 1)
{
log_error ("ERROR: Invalid kernel-accel specified");
if (kernel_loops_chgd == 1)
{
- if (workload_profile != WORKLOAD_PROFILE)
- {
- log_error ("ERROR: kernel-loops parameter can only be set when workload-profile %i is used", WORKLOAD_PROFILE);
-
- return (-1);
- }
-
if (kernel_loops < 1)
{
log_error ("ERROR: Invalid kernel-loops specified");
}
}
- if (benchmark == 1)
- {
- if (workload_profile != WORKLOAD_PROFILE)
- {
- log_error ("ERROR: Using the workload-profile in benchmark mode is not allowed");
-
- return (-1);
- }
- }
-
if ((workload_profile < 1) || (workload_profile > 3))
{
log_error ("ERROR: workload-profile %i not available", workload_profile);
return (-1);
}
- if ((opencl_vector_width != 0) && (opencl_vector_width != 1) && (opencl_vector_width != 2) && (opencl_vector_width != 4) && (opencl_vector_width != 8))
+ if (opencl_vector_width_chgd && (!is_power_of_2(opencl_vector_width) || opencl_vector_width > 16))
{
log_error ("ERROR: opencl-vector-width %i not allowed", opencl_vector_width);
return (-1);
}
}
-
- if (benchmark_mode == 0)
- {
- // nothing to do
- }
- else if (benchmark_mode == 1)
- {
- if (kernel_accel_chgd == 1 || kernel_loops_chgd == 1)
- {
- log_error ("ERROR: Benchmark-mode 1 does not allow kernel-accel or kernel-loops changed");
-
- return (-1);
- }
- }
- else
- {
- log_error ("ERROR: Benchmark-mode must be 0 or 1");
-
- return (-1);
- }
}
if (skip != 0 && limit != 0)
if (loopback == 1)
{
- if (attack_mode == ATTACK_MODE_BF)
- {
- log_error ("ERROR: Parameter loopback not allowed in attack-mode 3");
-
- return (-1);
- }
- else if (attack_mode == ATTACK_MODE_STRAIGHT)
+ if (attack_mode == ATTACK_MODE_STRAIGHT)
{
if ((rp_files_cnt == 0) && (rp_gen == 0))
{
return (-1);
}
}
+ else
+ {
+ log_error ("ERROR: Parameter loopback allowed in attack-mode 0 only");
+
+ return (-1);
+ }
}
if (debug_mode > 0)
data.rp_gen_seed = rp_gen_seed;
data.force = force;
data.benchmark = benchmark;
+ data.benchmark_repeats = benchmark_repeats;
data.skip = skip;
data.limit = limit;
#if defined(HAVE_HWMON) && defined(HAVE_ADL)
data.logfile_disable = logfile_disable;
data.truecrypt_keyfiles = truecrypt_keyfiles;
data.scrypt_tmto = scrypt_tmto;
+ data.workload_profile = workload_profile;
/**
* cpu affinity
logfile_top_uint (attack_mode);
logfile_top_uint (attack_kern);
logfile_top_uint (benchmark);
- logfile_top_uint (benchmark_mode);
+ logfile_top_uint (benchmark_repeats);
logfile_top_uint (bitmap_min);
logfile_top_uint (bitmap_max);
logfile_top_uint (debug_mode);
logfile_top_uint (outfile_check_timer);
logfile_top_uint (outfile_format);
logfile_top_uint (potfile_disable);
+ logfile_top_string (potfile_path);
#if defined(HAVE_HWMON) && defined(HAVE_ADL)
logfile_top_uint (powertune_enable);
#endif
* disable useless stuff for benchmark
*/
- restore_timer = 0;
- status_timer = 0;
- restore_disable = 1;
- potfile_disable = 1;
- weak_hash_threshold = 0;
-
- data.restore_timer = restore_timer;
- data.status_timer = status_timer;
- data.restore_disable = restore_disable;
+ status_timer = 0;
+ restore_timer = 0;
+ restore_disable = 1;
+ potfile_disable = 1;
+ weak_hash_threshold = 0;
+ gpu_temp_disable = 1;
- if (benchmark_mode == 1)
- {
- markov_disable = 1;
-
- workload_profile = 3;
- }
+ data.status_timer = status_timer;
+ data.restore_timer = restore_timer;
+ data.restore_disable = restore_disable;
/**
* force attack mode to be bruteforce
attack_mode = ATTACK_MODE_BF;
attack_kern = ATTACK_KERN_BF;
- if (runtime_chgd == 0)
+ if (workload_profile_chgd == 0)
{
- runtime = 8;
-
- if (benchmark_mode == 1) runtime = 17;
+ workload_profile = 3;
- data.runtime = runtime;
+ data.workload_profile = workload_profile;
}
}
dgst_pos3 = 1;
break;
+ case 125: hash_type = HASH_TYPE_SHA1;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_BE
+ | OPTS_TYPE_PT_ADD80
+ | OPTS_TYPE_PT_ADDBITS15
+ | OPTS_TYPE_ST_HEX;
+ kern_type = KERN_TYPE_SHA1_SLTPW;
+ dgst_size = DGST_SIZE_4_5;
+ parse_func = arubaos_parse_hash;
+ sort_by_digest = sort_by_digest_4_5;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_PRECOMPUTE_INIT
+ | OPTI_TYPE_PRECOMPUTE_MERKLE
+ | OPTI_TYPE_EARLY_SKIP
+ | OPTI_TYPE_NOT_ITERATED
+ | OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_RAW_HASH;
+ dgst_pos0 = 3;
+ dgst_pos1 = 4;
+ dgst_pos2 = 2;
+ dgst_pos3 = 1;
+ break;
+
case 130: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
dgst_size = DGST_SIZE_4_4;
parse_func = phpass_parse_hash;
sort_by_digest = sort_by_digest_4_4;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
case 8300: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
- opts_type = OPTS_TYPE_PT_GENERATE_LE
+ opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_HEX
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_NSEC3;
dgst_pos3 = 3;
break;
+ case 13100: hash_type = HASH_TYPE_KRB5TGS;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = KERN_TYPE_KRB5TGS;
+ dgst_size = DGST_SIZE_4_4;
+ parse_func = krb5tgs_parse_hash;
+ sort_by_digest = sort_by_digest_4_4;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_NOT_ITERATED;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
+ dgst_pos2 = 2;
+ dgst_pos3 = 3;
+ break;
+
+ case 13200: hash_type = HASH_TYPE_AES;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = KERN_TYPE_AXCRYPT;
+ dgst_size = DGST_SIZE_4_4;
+ parse_func = axcrypt_parse_hash;
+ sort_by_digest = sort_by_digest_4_4;
+ opti_type = OPTI_TYPE_ZERO_BYTE;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
+ dgst_pos2 = 2;
+ dgst_pos3 = 3;
+ break;
+
+ case 13300: hash_type = HASH_TYPE_SHA1;
+ salt_type = SALT_TYPE_NONE;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_BE
+ | OPTS_TYPE_PT_ADD80
+ | OPTS_TYPE_PT_ADDBITS15;
+ kern_type = KERN_TYPE_SHA1_AXCRYPT;
+ dgst_size = DGST_SIZE_4_5;
+ parse_func = sha1axcrypt_parse_hash;
+ sort_by_digest = sort_by_digest_4_5;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_PRECOMPUTE_INIT
+ | OPTI_TYPE_EARLY_SKIP
+ | OPTI_TYPE_NOT_ITERATED
+ | OPTI_TYPE_NOT_SALTED;
+ dgst_pos0 = 0;
+ dgst_pos1 = 4;
+ dgst_pos2 = 3;
+ dgst_pos3 = 2;
+ break;
+
+ case 13400: hash_type = HASH_TYPE_AES;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = KERN_TYPE_KEEPASS;
+ dgst_size = DGST_SIZE_4_4;
+ parse_func = keepass_parse_hash;
+ sort_by_digest = sort_by_digest_4_4;
+ opti_type = OPTI_TYPE_ZERO_BYTE;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
+ dgst_pos2 = 2;
+ dgst_pos3 = 3;
+ break;
+
+ case 13500: hash_type = HASH_TYPE_SHA1;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_BE
+ | OPTS_TYPE_PT_UNICODE
+ | OPTS_TYPE_PT_ADD80;
+ kern_type = KERN_TYPE_PSTOKEN;
+ dgst_size = DGST_SIZE_4_5;
+ parse_func = pstoken_parse_hash;
+ sort_by_digest = sort_by_digest_4_5;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_PRECOMPUTE_INIT
+ | OPTI_TYPE_EARLY_SKIP
+ | OPTI_TYPE_NOT_ITERATED
+ | OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_RAW_HASH;
+ dgst_pos0 = 3;
+ dgst_pos1 = 4;
+ dgst_pos2 = 2;
+ dgst_pos3 = 1;
+ break;
+
default: usage_mini_print (PROGNAME); return (-1);
}
/**
- * transpose
+ * parser
*/
data.parse_func = parse_func;
case 5400: esalt_size = sizeof (ikepsk_t); break;
case 5500: esalt_size = sizeof (netntlm_t); break;
case 5600: esalt_size = sizeof (netntlm_t); break;
- case 6211:
- case 6212:
- case 6213:
- case 6221:
- case 6222:
- case 6223:
- case 6231:
- case 6232:
- case 6233:
- case 6241:
- case 6242:
+ case 6211: esalt_size = sizeof (tc_t); break;
+ case 6212: esalt_size = sizeof (tc_t); break;
+ case 6213: esalt_size = sizeof (tc_t); break;
+ case 6221: esalt_size = sizeof (tc_t); break;
+ case 6222: esalt_size = sizeof (tc_t); break;
+ case 6223: esalt_size = sizeof (tc_t); break;
+ case 6231: esalt_size = sizeof (tc_t); break;
+ case 6232: esalt_size = sizeof (tc_t); break;
+ case 6233: esalt_size = sizeof (tc_t); break;
+ case 6241: esalt_size = sizeof (tc_t); break;
+ case 6242: esalt_size = sizeof (tc_t); break;
case 6243: esalt_size = sizeof (tc_t); break;
case 6600: esalt_size = sizeof (agilekey_t); break;
case 7100: esalt_size = sizeof (pbkdf2_sha512_t); break;
case 12000: esalt_size = sizeof (pbkdf2_sha1_t); break;
case 12100: esalt_size = sizeof (pbkdf2_sha512_t); break;
case 13000: esalt_size = sizeof (rar5_t); break;
+ case 13100: esalt_size = sizeof (krb5tgs_t); break;
+ case 13400: esalt_size = sizeof (keepass_t); break;
+ case 13500: esalt_size = sizeof (pstoken_t); break;
}
data.esalt_size = esalt_size;
if (keyspace == 0)
{
- snprintf (dictstat, sizeof (dictstat) - 1, "%s/hashcat.dictstat", profile_dir);
+ snprintf (dictstat, sizeof (dictstat) - 1, "%s/%s", profile_dir, DICTSTAT_FILENAME);
dictstat_fp = fopen (dictstat, "rb");
char potfile[256] = { 0 };
- snprintf (potfile, sizeof (potfile) - 1, "%s/%s.pot", session_dir, session);
+ if (potfile_path == NULL)
+ {
+ snprintf (potfile, sizeof (potfile) - 1, "%s/%s", profile_dir, POTFILE_FILENAME);
+ }
+ else
+ {
+ strncpy (potfile, potfile_path, sizeof (potfile) - 1);
+ }
data.pot_fp = NULL;
uint line_num = 0;
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
+
while (!feof (pot_fp))
{
line_num++;
- char line_buf[BUFSIZ] = { 0 };
-
int line_len = fgetl (pot_fp, line_buf);
if (line_len == 0) continue;
pot_cnt++;
}
+ myfree (line_buf);
+
fclose (pot_fp);
SUPPRESS_OUTPUT = 0;
switch (hash_mode)
{
+ case 125: if (pw_max > 32) pw_max = 32;
+ break;
case 400: if (pw_max > 40) pw_max = 40;
break;
case 500: if (pw_max > 16) pw_max = 16;
hlfmt_hash (hashlist_format, input_buf, input_len, &hash_buf, &hash_len);
- if (hash_len)
+ bool hash_fmt_error = 0;
+
+ if (hash_len < 1) hash_fmt_error = 1;
+ if (hash_buf == NULL) hash_fmt_error = 1;
+
+ if (hash_fmt_error)
+ {
+ log_info ("WARNING: failed to parse hashes using the '%s' format", strhlfmt (hashlist_format));
+ }
+ else
{
if (opts_type & OPTS_TYPE_HASH_COPY)
{
wpa_t *wpa = (wpa_t *) hashes_buf[hashes_cnt].esalt;
- u8 *pke_ptr = (u8 *) wpa->pke;
-
// do the appending task
snprintf (salt_ptr + cur_pos,
rem_len,
":%02x%02x%02x%02x%02x%02x:%02x%02x%02x%02x%02x%02x",
- pke_ptr[20], pke_ptr[27], pke_ptr[26], pke_ptr[25], pke_ptr[24], pke_ptr[31], // MAC1
- pke_ptr[30], pke_ptr[29], pke_ptr[28], pke_ptr[35], pke_ptr[34], pke_ptr[33]); // MAC2
-
+ wpa->orig_mac1[0],
+ wpa->orig_mac1[1],
+ wpa->orig_mac1[2],
+ wpa->orig_mac1[3],
+ wpa->orig_mac1[4],
+ wpa->orig_mac1[5],
+ wpa->orig_mac2[0],
+ wpa->orig_mac2[1],
+ wpa->orig_mac2[2],
+ wpa->orig_mac2[3],
+ wpa->orig_mac2[4],
+ wpa->orig_mac2[5]);
// memset () the remaining part of the salt
uint line_num = 0;
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
+
while (!feof (fp))
{
line_num++;
- char line_buf[BUFSIZ] = { 0 };
-
int line_len = fgetl (fp, line_buf);
if (line_len == 0) continue;
hlfmt_hash (hashlist_format, line_buf, line_len, &hash_buf, &hash_len);
+ bool hash_fmt_error = 0;
+
+ if (hash_len < 1) hash_fmt_error = 1;
+ if (hash_buf == NULL) hash_fmt_error = 1;
+
+ if (hash_fmt_error)
+ {
+ log_info ("WARNING: failed to parse hashes using the '%s' format", strhlfmt (hashlist_format));
+
+ continue;
+ }
+
if (username)
{
char *user_buf = NULL;
}
}
+ myfree (line_buf);
+
fclose (fp);
if (data.quiet == 0) log_info_nn ("Parsed Hashes: %u/%u (%0.2f%%)", hashes_avail, hashes_avail, 100.00);
switch (hash_mode)
{
- case 1500: hashes_buf[0].salt->salt_len = 2;
+ case 1500: hashes_buf[0].salt->salt_len = 2;
+ hashes_buf[0].salt->salt_buf[0] = 388; // pure magic
break;
case 1731: hashes_buf[0].salt->salt_len = 4;
break;
((seven_zip_t *) hashes_buf[0].esalt)->data_len = 112;
((seven_zip_t *) hashes_buf[0].esalt)->unpack_size = 112;
break;
+ case 13400: ((keepass_t *) hashes_buf[0].esalt)->version = 2;
+ break;
+ case 13500: ((pstoken_t *) hashes_buf[0].esalt)->salt_len = 113;
+ break;
}
}
break;
case 5400: data.hashfile = mystrdup ("hashcat.ikesha1");
break;
- case 6211:
- case 6212:
- case 6213:
- case 6221:
- case 6222:
- case 6223:
- case 6231:
- case 6232:
- case 6233:
- case 6241:
- case 6242:
+ case 6211: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6212: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6213: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6221: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6222: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6223: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6231: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6232: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6233: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6241: data.hashfile = mystrdup ("hashcat.tc");
+ break;
+ case 6242: data.hashfile = mystrdup ("hashcat.tc");
+ break;
case 6243: data.hashfile = mystrdup ("hashcat.tc");
break;
case 6600: data.hashfile = mystrdup ("hashcat.agilekey");
break;
case 5800: hashes_buf[0].salt->salt_iter = ROUNDS_ANDROIDPIN - 1;
break;
- case 6211:
- case 6212:
+ case 6211: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_2K;
+ break;
+ case 6212: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_2K;
+ break;
case 6213: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_2K;
break;
- case 6221:
- case 6222:
+ case 6221: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
+ break;
+ case 6222: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
+ break;
case 6223: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
break;
- case 6231:
- case 6232:
+ case 6231: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
+ break;
+ case 6232: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
+ break;
case 6233: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
break;
- case 6241:
- case 6242:
+ case 6241: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
+ break;
+ case 6242: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
+ break;
case 6243: hashes_buf[0].salt->salt_iter = ROUNDS_TRUECRYPT_1K;
break;
case 6300: hashes_buf[0].salt->salt_iter = ROUNDS_MD5CRYPT;
break;
case 13000: hashes_buf[0].salt->salt_iter = ROUNDS_RAR5 - 1;
break;
+ case 13200: hashes_buf[0].salt->salt_iter = ROUNDS_AXCRYPT;
+ break;
+ case 13400: hashes_buf[0].salt->salt_iter = ROUNDS_KEEPASS;
+ break;
}
hashes_cnt = 1;
if (fp != NULL)
{
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
+
+      // To be safe, work with a copy (because of the line_len loop, the index i, etc.)
+      // Moved up here because it makes the continue case easier to handle;
+      // the buffer is only 64 KB
+
+ char *line_buf_cpy = (char *) mymalloc (HCBUFSIZ);
+
while (!feof (fp))
{
- char line_buf[BUFSIZ] = { 0 };
-
- char *ptr = fgets (line_buf, BUFSIZ - 1, fp);
+ char *ptr = fgets (line_buf, HCBUFSIZ - 1, fp);
if (ptr == NULL) break;
// here we have in line_buf: ESSID:MAC1:MAC2 (without the plain)
// manipulate salt_buf
- // to be safe work with a copy (because of line_len loop, i etc)
-
- char line_buf_cpy[BUFSIZ] = { 0 };
-
memcpy (line_buf_cpy, line_buf, i);
char *mac2_pos = strrchr (line_buf_cpy, ':');
{
wpa_t *wpa = (wpa_t *) found->esalt;
- uint pke[25] = { 0 };
-
- char *pke_ptr = (char *) pke;
-
- for (uint i = 0; i < 25; i++)
- {
- pke[i] = byte_swap_32 (wpa->pke[i]);
- }
-
- u8 mac1[6] = { 0 };
- u8 mac2[6] = { 0 };
-
- memcpy (mac1, pke_ptr + 23, 6);
- memcpy (mac2, pke_ptr + 29, 6);
-
// compare hex string(s) vs binary MAC address(es)
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
+ if (wpa->orig_mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
found = NULL;
+
break;
}
}
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
+ if (wpa->orig_mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
found = NULL;
+
break;
}
}
}
}
+ myfree (line_buf_cpy);
+
+ myfree (line_buf);
+
fclose (fp);
}
}
all_kernel_rules_buf = (kernel_rule_t **) mycalloc (rp_files_cnt, sizeof (kernel_rule_t *));
}
- char rule_buf[BUFSIZ] = { 0 };
+ char *rule_buf = (char *) mymalloc (HCBUFSIZ);
int rule_len = 0;
while (!feof (fp))
{
- memset (rule_buf, 0, BUFSIZ);
+ memset (rule_buf, 0, HCBUFSIZ);
rule_len = fgetl (fp, rule_buf);
kernel_rules_avail += INCR_RULES;
}
- memset (rule_buf, 0, BLOCK_SIZE);
+ memset (rule_buf, 0, HCBUFSIZ);
rule_len = (int) generate_random_rule (rule_buf, rp_gen_func_min, rp_gen_func_max);
}
}
+ myfree (rule_buf);
+
/**
* generate NOP rules
*/
return (-1);
}
+
+ if (opencl_platforms_filter != (uint) -1)
+ {
+ uint platform_cnt_mask = ~(((uint) -1 >> platforms_cnt) << platforms_cnt);
+
+ if (opencl_platforms_filter > platform_cnt_mask)
+ {
+ log_error ("ERROR: The platform selected by the --opencl-platforms parameter is larger than the number of available platforms (%d)", platforms_cnt);
+
+ return (-1);
+ }
+ }
}
/**
hc_clGetDeviceIDs (data.ocl, platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
+ char platform_vendor[INFOSZ] = { 0 };
+
+ hc_clGetPlatformInfo (data.ocl, platform, CL_PLATFORM_VENDOR, sizeof (platform_vendor), platform_vendor, NULL);
+
+    // Determine the platform vendor ourselves, because pocl and mesa push the
+    // original vendor_id through OpenCL, which causes trouble with vendor-id
+    // based macros; platforms without a special optimization path get the generic id
+
+ cl_uint vendor_id = 0;
+
+ if (strcmp (platform_vendor, CL_VENDOR_AMD) == 0)
+ {
+ vendor_id = VENDOR_ID_AMD;
+ }
+ else if (strcmp (platform_vendor, CL_VENDOR_APPLE) == 0)
+ {
+ vendor_id = VENDOR_ID_GENERIC;
+ }
+ else if (strcmp (platform_vendor, CL_VENDOR_INTEL_BEIGNET) == 0)
+ {
+ vendor_id = VENDOR_ID_GENERIC;
+ }
+ else if (strcmp (platform_vendor, CL_VENDOR_INTEL_SDK) == 0)
+ {
+ vendor_id = VENDOR_ID_GENERIC;
+ }
+ else if (strcmp (platform_vendor, CL_VENDOR_MESA) == 0)
+ {
+ vendor_id = VENDOR_ID_GENERIC;
+ }
+ else if (strcmp (platform_vendor, CL_VENDOR_NV) == 0)
+ {
+ vendor_id = VENDOR_ID_NV;
+ }
+ else if (strcmp (platform_vendor, CL_VENDOR_POCL) == 0)
+ {
+ vendor_id = VENDOR_ID_GENERIC;
+ }
+ else
+ {
+ vendor_id = VENDOR_ID_GENERIC;
+ }
+
for (uint platform_devices_id = 0; platform_devices_id < platform_devices_cnt; platform_devices_id++)
{
size_t param_value_size = 0;
hc_device_param_t *device_param = &data.devices_param[device_id];
+ device_param->vendor_id = vendor_id;
+
device_param->device = platform_devices[platform_devices_id];
device_param->device_id = device_id;
device_param->device_type = device_type;
- // vendor_id
-
- cl_uint vendor_id = 0;
-
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VENDOR_ID, sizeof (vendor_id), &vendor_id, NULL);
-
- device_param->vendor_id = vendor_id;
-
// device_name
hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NAME, 0, NULL, ¶m_value_size);
// tuning db
- tuning_db_entry_t *tuningdb_entry = tuning_db_search (tuning_db, device_param->device_name, attack_mode, hash_mode);
+ tuning_db_entry_t *tuningdb_entry = tuning_db_search (tuning_db, device_param, attack_mode, hash_mode);
// device_version
myfree (device_opencl_version);
- if (strstr (device_version, "pocl"))
- {
- // pocl returns the real vendor_id in CL_DEVICE_VENDOR_ID which causes many problems because of hms and missing amd_bfe () etc
- // we need to overwrite vendor_id to avoid this. maybe open pocl issue?
-
- cl_uint vendor_id = VENDOR_ID_GENERIC;
-
- device_param->vendor_id = vendor_id;
- }
-
// vector_width
cl_uint vector_width;
- if (opencl_vector_width == OPENCL_VECTOR_WIDTH)
+ if (opencl_vector_width_chgd == 0)
{
- if (tuningdb_entry->vector_width == -1)
+ if (tuningdb_entry == NULL || tuningdb_entry->vector_width == -1)
{
if (opti_type & OPTI_TYPE_USES_BITS_64)
{
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, sizeof (vector_width), &vector_width, NULL);
+ hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, sizeof (vector_width), &vector_width, NULL);
}
else
{
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
+ hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
}
}
else
vector_width = opencl_vector_width;
}
- if (vector_width > 8) vector_width = 8;
+ if (vector_width > 16) vector_width = 16;
device_param->vector_width = vector_width;
if (device_param->skipped == 0)
{
- if (strstr (device_version, "pocl"))
- {
- if (force == 0)
- {
- log_info ("");
- log_info ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
- log_info ("You are STRONGLY encouraged not to use it");
- log_info ("You can use --force to override this but do not post error reports if you do so");
- log_info ("");
-
- return (-1);
- }
- }
-
if (device_type & CL_DEVICE_TYPE_GPU)
{
- if (vendor_id == VENDOR_ID_NV)
- {
- if (device_param->kernel_exec_timeout != 0)
- {
- if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
- if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
- }
- }
- else if (vendor_id == VENDOR_ID_AMD)
+ if (vendor_id == VENDOR_ID_AMD)
{
int catalyst_check = (force == 1) ? 0 : 1;
return (-1);
}
-
- if (catalyst_warn == 1)
+
+ if (catalyst_warn == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
+ log_info ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
+ log_info ("See oclHashcat's homepage for official supported catalyst drivers");
+ #ifdef _WIN
+ log_info ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
+ #endif
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
+
+ return (-1);
+ }
+ }
+ else if (vendor_id == VENDOR_ID_NV)
+ {
+ if (device_param->kernel_exec_timeout != 0)
+ {
+ if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
+ if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
+ }
+ }
+ else if (vendor_id == VENDOR_ID_POCL)
+ {
+ if (force == 0)
{
log_info ("");
- log_info ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
- log_info ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
- log_info ("See oclHashcat's homepage for official supported catalyst drivers");
- #ifdef _WIN
- log_info ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
- #endif
+ log_info ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
+ log_info ("You are STRONGLY encouraged not to use it");
log_info ("You can use --force to override this but do not post error reports if you do so");
log_info ("");
}
/**
- * kernel accel and loops auto adjustment
+ * kernel accel and loops tuning db adjustment
*/
- uint _kernel_accel = kernel_accel;
- uint _kernel_loops = kernel_loops;
+ device_param->kernel_accel_min = 1;
+ device_param->kernel_accel_max = 1024;
- tuning_db_entry_t *tuningdb_entry = tuning_db_search (tuning_db, device_param->device_name, attack_mode, hash_mode);
+ device_param->kernel_loops_min = 1;
+ device_param->kernel_loops_max = 1024;
- if (kernel_accel_chgd == 0)
- {
- _kernel_accel = tuningdb_entry->kernel_accel;
- }
+ tuning_db_entry_t *tuningdb_entry = tuning_db_search (tuning_db, device_param, attack_mode, hash_mode);
- if (kernel_loops_chgd == 0)
+ if (tuningdb_entry)
{
- _kernel_loops = tuningdb_entry->kernel_loops;
- }
+ u32 _kernel_accel = tuningdb_entry->kernel_accel;
+ u32 _kernel_loops = tuningdb_entry->kernel_loops;
- if (workload_profile == 1)
- {
- _kernel_loops = (_kernel_loops > 8) ? _kernel_loops / 8 : 1;
- }
- else if (workload_profile == 2)
- {
- _kernel_loops = (_kernel_loops > 4) ? _kernel_loops / 4 : 1;
+ if (_kernel_accel)
+ {
+ device_param->kernel_accel_min = _kernel_accel;
+ device_param->kernel_accel_max = _kernel_accel;
+ }
+
+ if (_kernel_loops)
+ {
+ if (workload_profile == 1)
+ {
+ _kernel_loops = (_kernel_loops > 8) ? _kernel_loops / 8 : 1;
+ }
+ else if (workload_profile == 2)
+ {
+ _kernel_loops = (_kernel_loops > 4) ? _kernel_loops / 4 : 1;
+ }
+
+ device_param->kernel_loops_min = _kernel_loops;
+ device_param->kernel_loops_max = _kernel_loops;
+ }
}
- /**
- * there's a few algorithm that force a fixed kernel_loop count
- */
+      // command-line parameters override tuning-db entries
- if ((opts_type & OPTS_TYPE_PT_BITSLICE) && (attack_mode == ATTACK_MODE_BF))
+ if (kernel_accel)
{
- _kernel_loops = 1024;
+ device_param->kernel_accel_min = kernel_accel;
+ device_param->kernel_accel_max = kernel_accel;
}
- if (hash_mode == 12500)
+ if (kernel_loops)
{
- _kernel_loops = ROUNDS_RAR3 / 16;
+ device_param->kernel_loops_min = kernel_loops;
+ device_param->kernel_loops_max = kernel_loops;
}
- device_param->kernel_accel = _kernel_accel;
- device_param->kernel_loops = _kernel_loops;
+ /**
+ * activate device
+ */
devices_active++;
}
return (-1);
}
+ // additional check to see if the user has chosen a device that is not within the range of available devices (i.e. larger than devices_cnt)
+
+ if (devices_filter != (uint) -1)
+ {
+ uint devices_cnt_mask = ~(((uint) -1 >> devices_cnt) << devices_cnt);
+
+ if (devices_filter > devices_cnt_mask)
+ {
+ log_error ("ERROR: The device specified by the --opencl-devices parameter is larger than the number of available devices (%d)", devices_cnt);
+
+ return (-1);
+ }
+ }
+
data.devices_cnt = devices_cnt;
data.devices_active = devices_active;
#endif // HAVE_ADK
#endif // HAVE_HWMON
- #ifdef OSX
- if (hash_mode == 3000 || hash_mode == 1500 || hash_mode == 10700)
- {
- if (force == 0)
- {
- log_info ("");
- log_info ("Warning: Hash mode %d is not stable with OSX.", hash_mode);
- log_info ("You can use --force to override this but do not post error reports if you do so");
- log_info ("");
-
- continue;
- }
- }
- #endif
-
#ifdef DEBUG
if (benchmark == 1) log_info ("Hashmode: %d", data.hash_mode);
#endif
- uint kernel_blocks_all = 0;
+ uint kernel_power_all = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
* device properties
*/
- char *device_name_chksum = device_param->device_name_chksum;
-
- uint device_processors = device_param->device_processors;
-
- uint device_processor_cores = device_param->device_processor_cores;
-
- uint kernel_accel = device_param->kernel_accel;
+ const char *device_name_chksum = device_param->device_name_chksum;
+ const u32 device_processors = device_param->device_processors;
+ const u32 device_processor_cores = device_param->device_processor_cores;
/**
* create context for each device
device_param->command_queue = hc_clCreateCommandQueue (data.ocl, device_param->context, device_param->device, CL_QUEUE_PROFILING_ENABLE);
/**
- * create input buffers on device
+ * create input buffers on device : calculate size of fixed memory buffers
*/
- uint kernel_threads = KERNEL_THREADS;
-
- // bcrypt
- if (hash_mode == 3200) kernel_threads = 8;
- if (hash_mode == 9000) kernel_threads = 8;
-
- uint kernel_power = 1;
- uint kernel_blocks = 1;
-
- uint size_pws = 4;
- uint size_tmps = 4;
- uint size_hooks = 4;
-
- // find out if we would request too much memory on memory blocks which are based on kernel_accel
-
- while (kernel_accel)
- {
- kernel_power = device_processors * kernel_threads * kernel_accel;
- kernel_blocks = kernel_power;
-
- // size_pws
-
- size_pws = kernel_blocks * sizeof (pw_t);
-
- // size_tmps
-
- switch (hash_mode)
- {
- case 400: size_tmps = kernel_blocks * sizeof (phpass_tmp_t); break;
- case 500: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
- case 501: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
- case 1600: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
- case 1800: size_tmps = kernel_blocks * sizeof (sha512crypt_tmp_t); break;
- case 2100: size_tmps = kernel_blocks * sizeof (dcc2_tmp_t); break;
- case 2500: size_tmps = kernel_blocks * sizeof (wpa_tmp_t); break;
- case 3200: size_tmps = kernel_blocks * sizeof (bcrypt_tmp_t); break;
- case 5200: size_tmps = kernel_blocks * sizeof (pwsafe3_tmp_t); break;
- case 5800: size_tmps = kernel_blocks * sizeof (androidpin_tmp_t); break;
- case 6211:
- case 6212:
- case 6213: size_tmps = kernel_blocks * sizeof (tc_tmp_t); break;
- case 6221:
- case 6222:
- case 6223: size_tmps = kernel_blocks * sizeof (tc64_tmp_t); break;
- case 6231:
- case 6232:
- case 6233: size_tmps = kernel_blocks * sizeof (tc_tmp_t); break;
- case 6241:
- case 6242:
- case 6243: size_tmps = kernel_blocks * sizeof (tc_tmp_t); break;
- case 6300: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
- case 6400: size_tmps = kernel_blocks * sizeof (sha256aix_tmp_t); break;
- case 6500: size_tmps = kernel_blocks * sizeof (sha512aix_tmp_t); break;
- case 6600: size_tmps = kernel_blocks * sizeof (agilekey_tmp_t); break;
- case 6700: size_tmps = kernel_blocks * sizeof (sha1aix_tmp_t); break;
- case 6800: size_tmps = kernel_blocks * sizeof (lastpass_tmp_t); break;
- case 7100: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 7200: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 7400: size_tmps = kernel_blocks * sizeof (sha256crypt_tmp_t); break;
- case 7900: size_tmps = kernel_blocks * sizeof (drupal7_tmp_t); break;
- case 8200: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 8800: size_tmps = kernel_blocks * sizeof (androidfde_tmp_t); break;
- case 8900: size_tmps = kernel_blocks * sizeof (scrypt_tmp_t); break;
- case 9000: size_tmps = kernel_blocks * sizeof (pwsafe2_tmp_t); break;
- case 9100: size_tmps = kernel_blocks * sizeof (lotus8_tmp_t); break;
- case 9200: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 9300: size_tmps = kernel_blocks * sizeof (scrypt_tmp_t); break;
- case 9400: size_tmps = kernel_blocks * sizeof (office2007_tmp_t); break;
- case 9500: size_tmps = kernel_blocks * sizeof (office2010_tmp_t); break;
- case 9600: size_tmps = kernel_blocks * sizeof (office2013_tmp_t); break;
- case 10000: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 10200: size_tmps = kernel_blocks * sizeof (cram_md5_t); break;
- case 10300: size_tmps = kernel_blocks * sizeof (saph_sha1_tmp_t); break;
- case 10500: size_tmps = kernel_blocks * sizeof (pdf14_tmp_t); break;
- case 10700: size_tmps = kernel_blocks * sizeof (pdf17l8_tmp_t); break;
- case 10900: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 11300: size_tmps = kernel_blocks * sizeof (bitcoin_wallet_tmp_t); break;
- case 11600: size_tmps = kernel_blocks * sizeof (seven_zip_tmp_t); break;
- case 11900: size_tmps = kernel_blocks * sizeof (pbkdf2_md5_tmp_t); break;
- case 12000: size_tmps = kernel_blocks * sizeof (pbkdf2_sha1_tmp_t); break;
- case 12100: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 12200: size_tmps = kernel_blocks * sizeof (ecryptfs_tmp_t); break;
- case 12300: size_tmps = kernel_blocks * sizeof (oraclet_tmp_t); break;
- case 12400: size_tmps = kernel_blocks * sizeof (bsdicrypt_tmp_t); break;
- case 12500: size_tmps = kernel_blocks * sizeof (rar3_tmp_t); break;
- case 12700: size_tmps = kernel_blocks * sizeof (mywallet_tmp_t); break;
- case 12800: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 12900: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 13000: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- };
-
- // size_hooks
-
- if ((opts_type & OPTS_TYPE_HOOK12) || (opts_type & OPTS_TYPE_HOOK23))
- {
- // none yet
- }
-
- // now check if all device-memory sizes which depend on the kernel_accel amplifier are within its boundaries
- // if not, decrease amplifier and try again
-
- if (size_pws > device_param->device_maxmem_alloc)
- {
- kernel_accel--;
-
- continue;
- }
-
- if (size_tmps > device_param->device_maxmem_alloc)
- {
- kernel_accel--;
-
- continue;
- }
-
- if (size_hooks > device_param->device_maxmem_alloc)
- {
- kernel_accel--;
-
- continue;
- }
-
- if ((size_pws + size_tmps + size_hooks) > device_param->device_global_mem)
- {
- kernel_accel--;
-
- continue;
- }
-
- break;
- }
-
- if (kernel_accel == 0)
- {
- log_error ("ERROR: Device #%u does not provide enough allocatable device-memory to handle hash-type %u", device_id + 1, data.hash_mode);
-
- return -1;
- }
-
- device_param->kernel_threads = kernel_threads;
- device_param->kernel_power_user = kernel_power;
- device_param->kernel_blocks_user = kernel_blocks;
-
- kernel_blocks_all += kernel_blocks;
-
- device_param->size_pws = size_pws;
- device_param->size_tmps = size_tmps;
- device_param->size_hooks = size_hooks;
-
- // we can optimize some stuff here...
-
uint size_root_css = SP_PW_MAX * sizeof (cs_t);
uint size_markov_css = SP_PW_MAX * CHARSIZ * sizeof (cs_t);
uint size_results = KERNEL_THREADS * sizeof (uint);
- device_param->size_results = size_results;
+ device_param->size_results = size_results;
uint size_rules = kernel_rules_cnt * sizeof (kernel_rule_t);
uint size_rules_c = KERNEL_RULES * sizeof (kernel_rule_t);
uint size_bfs = KERNEL_BFS * sizeof (bf_t);
uint size_tm = 32 * sizeof (bs_word_t);
- // scrypt stuff
+ // scryptV stuff
u64 size_scryptV = 1;
if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %u\n", data.salts_buf[0].scrypt_tmto, size_scryptV);
}
+ /**
+ * create input buffers on device : calculate size of dynamic size memory buffers
+ */
+
+ uint kernel_threads = KERNEL_THREADS;
+
+ // some algorithms need a fixed kernel-threads count (mostly because of shared memory usage)
+
+ if (hash_mode == 3200) kernel_threads = 8;
+ if (hash_mode == 9000) kernel_threads = 8;
+
+ /**
+ * some algorithms need a fixed kernel-loops count
+ */
+
+ // 1500/3000 iterate a fixed 1024 loops per kernel invocation (presumably the
+ // DES-based kernels — confirm); 8900/9300 are scrypt-based (see the size_tmps
+ // table below) and run exactly one loop per invocation
+
+ if (hash_mode == 1500)
+ {
+ const u32 kernel_loops_fixed = 1024;
+
+ device_param->kernel_loops_min = kernel_loops_fixed;
+ device_param->kernel_loops_max = kernel_loops_fixed;
+ }
+
+ if (hash_mode == 3000)
+ {
+ const u32 kernel_loops_fixed = 1024;
+
+ device_param->kernel_loops_min = kernel_loops_fixed;
+ device_param->kernel_loops_max = kernel_loops_fixed;
+ }
+
+ if (hash_mode == 8900)
+ {
+ const u32 kernel_loops_fixed = 1;
+
+ device_param->kernel_loops_min = kernel_loops_fixed;
+ device_param->kernel_loops_max = kernel_loops_fixed;
+ }
+
+ if (hash_mode == 9300)
+ {
+ const u32 kernel_loops_fixed = 1;
+
+ device_param->kernel_loops_min = kernel_loops_fixed;
+ device_param->kernel_loops_max = kernel_loops_fixed;
+ }
+
+ if (hash_mode == 12500)
+ {
+ const u32 kernel_loops_fixed = ROUNDS_RAR3 / 16;
+
+ device_param->kernel_loops_min = kernel_loops_fixed;
+ device_param->kernel_loops_max = kernel_loops_fixed;
+ }
+
+ /**
+ * some algorithms have a maximum kernel-loops count
+ */
+
+ // for slow (outside-kernel) hashes, never loop more than the salt's iteration count
+
+ if (attack_exec == ATTACK_EXEC_OUTSIDE_KERNEL)
+ {
+ if (data.salts_buf[0].salt_iter < device_param->kernel_loops_max)
+ {
+ device_param->kernel_loops_max = data.salts_buf[0].salt_iter;
+ }
+ }
+
+ /**
+ * some algorithms need a special kernel-accel
+ */
+
+ if (hash_mode == 8900)
+ {
+ device_param->kernel_accel_min = 1;
+ device_param->kernel_accel_max = 64;
+ }
+
+ if (hash_mode == 9300)
+ {
+ device_param->kernel_accel_min = 1;
+ device_param->kernel_accel_max = 64;
+ }
+
+ u32 kernel_accel_min = device_param->kernel_accel_min;
+ u32 kernel_accel_max = device_param->kernel_accel_max;
+
+ // find out if we would request too much memory on memory blocks which are based on kernel_accel
+
+ uint size_pws = 4;
+ uint size_tmps = 4;
+ uint size_hooks = 4;
+
+ // starting at the configured maximum, shrink kernel_accel_max one step per
+ // iteration until every per-accel buffer fits in a single allocation AND the
+ // sum of all device buffers fits in global memory
+
+ while (kernel_accel_max >= kernel_accel_min)
+ {
+ uint kernel_power_max = device_processors * kernel_threads * kernel_accel_max;
+
+ // size_pws
+
+ size_pws = kernel_power_max * sizeof (pw_t);
+
+ // size_tmps
+
+ // per-password scratch size is determined by the tmp struct of the kernel
+ // for this hash-mode; modes not listed keep the 4-byte placeholder
+
+ switch (hash_mode)
+ {
+ case 400: size_tmps = kernel_power_max * sizeof (phpass_tmp_t); break;
+ case 500: size_tmps = kernel_power_max * sizeof (md5crypt_tmp_t); break;
+ case 501: size_tmps = kernel_power_max * sizeof (md5crypt_tmp_t); break;
+ case 1600: size_tmps = kernel_power_max * sizeof (md5crypt_tmp_t); break;
+ case 1800: size_tmps = kernel_power_max * sizeof (sha512crypt_tmp_t); break;
+ case 2100: size_tmps = kernel_power_max * sizeof (dcc2_tmp_t); break;
+ case 2500: size_tmps = kernel_power_max * sizeof (wpa_tmp_t); break;
+ case 3200: size_tmps = kernel_power_max * sizeof (bcrypt_tmp_t); break;
+ case 5200: size_tmps = kernel_power_max * sizeof (pwsafe3_tmp_t); break;
+ case 5800: size_tmps = kernel_power_max * sizeof (androidpin_tmp_t); break;
+ case 6211: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6212: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6213: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6221: size_tmps = kernel_power_max * sizeof (tc64_tmp_t); break;
+ case 6222: size_tmps = kernel_power_max * sizeof (tc64_tmp_t); break;
+ case 6223: size_tmps = kernel_power_max * sizeof (tc64_tmp_t); break;
+ case 6231: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6232: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6233: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6241: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6242: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6243: size_tmps = kernel_power_max * sizeof (tc_tmp_t); break;
+ case 6300: size_tmps = kernel_power_max * sizeof (md5crypt_tmp_t); break;
+ case 6400: size_tmps = kernel_power_max * sizeof (sha256aix_tmp_t); break;
+ case 6500: size_tmps = kernel_power_max * sizeof (sha512aix_tmp_t); break;
+ case 6600: size_tmps = kernel_power_max * sizeof (agilekey_tmp_t); break;
+ case 6700: size_tmps = kernel_power_max * sizeof (sha1aix_tmp_t); break;
+ case 6800: size_tmps = kernel_power_max * sizeof (lastpass_tmp_t); break;
+ case 7100: size_tmps = kernel_power_max * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 7200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 7400: size_tmps = kernel_power_max * sizeof (sha256crypt_tmp_t); break;
+ case 7900: size_tmps = kernel_power_max * sizeof (drupal7_tmp_t); break;
+ case 8200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 8800: size_tmps = kernel_power_max * sizeof (androidfde_tmp_t); break;
+ case 8900: size_tmps = kernel_power_max * sizeof (scrypt_tmp_t); break;
+ case 9000: size_tmps = kernel_power_max * sizeof (pwsafe2_tmp_t); break;
+ case 9100: size_tmps = kernel_power_max * sizeof (lotus8_tmp_t); break;
+ case 9200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 9300: size_tmps = kernel_power_max * sizeof (scrypt_tmp_t); break;
+ case 9400: size_tmps = kernel_power_max * sizeof (office2007_tmp_t); break;
+ case 9500: size_tmps = kernel_power_max * sizeof (office2010_tmp_t); break;
+ case 9600: size_tmps = kernel_power_max * sizeof (office2013_tmp_t); break;
+ case 10000: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 10200: size_tmps = kernel_power_max * sizeof (cram_md5_t); break;
+ case 10300: size_tmps = kernel_power_max * sizeof (saph_sha1_tmp_t); break;
+ case 10500: size_tmps = kernel_power_max * sizeof (pdf14_tmp_t); break;
+ case 10700: size_tmps = kernel_power_max * sizeof (pdf17l8_tmp_t); break;
+ case 10900: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 11300: size_tmps = kernel_power_max * sizeof (bitcoin_wallet_tmp_t); break;
+ case 11600: size_tmps = kernel_power_max * sizeof (seven_zip_tmp_t); break;
+ case 11900: size_tmps = kernel_power_max * sizeof (pbkdf2_md5_tmp_t); break;
+ case 12000: size_tmps = kernel_power_max * sizeof (pbkdf2_sha1_tmp_t); break;
+ case 12100: size_tmps = kernel_power_max * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 12200: size_tmps = kernel_power_max * sizeof (ecryptfs_tmp_t); break;
+ case 12300: size_tmps = kernel_power_max * sizeof (oraclet_tmp_t); break;
+ case 12400: size_tmps = kernel_power_max * sizeof (bsdicrypt_tmp_t); break;
+ case 12500: size_tmps = kernel_power_max * sizeof (rar3_tmp_t); break;
+ case 12700: size_tmps = kernel_power_max * sizeof (mywallet_tmp_t); break;
+ case 12800: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 12900: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 13000: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 13200: size_tmps = kernel_power_max * sizeof (axcrypt_tmp_t); break;
+ case 13400: size_tmps = kernel_power_max * sizeof (keepass_tmp_t); break;
+ };
+
+ // size_hooks
+
+ if ((opts_type & OPTS_TYPE_HOOK12) || (opts_type & OPTS_TYPE_HOOK23))
+ {
+ // none yet
+ }
+
+ // now check if all device-memory sizes which depend on the kernel_accel_max amplifier are within its boundaries
+ // if not, decrease amplifier and try again
+
+ int skip = 0;
+
+ if (size_pws > device_param->device_maxmem_alloc) skip = 1;
+ if (size_tmps > device_param->device_maxmem_alloc) skip = 1;
+ if (size_hooks > device_param->device_maxmem_alloc) skip = 1;
+
+ // total footprint check: 8x bitmap plus every fixed and accel-dependent buffer
+ // must fit in the device's global memory
+
+ if (( bitmap_size
+ + bitmap_size
+ + bitmap_size
+ + bitmap_size
+ + bitmap_size
+ + bitmap_size
+ + bitmap_size
+ + bitmap_size
+ + size_bfs
+ + size_combs
+ + size_digests
+ + size_esalts
+ + size_hooks
+ + size_markov_css
+ + size_plains
+ + size_pws
+ + size_results
+ + size_root_css
+ + size_rules
+ + size_rules_c
+ + size_salts
+ + size_scryptV
+ + size_shown
+ + size_tm
+ + size_tmps) > device_param->device_global_mem) skip = 1;
+
+ if (skip == 1)
+ {
+ kernel_accel_max--;
+
+ continue;
+ }
+
+ break;
+ }
+
+ // NOTE(review): the OOM bail-out below is disabled, so if the loop exhausts
+ // all accel values, kernel_accel_max ends up at kernel_accel_min - 1 —
+ // confirm downstream code tolerates that
+
+ /*
+ if (kernel_accel_max == 0)
+ {
+ log_error ("Device #%u: Device does not provide enough allocatable device-memory to handle hash-type %u", device_id + 1, data.hash_mode);
+
+ return -1;
+ }
+ */
+
+ device_param->kernel_accel_min = kernel_accel_min;
+ device_param->kernel_accel_max = kernel_accel_max;
+
+ /*
+ if (kernel_accel_max < kernel_accel)
+ {
+ if (quiet == 0) log_info ("Device #%u: Reduced maximum kernel-accel to %u", device_id + 1, kernel_accel_max);
+
+ device_param->kernel_accel = kernel_accel_max;
+ }
+ */
+
+ device_param->size_bfs = size_bfs;
+ device_param->size_combs = size_combs;
+ device_param->size_rules = size_rules;
+ device_param->size_rules_c = size_rules_c;
+ device_param->size_pws = size_pws;
+ device_param->size_tmps = size_tmps;
+ device_param->size_hooks = size_hooks;
+
+ // do not confuse kernel_accel_max with kernel_accel here
+
+ const u32 kernel_power = device_processors * kernel_threads * kernel_accel_max;
+
+ device_param->kernel_threads = kernel_threads;
+ device_param->kernel_power_user = kernel_power;
+
+ kernel_power_all += kernel_power;
+
/**
* default building options
*/
// we don't have sm_* on vendors not NV but it doesn't matter
- snprintf (build_opts, sizeof (build_opts) - 1, "-I%s/ -DVENDOR_ID=%u -DCUDA_ARCH=%d -DVECT_SIZE=%u -DDEVICE_TYPE=%u", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type);
+ snprintf (build_opts, sizeof (build_opts) - 1, "-cl-std=CL1.1 -I\"%s/\" -DVENDOR_ID=%u -DCUDA_ARCH=%d -DVECT_SIZE=%u -DDEVICE_TYPE=%u", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type);
/**
* main kernel
int rc = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts, NULL, NULL, false);
+ #ifdef DEBUG
+ // debug builds: fetch and print the OpenCL build log to help diagnose
+ // kernel compilation problems; two-call pattern — first query the size,
+ // then retrieve the log itself
+ size_t build_log_size = 0;
+
+ hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_size);
+
+ // a reported size of 1 is just the terminating NUL, i.e. an empty log
+ if (build_log_size > 1)
+ {
+ char *build_log = (char *) malloc (build_log_size + 1);
+
+ // NOTE(review): malloc result is not checked before use — debug-only path,
+ // but verify against the project's allocation policy (mymalloc elsewhere)
+ memset (build_log, 0, build_log_size + 1);
+
+ hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, build_log_size, build_log, NULL);
+
+ puts (build_log);
+
+ free (build_log);
+ }
+ #endif
+
if (rc != 0)
{
device_param->skipped = true;
{
snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DSCRYPT_N=%d -DSCRYPT_R=%d -DSCRYPT_P=%d -DSCRYPT_TMTO=%d", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.salts_buf[0].scrypt_tmto);
}
+ else
+ {
+ // non-scrypt hashes: carry over the base build options unchanged so
+ // build_opts_update is always initialized before use
+ snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s", build_opts);
+ }
int rc = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts_update, NULL, NULL, false);
+ #ifdef DEBUG
+ // debug builds: print the OpenCL build log for the second (options-updated)
+ // program build; same size-query-then-fetch pattern as the first build above
+ size_t build_log_size = 0;
+
+ hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_size);
+
+ // a reported size of 1 is just the terminating NUL, i.e. an empty log
+ if (build_log_size > 1)
+ {
+ char *build_log = (char *) malloc (build_log_size + 1);
+
+ // NOTE(review): duplicated verbatim from the first build-log dump — a shared
+ // static helper would remove the duplication; malloc result also unchecked
+ memset (build_log, 0, build_log_size + 1);
+
+ hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, build_log_size, build_log, NULL);
+
+ puts (build_log);
+
+ free (build_log);
+ }
+ #endif
+
if (rc != 0)
{
device_param->skipped = true;
+
log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
}
}
device_param->pws_buf = pws_buf;
- pw_cache_t *pw_caches = (pw_cache_t *) mycalloc (64, sizeof (pw_cache_t));
-
- for (int i = 0; i < 64; i++)
- {
- pw_caches[i].pw_buf.pw_len = i;
- pw_caches[i].cnt = 0;
- }
-
- device_param->pw_caches = pw_caches;
-
comb_t *combs_buf = (comb_t *) mycalloc (KERNEL_COMBS, sizeof (comb_t));
device_param->combs_buf = combs_buf;
device_param->hooks_buf = hooks_buf;
- device_param->pw_transpose = pw_transpose_to_hi1;
- device_param->pw_add = pw_add_to_hc1;
-
/**
* kernel args
*/
device_param->kernel_params_amp[5] = &device_param->kernel_params_amp_buf32[5];
device_param->kernel_params_amp[6] = &device_param->kernel_params_amp_buf32[6];
- device_param->kernel_params_tb[0] = &device_param->d_pws_buf;
-
device_param->kernel_params_tm[0] = &device_param->d_bfs_c;
device_param->kernel_params_tm[1] = &device_param->d_tm_c;
{
if (opts_type & OPTS_TYPE_PT_BITSLICE)
{
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_tb", kern_type);
-
- device_param->kernel_tb = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
-
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_tm", kern_type);
device_param->kernel_tm = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
if (opts_type & OPTS_TYPE_PT_BITSLICE)
{
- hc_clSetKernelArg (data.ocl, device_param->kernel_tb, 0, sizeof (cl_mem), device_param->kernel_params_tb[0]);
-
hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 0, sizeof (cl_mem), device_param->kernel_params_tm[0]);
hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 1, sizeof (cl_mem), device_param->kernel_params_tm[1]);
}
#endif // HAVE_HWMON && HAVE_ADL
}
- data.kernel_blocks_all = kernel_blocks_all;
+ data.kernel_power_all = kernel_power_all;
if (data.quiet == 0) log_info ("");
/**
- * Inform user which algorithm is checked and at which workload setting
+ * In benchmark-mode, inform user which algorithm is checked
*/
if (benchmark == 1)
char *hash_type = strhashtype (data.hash_mode); // not a bug
log_info ("Hashtype: %s", hash_type);
- //log_info ("Workload: %u loops, %u accel", kernel_loops, kernel_accel);
log_info ("");
}
return (-1);
}
- char line_buf[BUFSIZ] = { 0 };
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
while (!feof (mask_fp))
{
- memset (line_buf, 0, BUFSIZ);
+ memset (line_buf, 0, HCBUFSIZ);
int line_len = fgetl (mask_fp, line_buf);
maskcnt++;
}
+ myfree (line_buf);
+
fclose (mask_fp);
}
else
return (-1);
}
- char line_buf[BUFSIZ] = { 0 };
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
uint masks_avail = 1;
while (!feof (mask_fp))
{
- memset (line_buf, 0, BUFSIZ);
+ memset (line_buf, 0, HCBUFSIZ);
int line_len = fgetl (mask_fp, line_buf);
maskcnt++;
}
+ myfree (line_buf);
+
fclose (mask_fp);
mask_from_file = 1;
return (-1);
}
- char line_buf[BUFSIZ] = { 0 };
+ char *line_buf = (char *) mymalloc (HCBUFSIZ);
uint masks_avail = 1;
while (!feof (mask_fp))
{
- memset (line_buf, 0, BUFSIZ);
+ memset (line_buf, 0, HCBUFSIZ);
int line_len = fgetl (mask_fp, line_buf);
maskcnt++;
}
+ myfree (line_buf);
+
fclose (mask_fp);
mask_from_file = 1;
device_param->speed_pos = 0;
memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (u64));
- memset (device_param->speed_ms, 0, SPEED_CACHE * sizeof (float));
- memset (device_param->speed_rec, 0, SPEED_CACHE * sizeof (hc_timer_t));
+ memset (device_param->speed_ms, 0, SPEED_CACHE * sizeof (double));
device_param->exec_pos = 0;
memset (device_param->exec_ms, 0, EXEC_CACHE * sizeof (double));
- device_param->kernel_power = device_param->kernel_power_user;
- device_param->kernel_blocks = device_param->kernel_blocks_user;
+ device_param->kernel_power = device_param->kernel_power_user;
device_param->outerloop_pos = 0;
device_param->outerloop_left = 0;
// some more resets:
- if (device_param->pw_caches) memset (device_param->pw_caches, 0, 64 * sizeof (pw_cache_t));
-
if (device_param->pws_buf) memset (device_param->pws_buf, 0, device_param->size_pws);
- device_param->pw_cnt = 0;
device_param->pws_cnt = 0;
device_param->words_off = 0;
device_param->words_done = 0;
}
- data.kernel_blocks_div = 0;
+ data.kernel_power_div = 0;
// figure out some workload
if ((wordlist_mode == WL_MODE_FILE) || (wordlist_mode == WL_MODE_MASK))
{
- if (data.words_base < kernel_blocks_all)
+ if (data.words_base < kernel_power_all)
{
if (quiet == 0)
{
local_free (device_param->result);
- local_free (device_param->pw_caches);
-
local_free (device_param->combs_buf);
local_free (device_param->hooks_buf);
if (device_param->kernel_mp) hc_clReleaseKernel (data.ocl, device_param->kernel_mp);
if (device_param->kernel_mp_l) hc_clReleaseKernel (data.ocl, device_param->kernel_mp_l);
if (device_param->kernel_mp_r) hc_clReleaseKernel (data.ocl, device_param->kernel_mp_r);
- if (device_param->kernel_tb) hc_clReleaseKernel (data.ocl, device_param->kernel_tb);
if (device_param->kernel_tm) hc_clReleaseKernel (data.ocl, device_param->kernel_tm);
if (device_param->kernel_amp) hc_clReleaseKernel (data.ocl, device_param->kernel_amp);